diff --git a/.gitignore b/.gitignore index 111a7c5..8b8757b 100644 --- a/.gitignore +++ b/.gitignore @@ -35,8 +35,13 @@ doc # auxiliary files -example/DLRA_laplacian2D_lti_lyapunov/output.txt -example/DLRA_laplacian2D_lti_lyapunov/laplacian.gpp +example/DLRA_laplacian2D_lti_lyapunov/*.txt +example/DLRA_laplacian2D_lti_lyapunov/*.gpp +example/DLRA_ginzburg_landau/*.txt +example/DLRA_ginzburg_landau/*.gpp + +# local data +local/ # data files *.npy diff --git a/example/DLRA_ginzburg_landau/ginzburg_landau_RK_lyapunov.f90 b/example/DLRA_ginzburg_landau/ginzburg_landau_RK_lyapunov.f90 new file mode 100644 index 0000000..ac4eaf9 --- /dev/null +++ b/example/DLRA_ginzburg_landau/ginzburg_landau_RK_lyapunov.f90 @@ -0,0 +1,268 @@ +module Ginzburg_Landau_RK_Lyapunov + ! Standard Library. + use stdlib_optval, only : optval + ! RKLIB module for time integration. + use rklib_module + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + ! Ginzburg Landau + use Ginzburg_Landau_Base + use Ginzburg_Landau_Operators + implicit none + + private :: this_module + character*128, parameter :: this_module = 'Ginzburg_Landau_RK_Lyapunov' + + public :: GL_mat + + !------------------------------------------- + !----- LIGHTKRYLOV VECTOR TYPE ----- + !------------------------------------------- + + type, extends(abstract_vector_rdp), public :: state_matrix + real(wp) :: state(N**2) = 0.0_wp + contains + private + procedure, pass(self), public :: zero => matrix_zero + procedure, pass(self), public :: dot => matrix_dot + procedure, pass(self), public :: scal => matrix_scal + procedure, pass(self), public :: axpby => matrix_axpby + procedure, pass(self), public :: rand => matrix_rand + procedure, pass(self), public :: get_size => matrix_get_size + end type state_matrix + + !------------------------------- + !----- RK LYAPUNOV ----- + !------------------------------- + + type, extends(abstract_linop_rdp), public :: RK_lyapunov + real(wp) :: tau ! Integration time. + contains + private + procedure, pass(self), public :: matvec => direct_solver_lyap + procedure, pass(self), public :: rmatvec => adjoint_solver_lyap + end type RK_lyapunov + +contains + + !----- TYPE-BOUND PROCEDURE FOR MATRICES ----- + + subroutine matrix_zero(self) + class(state_matrix), intent(inout) :: self + self%state = 0.0_wp + return + end subroutine matrix_zero + + real(wp) function matrix_dot(self, vec) result(alpha) + class(state_matrix), intent(in) :: self + class(abstract_vector_rdp), intent(in) :: vec + select type(vec) + type is(state_matrix) + alpha = dot_product(self%state, weight_mat*vec%state) + end select + return + end function matrix_dot + + integer function matrix_get_size(self) result(N) + class(state_matrix), intent(in) :: self + N = N**2 + return + end function matrix_get_size + + subroutine matrix_scal(self, alpha) + class(state_matrix), intent(inout) :: self + real(wp), intent(in) :: alpha + self%state = self%state * alpha + return + end subroutine matrix_scal + + subroutine matrix_axpby(self, alpha, vec, beta) + class(state_matrix), intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: vec + real(wp) , intent(in) :: alpha, beta + select type(vec) + type is(state_matrix) + self%state = alpha*self%state + beta*vec%state + end select + return + end subroutine matrix_axpby + + subroutine matrix_rand(self, ifnorm) + class(state_matrix), intent(inout) :: self + logical, optional, intent(in) :: ifnorm + ! 
internals + logical :: normalize + real(wp) :: alpha + normalize = optval(ifnorm, .true.) + call random_number(self%state) + if (normalize) then + alpha = self%norm() + call self%scal(1.0/alpha) + endif + return + end subroutine matrix_rand + + subroutine GL_mat(flat_mat_out, flat_mat_in, adjoint, transpose) + + !> State vector. + real(wp), dimension(:), intent(in) :: flat_mat_in + !> Time-derivative. + real(wp), dimension(:), intent(out) :: flat_mat_out + !> Adjoint + logical, optional :: adjoint + logical :: adj + logical, optional :: transpose + logical :: trans + + !> Internal variables. + integer :: j + real(wp), dimension(N,N) :: mat, dmat + + !> Deal with optional argument + adj = optval(adjoint,.false.) + trans = optval(transpose,.false.) + + !> Sets the internal variables. + mat = reshape(flat_mat_in(1:N**2),(/N, N/)) + dmat = 0.0_wp + + if (adj) then + if (trans) then + do j = 1,N + call adjoint_GL(mat(:,j), dmat(j,:)) + end do + else + do j = 1,N + call adjoint_GL(mat(:,j), dmat(:,j)) + end do + end if + else + if (trans) then + do j = 1,N + call direct_GL(mat(:,j), dmat(j,:)) + end do + else + do j = 1,N + call direct_GL(mat(:,j), dmat(:,j)) + end do + end if + endif + + !> Reshape for output + flat_mat_out = reshape(dmat, shape(flat_mat_in)) + + return + end subroutine GL_mat + + !-------------------------------------- + !----- WRAPPERS FOR RKLIB ----- + !-------------------------------------- + + subroutine rhs_lyap(me, t, x_flat, f_flat) + ! Time-integrator. + class(rk_class), intent(inout) :: me + ! Current time. + real(wp), intent(in) :: t + ! State vector. + real(wp), dimension(:), intent(in) :: x_flat + ! Time-derivative. + real(wp), dimension(:), intent(out) :: f_flat + + ! internals + real(wp), dimension(N**2) :: x_tmp, AX_flat, XAH_flat + + f_flat = 0.0_wp; AX_flat = 0.0_wp; XAH_flat = 0.0_wp; x_tmp = 0.0_wp + ! A @ X + call GL_mat( AX_flat, x_flat, adjoint = .false., transpose = .false.) + ! build X.T + x_tmp = reshape(transpose(reshape(x_flat, (/ N,N /))), shape(x_flat)) + ! build ( A @ X.T ).T = X @ A.T + call GL_mat(XAH_flat, x_tmp, adjoint = .false., transpose = .true.) + ! construct Lyapunov equation + f_flat = AX_flat + XAH_flat + BBTW_flat + + return + end subroutine rhs_lyap + + subroutine adjoint_rhs_lyap(me, t, x_flat, f_flat) + ! Time-integrator. + class(rk_class), intent(inout) :: me + ! Current time. + real(wp), intent(in) :: t + ! State vector. + real(wp), dimension(:), intent(in) :: x_flat + ! Time-derivative. + real(wp), dimension(:), intent(out) :: f_flat + + ! internals + real(wp), dimension(N**2) :: x_tmp, AHX_flat, XA_flat + + f_flat = 0.0_wp; AHX_flat = 0.0_wp; XA_flat = 0.0_wp; x_tmp = 0.0_wp + ! A.T @ X + call GL_mat(AHX_flat, x_flat, adjoint = .true., transpose = .false.) + ! build X.T + x_tmp = reshape(transpose(reshape(x_flat, (/ N,N /))), shape(x_flat)) + ! build ( A.T @ X.T ).T = X @ A + call GL_mat( XA_flat, x_tmp, adjoint = .true., transpose = .true.) + ! construct Lyapunov equation + f_flat = AHX_flat + XA_flat + CTCW_flat + + return + end subroutine adjoint_rhs_lyap + + !------------------------------------------------------------------------ + !----- TYPE-BOUND PROCEDURES FOR THE EXPONENTIAL PROPAGATOR ----- + !------------------------------------------------------------------------ + + subroutine direct_solver_lyap(self, vec_in, vec_out) + ! Linear Operator. + class(rk_lyapunov), intent(in) :: self + ! Input vector. + class(abstract_vector_rdp), intent(in) :: vec_in + ! Output vector. 
+ class(abstract_vector_rdp), intent(out) :: vec_out + + ! Time-integrator. + type(rks54_class) :: prop + real(kind=wp) :: dt = 1.0_wp + + select type(vec_in) + type is(state_matrix) + select type(vec_out) + type is(state_matrix) + ! Initialize propagator. + call prop%initialize(n=N**2, f=rhs_lyap) + ! Integrate forward in time. + call prop%integrate(0.0_wp, vec_in%state, dt, self%tau, vec_out%state) + end select + end select + return + end subroutine direct_solver_lyap + + subroutine adjoint_solver_lyap(self, vec_in, vec_out) + ! Linear Operator. + class(rk_lyapunov), intent(in) :: self + ! Input vector. + class(abstract_vector_rdp), intent(in) :: vec_in + ! Output vector. + class(abstract_vector_rdp), intent(out) :: vec_out + + ! Time-integrator. + type(rks54_class) :: prop + real(kind=wp) :: dt = 1.0_wp + + select type(vec_in) + type is(state_matrix) + select type(vec_out) + type is(state_matrix) + ! Initialize propagator. + call prop%initialize(n=N**2, f=adjoint_rhs_lyap) + ! Integrate forward in time. + call prop%integrate(0.0_wp, vec_in%state, dt, self%tau, vec_out%state) + end select + end select + return + end subroutine adjoint_solver_lyap + +end module Ginzburg_Landau_RK_Lyapunov \ No newline at end of file diff --git a/example/DLRA_ginzburg_landau/ginzburg_landau_base.f90 b/example/DLRA_ginzburg_landau/ginzburg_landau_base.f90 new file mode 100644 index 0000000..35d22a0 --- /dev/null +++ b/example/DLRA_ginzburg_landau/ginzburg_landau_base.f90 @@ -0,0 +1,217 @@ +module Ginzburg_Landau_Base + ! Standard Library. + use stdlib_optval, only : optval + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only: wp => dp + use LightKrylov_Logger + use LightKrylov_Utils, only : assert_shape + use LightKrylov_AbstractVectors + ! LightROM + use LightROM_AbstractLTIsystems ! LR_state + implicit none + + private :: this_module + character*128, parameter :: this_module = 'Ginzburg_Landau_Base' + + public :: L, nx, dx + public :: nu, gamma, mu_0, c_mu, mu_2, mu + public :: rk_b, x_b, s_b, rk_c, x_c, s_c + public :: B, CT, weight, weight_mat + public :: N, BBTW_flat, CTCW_flat + public :: Qc, Rinv, CTQcCW_mat, BRinvBTW_mat + + + !------------------------------- + !----- PARAMETERS 1 ----- + !------------------------------- + + ! Mesh related parameters. + real(wp), parameter :: L = 50.0_wp ! Domain length + integer, parameter :: nx = 128 ! Number of grid points (excluding boundaries). + real(wp) :: dx ! Grid size. + + !------------------------------------------- + !----- LIGHTKRYLOV VECTOR TYPE ----- + !------------------------------------------- + + type, extends(abstract_vector_rdp), public :: state_vector + real(wp) :: state(2*nx) = 0.0_wp + contains + private + procedure, pass(self), public :: zero + procedure, pass(self), public :: dot + procedure, pass(self), public :: scal + procedure, pass(self), public :: axpby + procedure, pass(self), public :: rand + procedure, pass(self), public :: get_size + end type state_vector + + !------------------------------------------------------- + !----- LIGHTKRYLOV SYM LOW RANK STATE TYPE ----- + !------------------------------------------------------- + + type, extends(abstract_sym_low_rank_state_rdp), public :: LR_state + contains + private + procedure, pass(self), public :: initialize_LR_state + end type LR_state + + !------------------------------- + !----- PARAMETERS 2 ----- + !------------------------------- + + ! Physical parameters. 
+ complex(wp), parameter :: nu = cmplx(2.0_wp, 0.2_wp, wp) + complex(wp), parameter :: gamma = cmplx(1.0_wp, -1.0_wp, wp) + real(wp), parameter :: mu_0 = 0.38_wp + real(wp), parameter :: c_mu = 0.2_wp + real(wp), parameter :: mu_2 = -0.01_wp + real(wp) :: mu(1:nx) + + ! Input-Output system parameters + real(wp) :: weight(2*nx) ! integration weights + integer, parameter :: rk_b = 2 ! number of inputs to the system + real(wp), parameter :: x_b = -11.0_wp ! location of input Gaussian + real(wp), parameter :: s_b = 1.0_wp ! variance of input Gaussian + type(state_vector) :: B(rk_b) + real(wp), parameter :: x_c = sqrt(-2.0_wp*(mu_0 - c_mu**2)/mu_2) ! location of input Gaussian + real(wp), parameter :: s_c = 1.0_wp ! variance of input Gaussian + integer, parameter :: rk_c = 2 ! number of outputs to the system + type(state_vector) :: CT(rk_c) + real(wp) :: Qc(rk_c,rk_c) + real(wp) :: Rinv(rk_b,rk_b) + + ! Data matrices for RK lyap + integer, parameter :: N = 2*nx ! Number of grid points (excluding boundaries). + real(wp) :: weight_mat(N**2) ! integration weights + real(wp) :: BBTW_flat(N**2) + real(wp) :: CTCW_flat(N**2) + ! Data matrices for Riccatis + real(wp) :: CTQcCW_mat(N,N) + real(wp) :: BRinvBTW_mat(N,N) + +contains + + !========================================================= + !========================================================= + !===== ===== + !===== LIGHTKRYLOV MANDATORY IMPLEMENTATIONS ===== + !===== ===== + !========================================================= + !========================================================= + + !---------------------------------------------------- + !----- TYPE-BOUND PROCEDURE FOR VECTORS ----- + !---------------------------------------------------- + + subroutine zero(self) + class(state_vector), intent(inout) :: self + self%state = 0.0_wp + return + end subroutine zero + + real(wp) function dot(self, vec) result(alpha) + ! weighted inner product + class(state_vector), intent(in) :: self + class(abstract_vector_rdp), intent(in) :: vec + select type(vec) + type is(state_vector) + alpha = dot_product(self%state, weight*vec%state) + end select + return + end function dot + + integer function get_size(self) result(N) + class(state_vector), intent(in) :: self + N = 2*nx + return + end function get_size + + subroutine scal(self, alpha) + class(state_vector), intent(inout) :: self + real(wp), intent(in) :: alpha + self%state = self%state * alpha + return + end subroutine scal + + subroutine axpby(self, alpha, vec, beta) + class(state_vector), intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: vec + real(wp), intent(in) :: alpha, beta + select type(vec) + type is(state_vector) + self%state = alpha*self%state + beta*vec%state + end select + return + end subroutine axpby + + subroutine rand(self, ifnorm) + class(state_vector), intent(inout) :: self + logical, optional, intent(in) :: ifnorm + ! internals + logical :: normalize + real(wp) :: alpha + normalize = optval(ifnorm,.true.) 
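+      ! Fill the state with uniform random numbers; if ifnorm is true (the default),
+      ! rescale to unit norm, where the norm derives from the weighted dot product defined above.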
+ call random_number(self%state) + if (normalize) then + alpha = self%norm() + call self%scal(1.0/alpha) + endif + return + end subroutine rand + + !------------------------------------------------------ + !----- TYPE BOUND PROCEDURES FOR LR STATES ----- + !------------------------------------------------------ + + subroutine initialize_LR_state(self, U, S, rk, rkmax) + class(LR_state), intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: U(:) + real(wp), intent(in) :: S(:,:) + integer, intent(in) :: rk + integer, optional, intent(in) :: rkmax + + ! internals + real(wp), allocatable :: R(:, :) + integer :: i, n, rka, info + + n = size(U) + call assert_shape(S, [n,n], "initialize_LR_state", "S") + + ! optional size argument + if (present(rkmax)) then + self%rk = rk - 1 + rka = rkmax + else + self%rk = rk + rka = rk + 1 + end if + + select type (U) + type is (state_vector) + ! allocate & initialize + allocate(self%U(rka), source=U(1)); call zero_basis(self%U) + allocate(self%S(rka,rka)); self%S = 0.0_wp + ! copy inputs + if (self%rk > n) then ! copy the full IC into self%U + call copy_basis(self%U(1:n), U) + self%S(1:n,1:n) = S + else ! fill the first self%rk columns of self%U with the first self%rk columns of the IC + call copy_basis(self%U(1:self%rk), U(1:self%rk)) + self%S(1:self%rk,1:self%rk) = S(1:self%rk,1:self%rk) + end if + ! top up basis (to rka for rank-adaptivity) with orthonormal columns if needed + if (rka > n) then + do i = n+1, rka + call self%U(i)%rand() + end do + allocate(R(rka,rka)); R = 0.0_wp + call qr(self%U, R, info) + call check_info(info, 'qr', module=this_module, procedure='initialize_LR_state') + end if + end select + return + end subroutine initialize_LR_state + +end module Ginzburg_Landau_Base diff --git a/example/DLRA_ginzburg_landau/ginzburg_landau_operators.f90 b/example/DLRA_ginzburg_landau/ginzburg_landau_operators.f90 new file mode 100644 index 0000000..a8e056b --- /dev/null +++ b/example/DLRA_ginzburg_landau/ginzburg_landau_operators.f90 @@ -0,0 +1,432 @@ +module Ginzburg_Landau_Operators + ! Standard Library. + use stdlib_optval, only : optval + ! RKLIB module for time integration. + use rklib_module + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_utils, only : assert_shape + ! LightROM + use LightROM_AbstractLTIsystems ! abstract_lti_system + ! Ginzburg Landau + use Ginzburg_Landau_Base + implicit none + + private :: this_module + character*128, parameter :: this_module = 'Ginzburg_Landau_Operators' + public :: exptA, direct_GL, adjoint_GL + + !----------------------------------------------- + !----- LIGHTKRYLOV LTI SYSTEM TYPE ----- + !----------------------------------------------- + + type, extends(abstract_lti_system_rdp), public :: lti_system + contains + private + procedure, pass(self), public :: initialize_lti_system + end type lti_system + + !-------------------------------------- + !----- LINEAR GL OPERATOR ----- + !-------------------------------------- + + type, extends(abstract_linop_rdp), public :: GL_operator + contains + private + procedure, pass(self), public :: matvec => direct_matvec_GL + procedure, pass(self), public :: rmatvec => adjoint_matvec_GL + end type GL_operator + + !------------------------------------------ + !----- EXPONENTIAL PROPAGATOR ----- + !------------------------------------------ + + type, extends(abstract_linop_rdp), public :: exponential_prop + real(wp) :: tau ! Integration time. 
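+      ! matvec/rmatvec integrate the direct/adjoint linearized GL equations over [0, tau]
+      ! using RKLIB's rks54 scheme (see direct_solver/adjoint_solver below).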
+ contains + private + procedure, pass(self), public :: matvec => direct_solver + procedure, pass(self), public :: rmatvec => adjoint_solver + end type exponential_prop + +contains + + !======================================================================== + !======================================================================== + !===== ===== + !===== PHYSICAL MODEL : LINEARIZED GINZBURG-LANDAU EQUATION ===== + !===== ===== + !======================================================================== + !======================================================================== + + !--------------------------------------------------------- + !----- LINEARIZED GINZBURG-LANDAU EQUATIONS ----- + !--------------------------------------------------------- + + subroutine direct_GL(vec_in, vec_out) + + !> State vector. + real(wp), dimension(:), intent(in) :: vec_in + !> Time-derivative. + real(wp), dimension(:), intent(out) :: vec_out + + !> Internal variables. + integer :: i + real(wp), dimension(nx) :: u, v, du, dv + real(wp) :: d2u, d2v, cu, cv + + u = vec_in(1:nx) + v = vec_in(nx+1:2*nx) + + !--------------------------------------------------- + !----- Linear Ginzburg Landau Equation ----- + !--------------------------------------------------- + + cu = u(2) / (2*dx) ; cv = v(2) / (2*dx) + du(1) = -(real(nu)*cu - aimag(nu)*cv) ! Convective term. + dv(1) = -(aimag(nu)*cu + real(nu)*cv) ! Convective term. + + d2u = (u(2) - 2*u(1)) / dx**2 ; d2v = (v(2) - 2*v(1)) / dx**2 + du(1) = du(1) + real(gamma)*d2u - aimag(gamma)*d2v ! Diffusion term. + dv(1) = dv(1) + aimag(gamma)*d2u + real(gamma)*d2v ! Diffusion term. + + du(1) = du(1) + mu(1)*u(1) ! Non-parallel term. + dv(1) = dv(1) + mu(1)*v(1) ! Non-parallel term. + + ! Interior nodes. + do i = 2, nx-1 + ! Convective term. + cu = (u(i+1) - u(i-1)) / (2*dx) + cv = (v(i+1) - v(i-1)) / (2*dx) + du(i) = -(real(nu)*cu - aimag(nu)*cv) + dv(i) = -(aimag(nu)*cu + real(nu)*cv) + + ! Diffusion term. + d2u = (u(i+1) - 2*u(i) + u(i-1)) / dx**2 + d2v = (v(i+1) - 2*v(i) + v(i-1)) / dx**2 + du(i) = du(i) + real(gamma)*d2u - aimag(gamma)*d2v + dv(i) = dv(i) + aimag(gamma)*d2u + real(gamma)*d2v + + ! Non-parallel term. + du(i) = du(i) + mu(i)*u(i) + dv(i) = dv(i) + mu(i)*v(i) + enddo + + ! Right most boundary points. + cu = -u(nx-1) / (2*dx) ; cv = -v(nx-1) / (2*dx) + du(nx) = -(real(nu)*cu - aimag(nu)*cv) ! Convective term. + dv(nx) = -(aimag(nu)*cu + real(nu)*cv) ! Convective term. + + d2u = (-2*u(nx) + u(nx-1)) / dx**2 ; d2v = (-2*v(nx) + v(nx-1)) / dx**2 + du(nx) = du(nx) + real(gamma)*d2u - aimag(gamma)*d2v ! Diffusion term. + dv(nx) = dv(nx) + aimag(gamma)*d2u + real(gamma)*d2v ! Diffusion term. + + du(nx) = du(nx) + mu(nx)*u(nx) ! Non-parallel term. + dv(nx) = dv(nx) + mu(nx)*v(nx) ! Non-parallel term. + + vec_out(1:nx) = du + vec_out(nx+1:2*nx) = dv + + return + end subroutine direct_GL + + !----------------------------------------------------------- + !----- Adjoint linear Ginzburg-Landau equation ----- + !----------------------------------------------------------- + + subroutine adjoint_GL(vec_in, vec_out) + !> State vector. + real(wp), dimension(:), intent(in) :: vec_in + !> Time-derivative. + real(wp), dimension(:), intent(out) :: vec_out + + ! Internal variables. + integer :: i + real(wp), dimension(nx) :: u, du + real(wp), dimension(nx) :: v, dv + real(wp) :: d2u, d2v, cu, cv + + ! Sets the internal variables. 
+ u = vec_in(1:nx) + v = vec_in(nx+1:2*nx) + + !--------------------------------------------------- + !----- Linear Ginzburg Landau Equation ----- + !--------------------------------------------------- + + ! Left most boundary points. + cu = u(2) / (2*dx) ; cv = v(2) / (2*dx) + du(1) = (real(nu)*cu + aimag(nu)*cv) ! Convective term. + dv(1) = (-aimag(nu)*cu + real(nu)*cv) ! Convective term. + + d2u = (u(2) - 2*u(1)) / dx**2 ; d2v = (v(2) - 2*v(1)) / dx**2 + du(1) = du(1) + real(gamma)*d2u + aimag(gamma)*d2v ! Diffusion term. + dv(1) = dv(1) - aimag(gamma)*d2u + real(gamma)*d2v ! Diffusion term. + + du(1) = du(1) + mu(1)*u(1) ! Non-parallel term. + dv(1) = dv(1) + mu(1)*v(1) ! Non-parallel term. + + ! Interior nodes. + do i = 2, nx-1 + ! Convective term. + cu = (u(i+1) - u(i-1)) / (2*dx) + cv = (v(i+1) - v(i-1)) / (2*dx) + du(i) = (real(nu)*cu + aimag(nu)*cv) + dv(i) = (-aimag(nu)*cu + real(nu)*cv) + + ! Diffusion term. + d2u = (u(i+1) - 2*u(i) + u(i-1)) / dx**2 + d2v = (v(i+1) - 2*v(i) + v(i-1)) / dx**2 + du(i) = du(i) + real(gamma)*d2u + aimag(gamma)*d2v + dv(i) = dv(i) - aimag(gamma)*d2u + real(gamma)*d2v + + ! Non-parallel term. + du(i) = du(i) + mu(i)*u(i) + dv(i) = dv(i) + mu(i)*v(i) + enddo + + ! Right most boundary points. + cu = -u(nx-1) / (2*dx) ; cv = -v(nx-1) / (2*dx) + du(nx) = (real(nu)*cu + aimag(nu)*cv) ! Convective term. + dv(nx) = (-aimag(nu)*cu + real(nu)*cv) ! Convective term. + + d2u = (-2*u(nx) + u(nx-1)) / dx**2 ; d2v = (-2*v(nx) + v(nx-1)) / dx**2 + du(nx) = du(nx) + real(gamma)*d2u + aimag(gamma)*d2v ! Diffusion term. + dv(nx) = dv(nx) - aimag(gamma)*d2u + real(gamma)*d2v ! Diffusion term. + + du(nx) = du(nx) + mu(nx)*u(nx) ! Non-parallel term. + dv(nx) = dv(nx) + mu(nx)*v(nx) ! Non-parallel term. + + ! Copy results to the output array. + vec_out(1:nx) = du + vec_out(nx+1:2*nx) = dv + + return + end subroutine adjoint_GL + + !-------------------------------------- + !----- WRAPPERS FOR RKLIB ----- + !-------------------------------------- + + subroutine rhs(me, t, x, f) + ! Time-integrator. + class(rk_class), intent(inout) :: me + ! Current time. + real(wp), intent(in) :: t + ! State vector. + real(wp), dimension(:), intent(in) :: x + ! Time-derivative. + real(wp), dimension(:), intent(out) :: f + + f = 0.0_wp + call direct_GL(x, f) + + return + end subroutine rhs + + subroutine adjoint_rhs(me, t, x, f) + ! Time-integrator. + class(rk_class), intent(inout) :: me + ! Current time. + real(wp), intent(in) :: t + ! State vector. + real(wp), dimension(:), intent(in) :: x + ! Time-derivative. + real(wp), dimension(:), intent(out) :: f + + f = 0.0_wp + call adjoint_GL(x, f) + + return + end subroutine adjoint_rhs + + !------------------------------------------------------------- + !----- TYPE-BOUND PROCEDURES FOR THE GL OPERATOR ----- + !------------------------------------------------------------- + + subroutine direct_matvec_GL(self, vec_in, vec_out) + !> Linear Operator. + class(GL_operator), intent(in) :: self + !> Input vector. + class(abstract_vector_rdp), intent(in) :: vec_in + !> Output vector. + class(abstract_vector_rdp), intent(out) :: vec_out + select type(vec_in) + type is (state_vector) + select type(vec_out) + type is (state_vector) + call direct_GL(vec_in%state, vec_out%state) + end select + end select + return + end subroutine direct_matvec_GL + + subroutine adjoint_matvec_GL(self, vec_in, vec_out) + !> Linear Operator. + class(GL_operator), intent(in) :: self + !> Input vector. + class(abstract_vector_rdp), intent(in) :: vec_in + !> Output vector. 
+ class(abstract_vector_rdp), intent(out) :: vec_out + select type(vec_in) + type is (state_vector) + select type(vec_out) + type is (state_vector) + call adjoint_GL(vec_in%state, vec_out%state) + end select + end select + return + end subroutine adjoint_matvec_GL + + !------------------------------------------------------------------------ + !----- TYPE-BOUND PROCEDURES FOR THE EXPONENTIAL PROPAGATOR ----- + !------------------------------------------------------------------------ + + subroutine direct_solver(self, vec_in, vec_out) + ! Linear Operator. + class(exponential_prop), intent(in) :: self + ! Input vector. + class(abstract_vector_rdp), intent(in) :: vec_in + ! Output vector. + class(abstract_vector_rdp), intent(out) :: vec_out + + ! Time-integrator. + type(rks54_class) :: prop + real(wp) :: dt = 1.0_wp + + select type(vec_in) + type is(state_vector) + select type(vec_out) + type is(state_vector) + + ! Initialize propagator. + call prop%initialize(n=2*nx, f=rhs) + ! Integrate forward in time. + call prop%integrate(0.0_wp, vec_in%state, dt, self%tau, vec_out%state) + + end select + end select + return + end subroutine direct_solver + + subroutine adjoint_solver(self, vec_in, vec_out) + ! Linear Operator. + class(exponential_prop), intent(in) :: self + ! Input vector. + class(abstract_vector_rdp), intent(in) :: vec_in + ! Output vector. + class(abstract_vector_rdp), intent(out) :: vec_out + + ! Time-integrator. + type(rks54_class) :: prop + real(wp) :: dt = 1.0_wp + + select type(vec_in) + type is(state_vector) + select type(vec_out) + type is(state_vector) + + ! Initialize propagator. + call prop%initialize(n=2*nx, f=adjoint_rhs) + ! Integrate forward in time. + call prop%integrate(0.0_wp, vec_in%state, dt, self%tau, vec_out%state) + + end select + end select + return + end subroutine adjoint_solver + + !-------------------------------------- + !----- EXP(tA) SUBROUTINE ----- + !-------------------------------------- + + subroutine exptA(vec_out, A, vec_in, tau, info, trans) + !! Subroutine for the exponential propagator that conforms with the abstract interface + !! defined in expmlib.f90 + class(abstract_vector_rdp), intent(out) :: vec_out + !! Output vector + class(abstract_linop_rdp), intent(inout) :: A + !! Linear operator + class(abstract_vector_rdp), intent(in) :: vec_in + !! Input vector. + real(wp), intent(in) :: tau + !! Integration horizon + integer, intent(out) :: info + !! Information flag + logical, optional, intent(in) :: trans + logical :: transpose + !! Direct or Adjoint? + + ! optional argument + transpose = optval(trans, .false.) + + ! time integrator + select type (vec_in) + type is (state_vector) + select type (vec_out) + type is (state_vector) + select type (A) + type is (exponential_prop) + ! set integration time + A%tau = tau + if (transpose) then + call A%rmatvec(vec_in, vec_out) + else + call A%matvec(vec_in, vec_out) + end if + end select + end select + end select + + end subroutine exptA + + !-------------------------------------------------------- + !----- TYPE BOUND PROCEDURES FOR LTI SYSTEMS ----- + !-------------------------------------------------------- + + subroutine initialize_lti_system(self, A, prop, B, CT, D) + class(lti_system), intent(inout) :: self + class(abstract_linop_rdp), intent(in) :: A + class(abstract_linop_rdp), intent(in) :: prop + class(abstract_vector_rdp), intent(in) :: B(:) + class(abstract_vector_rdp), intent(in) :: CT(:) + real(wp), optional, intent(in) :: D(:,:) + + ! internal variables + integer :: rk_b, rk_c + + ! 
Operator + select type (A) + type is (GL_operator) + allocate(self%A, source=A) + end select + ! Exp prop + select type (prop) + type is (exponential_prop) + allocate(self%prop, source=prop) + end select + ! Input + select type (B) + type is (state_vector) + rk_b = size(B) + allocate(self%B(1:rk_b), source=B(1:rk_b)) + end select + ! Output + select type (CT) + type is (state_vector) + rk_c = size(CT) + allocate(self%CT(1:rk_c), source=CT(1:rk_c)) + end select + ! Throughput + allocate(self%D(1:rk_c, 1:rk_b)) + if (present(D)) then + call assert_shape(D, (/ rk_c, rk_b /), 'initialize_lti_system', 'D') + self%D = D + else + self%D = 0.0_wp + end if + return + end subroutine initialize_lti_system + +end module Ginzburg_Landau_Operators \ No newline at end of file diff --git a/example/DLRA_ginzburg_landau/ginzburg_landau_tests.f90 b/example/DLRA_ginzburg_landau/ginzburg_landau_tests.f90 new file mode 100644 index 0000000..7a24565 --- /dev/null +++ b/example/DLRA_ginzburg_landau/ginzburg_landau_tests.f90 @@ -0,0 +1,1103 @@ +module Ginzburg_Landau_Tests + ! Standard Library. + use stdlib_optval, only : optval + use stdlib_linalg, only : diag, svd, svdvals + use stdlib_io_npy, only : save_npy, load_npy + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_AbstractVectors ! linear_combination + use LightKrylov_Utils ! svd, sqrtm + ! LightROM + use LightROM_Utils ! Balancing_Transformation + ! Lyapunov Solver + use LightROM_LyapunovSolvers + use LightROM_LyapunovUtils + ! Riccati Solver + use LightROM_RiccatiSolvers + use LightROM_RiccatiUtils + ! Ginzburg Landau + use Ginzburg_Landau_Base + use Ginzburg_Landau_Operators + use Ginzburg_Landau_RK_Lyapunov + use Ginzburg_Landau_Utils + !use fortime + implicit none + + ! IO + integer, parameter :: iunit1 = 1 + integer, parameter :: iunit2 = 2 + character*128, parameter :: basepath = 'local/' + integer, parameter :: rkmax = 40 + integer, parameter :: rk_X0 = 40 + + private :: this_module + public :: iunit1, iunit2, basepath, rkmax, rk_X0 + public :: run_DLRA_lyapunov_test + public :: run_BT_test + public :: run_kexpm_test + public :: run_DLRA_riccati_test + public :: run_lyap_convergence_test + + character*128, parameter :: this_module = 'Ginzburg_Landau_Tests' + +contains + + subroutine run_DLRA_lyapunov_test(LTI, U0, S0, rkv, tauv, TOv, Tend, nrep, ifsave, ifverb, iflogs) + ! LTI system + type(lti_system), intent(inout) :: LTI + ! Initial condition + type(state_vector), intent(in) :: U0(:) + real(wp), intent(in) :: S0(:,:) + ! vector of dt values + real(wp), intent(in) :: tauv(:) + ! vector of rank values + integer, intent(in) :: rkv(:) + ! vector of torders + integer, intent(in) :: TOv(:) + real(wp), intent(in) :: Tend + integer, intent(in) :: nrep + ! Optional + logical, optional, intent(in) :: ifsave + logical :: if_save_npy + logical, optional, intent(in) :: ifverb + logical :: verb + logical, optional, intent(in) :: iflogs + logical :: if_save_logs + + ! Internal variables + type(LR_state), allocatable :: X ! Controllability + type(LR_state), allocatable :: Y ! Observability + real(wp) :: U_out(2*nx,rkmax) + real(wp) :: X_out(2*nx,2*nx) + real(wp), allocatable :: vals(:) + real(wp) :: sfro + real(wp) :: tau, Ttot, etime, etime_tot + integer :: i, j, k, ito, rk, irep, nsteps + integer :: info, torder, iostatus + real(wp) :: lagsvd(rkmax) + real(wp) :: res(N**2) + character*128 :: oname + character*128 :: onameU + character*128 :: onameS + integer :: clock_rate, clock_start, clock_stop + ! 
DLRA opts + type(dlra_opts) :: opts + + if_save_npy = optval(ifsave, .false.) + verb = optval(ifverb, .false.) + if_save_logs = optval(iflogs, .false.) + + call system_clock(count_rate=clock_rate) + + write(*,*) '' + write(*,*) '----------------------' + write(*,*) ' CONTROLLABILITY' + write(*,*) '----------------------' + + X = LR_state() + do ito = 1, size(TOv) + torder = TOv(ito) + do i = 1, size(rkv) + rk = rkv(i) + if (allocated(vals)) deallocate(vals) + allocate(vals(1:rk)) + do j = 1, size(tauv) + tau = tauv(j) + ! Initialize low-rank representation with rank rk + if (verb) write(*,*) 'Initialize LR state, rk =', rk + call X%initialize_LR_state(U0, S0, rk) + ! Reset time + Ttot = 0.0_wp + lagsvd = 0.0_wp + if (verb) write(*,*) 'Run DRLA' + if (if_save_logs) then + write(oname,'("output_GL_X_norm__n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".txt")') nx, torder, rk, tau + open(unit=iunit1, file=trim(basepath)//oname) + call stamp_logfile_header(iunit1, 'Controllability Gramian', rk, tau, Tend, torder) + write(iunit1,'(A16,A4,A10,A18,A18,A20)') 'DLRA:',' rk',' Tend','|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + write(oname,'("output_GL_X_sigma_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".txt")') nx, torder, rk, tau + open(unit=iunit2, file=trim(basepath)//oname) + call stamp_logfile_header(iunit2, 'Controllability Gramian', rk, tau, Tend, torder) + write(iunit2,*) 'DLRA: T sigma_i d(sigma-i)/sigma-1 d(sigma_i)/sigma_i ||Sum(sigma_i)||_2' + end if + write(*,'(A16,A4,A4,A10,A6,A8,A18,A18,A20)') 'DLRA:',' rk',' TO','dt','steps','Tend', & + & '|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + nsteps = nint(Tend/tau) + etime_tot = 0.0_wp + ! set solver options + opts = dlra_opts(mode=ito, verbose=verb) + do irep = 1, nrep + ! run integrator + etime = 0.0_wp + call system_clock(count=clock_start) ! Start Timer + call projector_splitting_DLRA_lyapunov_integrator(X, LTI%prop, LTI%B, Tend, tau, info, & + & exptA=exptA, iftrans=.false., options=opts) + call system_clock(count=clock_stop) ! Stop Timer + etime = etime + real(clock_stop-clock_start)/real(clock_rate) + ! Compute LR basis spectrum + vals = svdvals(X%S(1:rk,1:rk)) + if (if_save_logs) then + write(iunit2,'("sigma ",F8.4)',ADVANCE='NO') Ttot + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') vals(k); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') abs(vals(k) - lagsvd(k))/lagsvd(1); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') abs(vals(k) - lagsvd(k))/lagsvd(k); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + lagsvd(1:rk) = lagsvd(1:rk) - vals + sfro = 0.0_wp + do k = 1, rk + sfro = sfro + lagsvd(k)**2 + end do + sfro = sqrt(sfro) + write(iunit2,'(E14.6)'), sfro + end if + lagsvd(1:rk) = vals + ! Reconstruct solution + call reconstruct_solution(X_out, X) + Ttot = Ttot + Tend + call CALE(res, reshape(X_out, shape(res)), BBTW_flat, .false.) 
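+               ! CALE is assumed here to evaluate the Lyapunov residual A @ X + X @ A.T + B @ B.T @ W,
+               ! so || res ||_2/N reported below measures how closely the reconstructed
+               ! DLRA solution satisfies the continuous algebraic Lyapunov equation.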
+ write(*,'(I4," ",A11,I4," TO",I1,F10.6,I6,F8.4,E18.8,E18.8,F18.4," s")') irep, 'Xctl OUTPUT', & + & rk, torder, tau, nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + if (if_save_logs) then + write(iunit1,'(I4," ",A11,I6,F8.4,E18.8,E18.8,E18.8,F18.4," s")') irep, 'Xctl OUTPUT', & + & nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + end if + etime_tot = etime_tot + etime + end do + if (verb) write(*,*) 'Total integration time (DLRA):', etime_tot, 's' + if (if_save_logs) then + write(iunit1,*) 'Total integration time (DLRA):', etime_tot, 's'; close(iunit1) + write(Iunit2,*) 'Total integration time (DLRA):', etime_tot, 's'; close(iunit2) + end if + if (if_save_npy) then + write(onameU,'("data_GLXY_XU_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + write(onameS,'("data_GLXY_XS_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + call save_npy(trim(basepath)//onameU, U_out(:,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + call save_npy(trim(basepath)//onameS, X%S(1:rk,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameS); STOP 2; end if + end if + deallocate(X%U) + deallocate(X%S) + end do + end do + end do + + write(*,*) '' + write(*,*) '--------------------' + write(*,*) ' OBSERVABILITY' + write(*,*) '--------------------' + write(*,*) '' + + Y = LR_state() + do ito = 1, size(TOv) + torder = TOv(ito) + do i = 1, size(rkv) + rk = rkv(i) + if (allocated(vals)) deallocate(vals) + allocate(vals(1:rk)) + do j = 1, size(tauv) + tau = tauv(j) + ! Initialize low-rank representation with rank rk + if (verb) write(*,*) 'Initialize LR state, rk =', rk + call Y%initialize_LR_state(U0, S0, rk) + ! Reset time + Ttot = 0.0_wp + if (verb) write(*,*) 'Run DRLA' + if (if_save_logs) then + write(oname,'("output_GL_Y_norm__n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".txt")') nx, torder, rk, tau + open(unit=iunit1, file=trim(basepath)//oname) + call stamp_logfile_header(iunit1, 'Observability Gramian', rk, tau, Tend, torder) + write(iunit1,'(A16,A4,A10,A18,A18,A20)') 'DLRA:',' rk',' Tend','|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + write(oname,'("output_GL_Y_sigma_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".txt")') nx, torder, rk, tau + open(unit=iunit2, file=trim(basepath)//oname) + call stamp_logfile_header(iunit2, 'Observability Gramian', rk, tau, Tend, torder) + write(iunit2,*) 'DLRA: T sigma_i d(sigma-i)/sigma-1 d(sigma_i)/sigma_i ||Sum(sigma_i)||_2' + end if + write(*,'(A16,A4,A4,A10,A6,A8,A18,A18,A20)') 'DLRA:',' rk',' TO','dt','steps','Tend', & + & '|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + nsteps = nint(Tend/tau) + etime_tot = 0.0_wp + ! set solver options + opts = dlra_opts(mode=ito, verbose=verb) + do irep = 1, nrep + ! run integrator + etime = 0.0_wp + call system_clock(count=clock_start) ! Start Timer + call projector_splitting_DLRA_lyapunov_integrator(Y, LTI%prop, LTI%CT, Tend, tau, info, & + & exptA=exptA, iftrans=.true., options=opts) + call system_clock(count=clock_stop) ! Stop Timer + etime = etime + real(clock_stop-clock_start)/real(clock_rate) + ! 
Compute LR basis spectrum + vals = svdvals(Y%S(1:rk,1:rk)) + if (if_save_logs) then + write(iunit2,'("sigma ",F8.4)',ADVANCE='NO') Ttot + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') vals(k); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') abs(vals(k) - lagsvd(k))/lagsvd(1); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') abs(vals(k) - lagsvd(k))/lagsvd(k); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + lagsvd(1:rk) = lagsvd(1:rk) - vals + sfro = 0.0_wp + do k = 1, rk + sfro = sfro + lagsvd(k)**2 + end do + sfro = sqrt(sfro) + write(iunit2,'(E14.6)'), sfro + end if + lagsvd(1:rk) = vals + + ! Reconstruct solution + call reconstruct_solution(X_out, Y) + Ttot = Ttot + Tend + call CALE(res, reshape(X_out, shape(res)), CTCW_flat, .true.) + write(*,'(I4," ",A11,I4," TO",I1,F10.6,I6,F8.4,E18.8,E18.8,F18.4," s")') irep, 'Yobs OUTPUT', & + & rk, torder, tau, nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + if (if_save_logs) then + write(iunit1,'(I4," ",A11,I6,F8.4,E18.8,E18.8,F18.4," s")') irep, 'Yobs OUTPUT', & + & nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + end if + etime_tot = etime_tot + etime + end do + if (verb) write(*,*) 'Total integration time (DLRA):', etime_tot, 's' + if (if_save_logs) then + write(iunit1,*) 'Total integration time (DLRA):', etime_tot, 's'; close(iunit1) + write(Iunit2,*) 'Total integration time (DLRA):', etime_tot, 's'; close(iunit2) + end if + if (if_save_npy) then + write(onameU,'("data_GLXY_YU_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + write(onameS,'("data_GLXY_YS_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + call save_npy(trim(basepath)//onameU, U_out(:,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + call save_npy(trim(basepath)//onameS, Y%S(1:rk,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameS); STOP 2; end if + end if + deallocate(Y%U) + deallocate(Y%S) + end do + end do + end do + + return + end subroutine run_DLRA_lyapunov_test + + ! + ! BALANCED TRUNCATION + ! + + subroutine run_BT_test(LTI, U0, S0, rk, tau, torder, Tmax, nrep, ifsave, ifload, ifverb, iflogs) + ! LTI system + type(lti_system), intent(inout) :: LTI + ! Initial condition + type(state_vector), intent(in) :: U0(:) + real(wp), intent(in) :: S0(:,:) + integer, intent(in) :: rk + real(wp), intent(inout) :: tau + integer, intent(inout) :: torder + real(wp), intent(in) :: Tmax + integer, intent(in) :: nrep + ! Optional + logical, optional, intent(in) :: ifsave + logical :: if_save_npy + logical, optional, intent(in) :: ifload + logical :: if_load_npy + logical, optional, intent(in) :: ifverb + logical :: verb + logical, optional, intent(in) :: iflogs + logical :: if_save_logs + + ! Internal variables + type(LR_state), allocatable :: X ! Controllability + type(LR_state), allocatable :: Y ! Observability + type(state_vector), allocatable :: Utmp(:) + real(wp) :: U0_in(2*nx, rkmax) + real(wp) :: U_out(2*nx,rkmax) + real(wp) :: X_out(2*nx,2*nx) + real(wp), allocatable :: vecs(:,:) + real(wp), allocatable :: vals(:) + real(wp) :: sfro + real(wp) :: Tend, Ttot, etime, etime_tot + integer :: i, j, k, irep, nsteps + integer :: info, iostatus + real(wp) :: lagsvd(rkmax) + real(wp) :: res(N**2) + + ! 
ROM + real(wp), allocatable :: Swrk(:,:) + real(wp), allocatable :: Ahat(:,:) + real(wp), allocatable :: Bhat(:,:) + real(wp), allocatable :: Chat(:,:) + real(wp), allocatable :: D(:,:) + + ! BT + type(state_vector), allocatable :: T(:) + type(state_vector), allocatable :: Tinv(:) + real(wp), allocatable :: S(:) + real(wp), allocatable :: U_load(:,:) + real(wp), allocatable :: S_load(:,:) + + ! SVD + real(wp) :: U_svd(2*nx,2*nx) + real(wp) :: S_svd(rkmax) + real(wp) :: V_svd(rkmax,rkmax) + + character*128 :: oname + character*128 :: onameU + character*128 :: onameS + logical :: existU, existS + integer :: clock_rate, clock_start, clock_stop + ! DLRA opts + type(dlra_opts) :: opts + + if_save_npy = optval(ifsave, .false.) + if_load_npy = optval(ifload, .false.) + verb = optval(ifverb, .false.) + if_save_logs = optval(iflogs, .false.) + + call system_clock(count_rate=clock_rate) + + Tend = Tmax/nrep + nsteps = nint(Tend/tau) + + write(*,*) '' + write(*,*) '----------------------' + write(*,*) ' CONTROLLABILITY' + write(*,*) '----------------------' + write(*,*) '' + + onameU = trim(basepath)//"GL_Xctl_U.npy" + onameS = trim(basepath)//"GL_Xctl_S.npy" + + X = LR_state() + if (if_load_npy) then + write(*,*) 'Load data from file:' + write(*,*) ' ', trim(onameU) + write(*,*) ' ', trim(onameS) + inquire(file=onameU, exist=existU) + inquire(file=onameS, exist=existS) + if (existU .and. existS) then + call load_npy(onameU, U_load, iostatus) + if (iostatus /= 0) then; write(*,*) "Error loading file", trim(onameU); STOP 2; end if + call load_npy(onameS, S_load, iostatus) + if (iostatus /= 0) then; write(*,*) "Error loading file", trim(onameS); STOP 2; end if + else + write(*,*) 'Files to load X not found.' + STOP 1 + end if + if (.not.allocated(Utmp)) allocate(Utmp(1:rk), source=U0(1)) + call set_state(Utmp, U_load(:,1:rk)) + call X%initialize_LR_state(Utmp, S_load, rk) + else + ! Initialize low-rank representation with rank rk + if (verb) write(*,*) 'Initialize LR state, rk =', rk + call X%initialize_LR_state(U0, S0, rk) + ! Reset time + Ttot = 0.0_wp + etime_tot = 0.0_wp + ! set solver options + opts = dlra_opts(mode=torder, verbose=verb) + write(*,'(A16,A4,A4,A10,A6,A8,A18,A18,A20)') 'DLRA:',' rk',' TO','dt','steps','Tend', & + & '|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + do irep = 1, nrep + ! run integrator + etime = 0.0_wp + call system_clock(count=clock_start) ! Start Timer + call projector_splitting_DLRA_lyapunov_integrator(X, LTI%prop, LTI%B, Tend, tau, info, & + & exptA=exptA, iftrans=.false., options=opts) + call system_clock(count=clock_stop) ! Stop Timer + etime = etime + real(clock_stop-clock_start)/real(clock_rate) + + ! Reconstruct solution + call reconstruct_solution(X_out, X) + Ttot = Ttot + Tend + call CALE(res, reshape(X_out, shape(res)), BBTW_flat, .false.) 
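+            ! Residual check as above; the converged low-rank factors X%U, X%S are reused
+            ! for the balancing transformation computed after the observability loop.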
+ write(*,'(I4," ",A11,I4," TO",I1,F10.6,I6,F8.4,E18.8,E18.8,F18.4," s")') irep, 'Xctl OUTPUT', & + & rk, torder, tau, nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + etime_tot = etime_tot + etime + end do + if (verb) write(*,*) 'Total integration time (DLRA):', etime_tot, 's' + if (if_save_npy) then + write(*,*) 'Save data to file:' + write(*,*) ' ', trim(onameU) + write(*,*) ' ', trim(onameS) + call save_npy(onameU, U_out(:,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + call save_npy(onameS, X%S(1:rk,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameS); STOP 2; end if + end if + end if + + write(*,*) '' + write(*,*) '--------------------' + write(*,*) ' OBSERVABILITY' + write(*,*) '--------------------' + write(*,*) '' + + onameU = trim(basepath)//"GL_Yobs_U.npy" + onameS = trim(basepath)//"GL_Yobs_S.npy" + + Y = LR_state() + if (if_load_npy) then + write(*,*) 'Load data from file:' + write(*,*) ' ', trim(onameU) + write(*,*) ' ', trim(onameS) + inquire(file=onameU, exist=existU) + inquire(file=onameS, exist=existS) + if (existU .and. existS) then + call load_npy(onameU, U_load, iostatus) + if (iostatus /= 0) then; write(*,*) "Error loading file", trim(onameU); STOP 2; end if + call load_npy(onameS, S_load, iostatus) + if (iostatus /= 0) then; write(*,*) "Error loading file", trim(onameS); STOP 2; end if + else + write(*,*) 'Files to load Y not found.' + STOP 1 + end if + if (.not.allocated(Utmp)) allocate(Utmp(1:rk), source=U0(1)) + call set_state(Utmp, U_load(:,1:rk)) + call Y%initialize_LR_state(Utmp, S_load, rk) + else + ! Initialize low-rank representation with rank rk + if (verb) write(*,*) 'Initialize LR state, rk =', rk + call Y%initialize_LR_state(U0, S0, rk) + ! Reset time + Ttot = 0.0_wp + etime_tot = 0.0_wp + ! set solver options + opts = dlra_opts(mode=torder, verbose=verb) + write(*,'(A16,A4,A4,A10,A6,A8,A18,A18,A20)') 'DLRA:',' rk',' TO','dt','steps','Tend', & + & '|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + do irep = 1, nrep + ! run integrator + etime = 0.0_wp + call system_clock(count=clock_start) ! Start Timer + call projector_splitting_DLRA_lyapunov_integrator(Y, LTI%prop, LTI%CT, Tend, tau, info, & + & exptA=exptA, iftrans=.true., options=opts) + call system_clock(count=clock_stop) ! Stop Timer + etime = etime + real(clock_stop-clock_start)/real(clock_rate) + + ! Reconstruct solution + call reconstruct_solution(X_out, Y) + Ttot = Ttot + Tend + call CALE(res, reshape(X_out, shape(res)), CTCW_flat, .true.) + write(*,'(I4," ",A11,I4," TO",I1,F10.6,I6,F8.4,E18.8,E18.8,F18.4," s")') irep, 'Yobs OUTPUT', & + & rk, torder, tau, nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + etime_tot = etime_tot + etime + end do + if (verb) write(*,*) 'Total integration time (DLRA):', etime_tot, 's' + if (if_save_npy) then + write(*,*) 'Save data to file:' + write(*,*) ' ', trim(onameU) + write(*,*) ' ', trim(onameS) + call save_npy(onameU, U_out(:,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + call save_npy(onameS, Y%S(1:rk,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameS); STOP 2; end if + end if + end if + + write(*,*) '' + write(*,*) '------------------------------' + write(*,*) ' BALANCING TRANSFORMATION' + write(*,*) '------------------------------' + write(*,*) '' + + allocate(Swrk(1:rk,1:rk)) + if (.not.allocated(Utmp)) allocate(Utmp(1:rk), source=U0(1)) + + ! 
compute sqrt of coefficient matrix X%S and right-multiply it to X%U + Swrk = 0.0_wp + call sqrtm(X%S(1:rk,1:rk), Swrk(1:rk,1:rk), info) + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, X%U(1:rk), Swrk(1:rk,1:rk)) + call copy_basis(Utmp, Xwrk) + end block + !call linear_combination(Utmp, X%U, Swrk(1:rk,1:rk)) + call get_state(U0_in(:,1:rk), Utmp) + ! compute SVD of updated X%U + call svd(U0_in(:,1:rk), S_svd(1:rk), U_svd(:,1:2*nx), V_svd(1:rk,1:rk)) + call set_state(X%U(1:rk), matmul(U_svd(:,1:rk), diag(S_svd(1:rk)))) + + ! compute sqrt of coefficient matrix Y%S and right-multiply it to Y%U + Swrk = 0.0_wp + call sqrtm(Y%S(1:rk,1:rk), Swrk(1:rk,1:rk), info) + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, Y%U(1:rk), Swrk(1:rk,1:rk)) + call copy_basis(Utmp, Xwrk) + end block + !call linear_combination(Utmp, Y%U, Swrk(1:rk,1:rk)) + call get_state(U0_in(:,1:rk), Utmp) + ! compute SVD of updated Y%U + call svd(U0_in(:,1:rk), S_svd(1:rk), U_svd(:,1:2*nx), V_svd(1:rk,1:rk)) + call set_state(Y%U(1:rk), matmul(U_svd(:,1:rk), diag(S_svd(1:rk)))) + + ! compute balancing transformation based on SVD of Gramians + allocate(T(1:rk), source=U0(1)); allocate(Tinv(1:rk), source=U0(1)); allocate(S(1:rk)) + call Balancing_Transformation(T, S, Tinv, X%U(1:rk), Y%U(1:rk)) + + call ROM_Petrov_Galerkin_Projection(Ahat, Bhat, Chat, D, LTI, T, Tinv) + + if (if_save_npy) then + write(*,*) 'Save data to file:' + onameU = trim(basepath)//"GL_Ahat.npy" + write(*,*) ' ', trim(onameU) + call save_npy(onameU, Ahat) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + onameU = trim(basepath)//"GL_Bhat.npy" + write(*,*) ' ', trim(onameU) + call save_npy(onameU, Bhat) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + onameU = trim(basepath)//"GL_Chat.npy" + write(*,*) ' ', trim(onameU) + call save_npy(onameU, Chat) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + end if + write(*,*) '' + + end subroutine run_BT_test + + subroutine run_DLRA_riccati_test(LTI, U0, S0, Qc, Rinv, rkv, tauv, Tend, nrep, ifsave, ifverb, iflogs) + type(lti_system), intent(inout) :: LTI + !! Considered LTI system + type(state_vector), intent(in) :: U0(:) + real(wp), intent(in) :: S0(:,:) + !! Initial condition + real(wp), intent(in) :: Qc(:,:) + !! Measurement weights. + real(wp), intent(in) :: Rinv(:,:) + !! Inverse of the actuation weights. + real(wp), intent(in) :: tauv(:) + !! vector of dt values + integer, intent(in) :: rkv(:) + !! vector of rank values + real(wp), intent(in) :: Tend + integer, intent(in) :: nrep + ! Optional + logical, optional, intent(in) :: ifsave + logical :: if_save_npy + logical, optional, intent(in) :: ifverb + logical :: verb + logical, optional, intent(in) :: iflogs + logical :: if_save_logs + + ! Internal variables + type(LR_state), allocatable :: X ! Controllability + type(LR_state), allocatable :: Y ! Observability + real(wp) :: U_out(2*nx,rkmax) + real(wp) :: X_out(2*nx,2*nx) + real(wp), allocatable :: vecs(:,:) + real(wp), allocatable :: vals(:) + real(wp) :: sfro + real(wp) :: tau, Ttot, etime, etime_tot + integer :: i, j, k, rk, irep, nsteps + integer :: info, torder, iostatus + real(wp) :: res(N**2) + character*128 :: oname + character*128 :: onameU + character*128 :: onameS + integer :: clock_rate, clock_start, clock_stop + ! DLRA opts + type(dlra_opts) :: opts + + if_save_npy = optval(ifsave, .false.) 
+ verb = optval(ifverb, .false.) + if_save_logs = optval(iflogs, .false.) + + call system_clock(count_rate=clock_rate) + + write(*,*) '----------------------' + write(*,*) ' RICCATI EQUATION' + write(*,*) '----------------------' + + X = LR_state() + do torder = 1, 1 + do i = 1, size(rkv) + rk = rkv(i) + do j = 1, size(tauv) + tau = tauv(j) + ! Initialize low-rank representation with rank rk + if (verb) write(*,*) 'Initialize LR state, rk =', rk + call X%initialize_LR_state(U0, S0, rk) + ! Reset time + Ttot = 0.0_wp + if (verb) write(*,*) 'Run DRLA' + write(*,'(A16,A4,A4,A10,A6,A8,A18,A18,A20)') 'DLRA:',' rk',' TO','dt','steps','Tend', & + & '|| X_DLRA ||_2', '|| res ||_2','Elapsed time' + nsteps = nint(Tend/tau) + etime_tot = 0.0_wp + ! set solver options + opts = dlra_opts(mode=torder, verbose=verb) + do irep = 1, nrep + ! run integrator + etime = 0.0_wp + call system_clock(count=clock_start) ! Start Timer + !call projector_splitting_DLRA_riccati_integrator(X, LTI%prop, LTI%B, LTI%CT, Qc, Rinv, & + ! & Tend, tau, info, & + ! & exptA=exptA, iftrans=.false., options=opts) + call system_clock(count=clock_stop) ! Stop Timer + etime = etime + real(clock_stop-clock_start)/real(clock_rate) + + ! Reconstruct solution + call reconstruct_solution(X_out, X) + Ttot = Ttot + Tend + call CARE(res, reshape(X_out, shape(res)), reshape(CTQcCW_mat, shape(res)), BRinvBTW_mat, .false.) + write(*,'(I4," ",A11,I4," TO",I1,F10.6,I6,F8.4,E18.8,E18.8,F18.4," s")') irep, 'Xricc OUTPUT', & + & rk, torder, tau, nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + etime_tot = etime_tot + etime + end do + if (verb) write(*,*) 'Total integration time (DLRA):', etime_tot, 's' + if (if_save_npy) then + write(onameU,'("data_GL_Riccati_XU_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + write(onameS,'("data_GL_Riccati_XS_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + call save_npy(trim(basepath)//onameU, U_out(:,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + call save_npy(trim(basepath)//onameS, X%S(1:rk,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameS); STOP 2; end if + end if + deallocate(X%U) + deallocate(X%S) + end do + end do + end do + + end subroutine run_DLRA_riccati_test + + + subroutine run_kexpm_test(A, prop, U0, tauv, torder, N) + class(abstract_linop_rdp), intent(inout) :: A + !! Linear operator: A + class(abstract_linop_rdp), intent(inout) :: prop + !! Linear operator: exponential propagator + class(abstract_vector_rdp), intent(in) :: U0 + !! Abstract vector as a source + real(wp), intent(in) :: tauv(:) + !! vector of dt values + integer, intent(inout) :: torder + !! torder + integer, intent(in) :: n + !! Number of repeats + + ! internal variables + class(abstract_vector_rdp), allocatable :: U, V_kryl, V_rk ! scratch bases + integer :: i, j, info + real(wp) :: tau + real(wp) :: tv_kryl(N), tv_rk(N) + real(wp) :: etime_kryl, etime_rk, stddev_kryl, stddev_rk + integer :: clock_rate, clock_start, clock_stop + !type(timer) :: tmr + + call system_clock(count_rate=clock_rate) + + allocate(U, source=U0); call U%zero() + allocate(V_kryl, source=U0); call V_kryl%zero() + allocate(V_rk, source=U0); call V_rk%zero() + + tv_kryl = 0.0_wp + tv_rk = 0.0_wp + + write(*,*) 'Comparison over N = ', N, 'runs.' 
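+      ! For each tau, the loop below times N calls of the Krylov-based propagator (k_exptA)
+      ! against the RKLIB-based propagator (exptA), checks that both solutions agree to
+      ! within 10*atol_dp, and reports total/average/sample standard deviation of the timings.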
+ write(*,'(A8," | ",2(10X,A10,10X,5X))') 'tau','KRYLOV','R-K' + write(*,'(A8," | ",3(A10,1X),3X,3(A10,1X))') ' ','TOT','AVG','STDDEV','TOT','AVG','STDDEV' + write(*,*) '------------------------------------------------------------------------------' + + do i = 1, size(tauv) + tau = tauv(i) + ! Reset time + etime_kryl = 0.0_wp + etime_rk = 0.0_wp + + !call tmr%timer_start() + !call U%rand() + !call k_exptA(V_kryl, A, U, tau, info, .false.) + !call tmr%timer_stop(nloops=N,message="test" , print=.true., color='red') + !call tmr%timer_start() + !call U%rand() + !call exptA(V_rk, prop, U, tau, info, .false.) + !call tmr%timer_stop(nloops=N, message="test" , print=.true., color='blue') + + do j = 1, N + ! generate random vecor + call U%rand() + ! run Krylov based exponential propagator + call system_clock(count=clock_start) ! Start Timer + call k_exptA(V_kryl, A, U, tau, info, .false.) + call system_clock(count=clock_stop) ! Stop Timer + tv_kryl(j) = real(clock_stop-clock_start)/real(clock_rate) + etime_kryl = etime_kryl + tv_kryl(j) + ! run RK integrator + call system_clock(count=clock_start) ! Start Timer + call exptA(V_rk, prop, U, tau, info, .false.) + call system_clock(count=clock_stop) ! Stop Timer + tv_rk(j) = real(clock_stop-clock_start)/real(clock_rate) + etime_rk = etime_rk + tv_rk(j) + ! Check solution + call V_kryl%axpby(1.0_wp, V_rk, -1.0_wp) + if (V_kryl%norm()/(2*nx) > 10*atol_dp ) then + write(*,*) "Iteration", j, ": Solutions do not match!" + write(*,* ) " tol", 10*atol_dp , "delta = ", V_kryl%norm()/(2*nx) + end if + end do + tv_kryl = tv_kryl - etime_kryl/N + tv_rk = tv_rk - etime_rk/N + stddev_kryl = 0.0_wp + stddev_rk = 0.0_wp + do j = 1, N + stddev_kryl = stddev_kryl + tv_kryl(j)**2 + stddev_rk = stddev_rk + tv_rk(j)**2 + end do + write(*,'(F8.6," | ",3(F10.6,1X),3X,3(F10.6,1X))') tau, & + & etime_kryl, etime_kryl/N, sqrt(stddev_kryl/(N-1)), & + & etime_rk, etime_rk/N, sqrt(stddev_rk/(N-1)) + + end do + + return + end subroutine run_kexpm_test + + subroutine run_lyap_convergence_test(LTI, U0, S0, Tend, tauv, rkv, TOv, ifsave, ifverb) + ! LTI system + type(lti_system), intent(inout) :: LTI + ! Initial condition + type(state_vector), intent(inout) :: U0(:) + real(wp), intent(inout) :: S0(:,:) + real(wp), intent(in) :: Tend + ! vector of dt values + real(wp), intent(in) :: tauv(:) + ! vector of rank values + integer, intent(in) :: rkv(:) + ! vector of torders + integer, intent(in) :: TOv(:) + ! Optional + logical, optional, intent(in) :: ifsave + logical :: if_save_npy + logical, optional, intent(in) :: ifverb + logical :: verb + + ! Internals + type(LR_state), allocatable :: X_state + type(rk_lyapunov), allocatable :: RK_propagator + type(state_matrix) :: X_mat(2) + real(wp), allocatable :: X_RKlib(:,:,:) + real(wp) :: X_mat_ref(N,N) + real(wp) :: U0_mat(N, rkmax) + integer :: info + integer :: i, j, ito, rk, nsteps, torder + integer :: irep, nrep + real(wp) :: etime, tau + ! OUTPUT + real(wp) :: U_out(N,rkmax) + real(wp) :: X_out(N,N) + real(wp) :: Bmat(N,2) + real(wp) :: tmp(N,2) + real(wp), allocatable :: U_load(:,:) + real(wp) :: X_mat_flat(N**2) + real(wp) :: res_flat(N**2) + character*128 :: oname + character*128 :: onameU + character*128 :: onameS + integer :: iostatus + ! timer + integer :: clock_rate, clock_start, clock_stop + logical :: existfile, load_data + ! DLRA opts + type(dlra_opts) :: opts + + if_save_npy = optval(ifsave, .false.) + verb = optval(ifverb, .false.) + + load_data = .false. + + call system_clock(count_rate=clock_rate) + + ! 
initialize exponential propagator + RK_propagator = RK_lyapunov(Tend) + + oname = trim(basepath)//"Xctl_RK_W/data_BS_X_W.npy" + inquire(file=oname, exist=existfile) + if (existfile) then + call load_npy(trim(oname), U_load, iostatus) + print *, 'Load Bartels-Stuart solution ', trim(oname) + if (iostatus /= 0) then; write(*,*) "Error loading file", trim(oname); STOP 2; end if + else + write(*,*) 'Cannot find ', trim(oname); STOP 12 + end if + call CALE(res_flat, reshape(U_load, shape(res_flat)), BBTW_flat, .false.) + print *, ' || res ||_2/N = ', norm2(res_flat)/N + + nrep = 1 + allocate(X_RKlib(N, N, nrep)) + if (load_data) then + ! Load initial condition + write(oname,'("data_GL_lyapconv_X0_RK_n",I4.4,".npy")') nx + inquire(file=trim(basepath)//oname, exist=existfile) + if (existfile) then + call load_npy(trim(basepath)//trim(oname), U_load, iostatus) + print *, 'Load initial condition: ', trim(basepath)//trim(oname) + if (iostatus /= 0) then; write(*,*) "Error loading file", trim(basepath)//trim(oname); STOP 2; end if + else + write(*,*) 'Cannot find ', trim(basepath)//trim(oname); STOP 12 + end if + X_out = U_load(1:N, 1:N) + ! Load reference RK solution + write(oname,'("data_GL_lyapconv_X_RK_n",I4.4,"_r",I3.3,".npy")') nx, nrep + inquire(file=trim(basepath)//trim(oname), exist=existfile) + if (existfile) then + print *, 'Load RK solution ', trim(basepath)//trim(oname) + call load_npy(trim(basepath)//trim(oname), U_load, iostatus) + if (iostatus /= 0) then; write(*,*) "Error loading file", trim(basepath)//trim(oname); STOP 2; end if + else + write(*,*) 'Cannot find ', trim(basepath)//trim(oname); STOP 12 + end if + ! Set reference RK solution + X_mat_ref = U_load(1:N, 1:N) + else + ! Set random initial condition + call get_state(U0_mat(:,1:rk_X0), U0) + X_out = matmul( U0_mat, matmul( S0, transpose(U0_mat ) ) ) + if (if_save_npy) then + ! Save forcing RK + write(oname,'("data_GL_lyapconv_BBTW_RK_n",I4.4,".npy")') nx + write(*,*) 'Save ', trim(basepath)//trim(oname) + call save_npy(trim(basepath)//oname, reshape(BBTW_flat, (/ N,N /)), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(basepath)//trim(oname); STOP 2; end if + ! Save initial condition + write(oname,'("data_GL_lyapconv_X0_RK_n",I4.4,".npy")') nx + write(*,*) 'Save ', trim(basepath)//trim(oname) + call save_npy(trim(basepath)//oname, X_out, iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(basepath)//trim(oname); STOP 2; end if + ! Save forcing DLRA + Bmat = 0.0_wp + call get_state(Bmat, LTI%B(1:rk_b)) + X_out = matmul(Bmat, transpose(Bmat)) + write(oname,'("data_GL_lyapconv_BBTW_DLRA_n",I4.4,".npy")') nx + write(*,*) 'Save ', trim(basepath)//trim(oname) + call save_npy(trim(basepath)//oname, X_out, iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(basepath)//trim(oname); STOP 2; end if + end if + X_out = matmul( U0_mat, matmul( S0, transpose(U0_mat ) ) ) + ! Set initial condition for RK + call set_state(X_mat(1:1), X_out) + write(*,'(A10,A26,A26,A26,A20)') 'RKlib:','Tend','|| X_RK ||_2/N', '|| res ||_2/N','Elapsed time' + write(*,*) ' ------------------------------------------------------------------------' + do irep = 1, nrep + call system_clock(count=clock_start) ! Start Timer + ! integrate + call RK_propagator%matvec(X_mat(1), X_mat(2)) + call system_clock(count=clock_stop) ! Stop Timer + ! recover output + call get_state(X_RKlib(:,:,irep), X_mat(2:2)) + call CALE(res_flat, reshape(X_RKlib(:,:,irep), shape(res_flat)), BBTW_flat, .false.) + ! 
replace input + call set_state(X_mat(1:1), X_RKlib(:,:,irep)) + write(*,'(I10,F26.4,E26.8,E26.8,F18.4," s")') irep, irep*Tend, norm2(X_RKlib(:,:,irep))/N, norm2(res_flat)/N, & + & real(clock_stop-clock_start)/real(clock_rate) + if (if_save_npy) then + write(oname,'("data_GL_lyapconv_X_RK_n",I4.4,"_r",I3.3,".npy")') nx, irep + write(*,*) 'Save ', trim(basepath)//trim(oname) + call save_npy(trim(basepath)//oname, X_RKlib(:,:,irep), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(basepath)//trim(oname); STOP 2; end if + end if + enddo + ! Set reference RK solution + X_mat_ref = X_RKlib(:,:,nrep) + end if + + write(*,*) '' + write(*,'(A16,A4,A4,A10,A6,A8,A26,A20)') 'DLRA:',' rk',' TO','dt','steps','Tend', & + & '|| X_DLRA - X_RK ||_2/N', 'Elapsed time' + X_state = LR_state() + do ito = 1, size(TOv) + torder = TOv(ito) + do i = 1, size(rkv) + rk = rkv(i) + do j = 1, size(tauv) + tau = tauv(j) + ! set solver options + opts = dlra_opts(mode=ito, verbose=verb) + ! Initialize low-rank representation with rank rk + if (verb) write(*,*) 'Initialize LR state, rk =', rk + call X_state%initialize_LR_state(U0, S0, rk) + if (verb) write(*,*) 'Run DRLA' + nsteps = nint(Tend/tau) + ! run integrator + call system_clock(count=clock_start) ! Start Timer + call projector_splitting_DLRA_lyapunov_integrator(X_state, LTI%prop, LTI%B, Tend, tau, info, & + & exptA=exptA, iftrans=.false., options=opts) + call system_clock(count=clock_stop) ! Stop Timer + etime = real(clock_stop-clock_start)/real(clock_rate) + ! Reconstruct solution + call reconstruct_solution(X_out, X_state) + write(*,'(I4," ",A11,I4," TO",I1,F10.6,I6,F8.4,E26.8,F18.4," s")') 1, 'Xctl OUTPUT', & + & rk, torder, tau, nsteps, Tend, norm2(X_out - X_mat_ref)/N, etime + if (verb) write(*,*) 'Total integration time (DLRA):', etime, 's' + if (j == size(tauv) .and. if_save_npy) then + write(onameU,'("data_GL_lyapconv_XU_n",I4.4,"_TO",I1,"_rk",I3.3,"_t",E8.2,".npy")') nx, torder, rk, tau + write(onameS,'("data_GL_lyapconv_XS_n",I4.4,"_TO",I1,"_rk",I3.3,"_t",E8.2,".npy")') nx, torder, rk, tau + write(*,*) 'Save ', trim(basepath)//trim(onameU) + call save_npy(trim(basepath)//onameU, U_out(:,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + write(*,*) 'Save ', trim(basepath)//trim(onameS) + call save_npy(trim(basepath)//onameS, X_state%S(1:rk,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameS); STOP 2; end if + end if + deallocate(X_state%U) + deallocate(X_state%S) + end do + end do + end do + + end subroutine run_lyap_convergence_test + + subroutine run_DLRA_rank_adaptive_test(LTI, U0, S0, rkv, tauv, TOv, Tend, nrep, ifsave, ifverb, iflogs) + ! LTI system + type(lti_system), intent(inout) :: LTI + ! Initial condition + type(state_vector), intent(in) :: U0(:) + real(wp), intent(in) :: S0(:,:) + ! vector of dt values + real(wp), intent(in) :: tauv(:) + ! vector of rank values + integer, intent(in) :: rkv(:) + ! vector of torders + integer, intent(in) :: TOv(:) + real(wp), intent(in) :: Tend + integer, intent(in) :: nrep + ! Optional + logical, optional, intent(in) :: ifsave + logical :: if_save_npy + logical, optional, intent(in) :: ifverb + logical :: verb + logical, optional, intent(in) :: iflogs + logical :: if_save_logs + + ! Internal variables + type(LR_state), allocatable :: X ! Controllability + type(LR_state), allocatable :: Y ! 
Observability + real(wp) :: U_out(2*nx,rkmax) + real(wp) :: X_out(2*nx,2*nx) + real(wp), allocatable :: vals(:) + real(wp) :: sfro + real(wp) :: tau, Ttot, etime, etime_tot + integer :: i, j, k, ito, rk, irep, nsteps + integer :: info, torder, iostatus + real(wp) :: lagsvd(rkmax) + real(wp) :: res(N**2) + character*128 :: oname + character*128 :: onameU + character*128 :: onameS + integer :: clock_rate, clock_start, clock_stop + ! DLRA opts + type(dlra_opts) :: opts + + if_save_npy = optval(ifsave, .false.) + verb = optval(ifverb, .false.) + if_save_logs = optval(iflogs, .false.) + + call system_clock(count_rate=clock_rate) + + write(*,*) '------------------------' + write(*,*) ' RANK-ADAPTIVE DLRA' + write(*,*) '------------------------' + write(*,*) ' CONTROLLABILITY' + write(*,*) '------------------------' + write(*,*) '' + + X = LR_state() + do ito = 1, size(TOv) + torder = TOv(ito) + do i = 1, size(rkv) + rk = rkv(i) + if (allocated(vals)) deallocate(vals) + allocate(vals(1:rk)) + do j = 1, size(tauv) + tau = tauv(j) + ! Initialize low-rank representation with rank rk + if (verb) write(*,*) 'Initialize LR state, rk =', rk + call X%initialize_LR_state(U0, S0, rk, rkmax) + ! Reset time + Ttot = 0.0_wp + lagsvd = 0.0_wp + if (verb) write(*,*) 'Run DRLA' + if (if_save_logs) then + write(oname,'("output_GL_X_norm__n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".txt")') nx, torder, rk, tau + open(unit=iunit1, file=trim(basepath)//oname) + call stamp_logfile_header(iunit1, 'Controllability Gramian', rk, tau, Tend, torder) + write(iunit1,'(A16,A4,A10,A18,A18,A20)') 'DLRA:',' rk',' Tend','|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + write(oname,'("output_GL_X_sigma_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".txt")') nx, torder, rk, tau + open(unit=iunit2, file=trim(basepath)//oname) + call stamp_logfile_header(iunit2, 'Controllability Gramian', rk, tau, Tend, torder) + write(iunit2,*) 'DLRA: T sigma_i d(sigma-i)/sigma-1 d(sigma_i)/sigma_i ||Sum(sigma_i)||_2' + end if + write(*,'(A16,A4,A4,A10,A6,A8,A18,A18,A20)') 'DLRA:',' rk',' TO','dt','steps','Tend', & + & '|| X_DLRA ||_2/N','|| res ||_2/N', 'Elapsed time' + nsteps = nint(Tend/tau) + etime_tot = 0.0_wp + ! set solver options + opts = dlra_opts(mode=ito, if_rank_adaptive=.true., tol=1e-6_wp, & + & use_err_est = .false., verbose=verb) + do irep = 1, nrep + ! run integrator + etime = 0.0_wp + call system_clock(count=clock_start) ! Start Timer + call projector_splitting_DLRA_lyapunov_integrator(X, LTI%prop, LTI%B, Tend, tau, info, & + & exptA=exptA, iftrans=.false., options=opts) + call system_clock(count=clock_stop) ! Stop Timer + etime = etime + real(clock_stop-clock_start)/real(clock_rate) + ! Compute LR basis spectrum + vals = svdvals(X%S(1:rk,1:rk)) + if (if_save_logs) then + write(iunit2,'("sigma ",F8.4)',ADVANCE='NO') Ttot + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') vals(k); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') abs(vals(k) - lagsvd(k))/lagsvd(1); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + do k = 1, rk; write(iunit2,'(E14.6)', ADVANCE='NO') abs(vals(k) - lagsvd(k))/lagsvd(k); end do + write (iunit2,'(A)', ADVANCE='NO') ' | ' + lagsvd(1:rk) = lagsvd(1:rk) - vals + sfro = 0.0_wp + do k = 1, rk + sfro = sfro + lagsvd(k)**2 + end do + sfro = sqrt(sfro) + write(iunit2,'(E14.6)'), sfro + end if + lagsvd(1:rk) = vals + ! Reconstruct solution + call reconstruct_solution(X_out, X) + Ttot = Ttot + Tend + call CALE(res, reshape(X_out, shape(res)), BBTW_flat, .false.) 
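+               ! Editorial note: CALE evaluates the residual of the continuous-time algebraic
+               ! Lyapunov equation for the reconstructed low-rank iterate X_out = U @ S @ U.T,
+               ! i.e. (in flattened form)
+               !     res = A @ X_out + X_out @ A.T + B @ B.T
+               ! with the integration weights W already folded into B, so the value
+               ! norm2(res)/N reported below measures how closely the rank-adaptive DLRA
+               ! solution satisfies the steady-state Lyapunov equation.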
+ write(*,'(I4," ",A11,I4," TO",I1,F10.6,I6,F8.4,E18.8,E18.8,F18.4," s")') irep, 'Xctl OUTPUT', & + & rk, torder, tau, nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + if (if_save_logs) then + write(iunit1,'(I4," ",A11,I6,F8.4,E18.8,E18.8,E18.8,F18.4," s")') irep, 'Xctl OUTPUT', & + & nsteps, Ttot, norm2(X_out)/N, norm2(res)/N, etime + end if + etime_tot = etime_tot + etime + end do + if (verb) write(*,*) 'Total integration time (DLRA):', etime_tot, 's' + if (if_save_logs) then + write(iunit1,*) 'Total integration time (DLRA):', etime_tot, 's'; close(iunit1) + write(Iunit2,*) 'Total integration time (DLRA):', etime_tot, 's'; close(iunit2) + end if + if (if_save_npy) then + write(onameU,'("data_GLXY_XU_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + write(onameS,'("data_GLXY_XS_n",I4.4,"_TO",I1,"_rk",I2.2,"_t",E8.2,".npy")') nx, torder, rk, tau + call save_npy(trim(basepath)//onameU, U_out(:,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameU); STOP 2; end if + call save_npy(trim(basepath)//onameS, X%S(1:rk,1:rk), iostatus) + if (iostatus /= 0) then; write(*,*) "Error saving file", trim(onameS); STOP 2; end if + end if + deallocate(X%U) + deallocate(X%S) + end do + end do + end do + + return + end subroutine run_DLRA_rank_adaptive_test + +end module Ginzburg_Landau_Tests \ No newline at end of file diff --git a/example/DLRA_ginzburg_landau/ginzburg_landau_utils.f90 b/example/DLRA_ginzburg_landau/ginzburg_landau_utils.f90 new file mode 100644 index 0000000..901eabf --- /dev/null +++ b/example/DLRA_ginzburg_landau/ginzburg_landau_utils.f90 @@ -0,0 +1,348 @@ +module Ginzburg_Landau_Utils + ! Standard Library. + use stdlib_math, only : linspace + use stdlib_optval, only : optval + use stdlib_linalg, only : eye, diag, svd + use stdlib_io_npy, only : save_npy, load_npy + !use fortime + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_AbstractVectors + use LightKrylov_Utils, only : assert_shape + ! LightROM + use LightROM_AbstractLTIsystems + use LightROM_Utils + ! Lyapunov Solver + use LightROM_LyapunovSolvers + use LightROM_LyapunovUtils + ! Riccati Solver + use LightROM_RiccatiSolvers + use LightROM_RiccatiUtils + ! Ginzburg Landau + use Ginzburg_Landau_Base + use Ginzburg_Landau_Operators + use Ginzburg_Landau_RK_Lyapunov + + implicit none + + private :: this_module + ! mesh construction + public :: initialize_parameters + ! utilities for state_vectors + public :: set_state, get_state, init_rand, reconstruct_solution + ! initial conditions + public :: generate_random_initial_condition + ! logfiles + public :: stamp_logfile_header + ! misc + public :: CALE, CARE + + character*128, parameter :: this_module = 'Ginzburg_Landau_Utils' + +contains + + !-------------------------------------------------------------- + !----- CONSTRUCT THE MESH AND PHYSICAL PARAMETERS ----- + !-------------------------------------------------------------- + + subroutine initialize_parameters() + implicit none + ! Mesh array. + real(wp), allocatable :: x(:) + real(wp) :: x2(1:2*nx) + real(wp) :: tmpv(N, 2) + integer :: i + + ! Construct mesh. + x = linspace(-L/2, L/2, nx+2) + dx = x(2)-x(1) + + ! Construct mu(x) + mu(:) = (mu_0 - c_mu**2) + (mu_2 / 2.0_wp) * x(2:nx+1)**2 + + ! Define integration weights + weight = dx + weight_mat = dx + + ! Construct B & C + ! B = [ [ Br, -Bi ], [ Bi, Br ] ] + ! B = [ [ Cr, -Ci ], [ Ci, Cr ] ] + ! where Bi = Ci = 0 + + ! 
actuator is a Guassian centered just upstream of branch I + ! column 1 + x2 = 0.0_wp + x2(1:nx) = x(2:nx+1) + B(1)%state = 0.5*exp(-((x2 - x_b)/s_b)**2)*sqrt(weight) + ! column 2 + x2 = 0.0_wp + x2(nx+1:2*nx) = x(2:nx+1) + B(2)%state = 0.5*exp(-((x2 - x_b)/s_b)**2)*sqrt(weight) + + ! the sensor is a Gaussian centered at branch II + ! column 1 + x2 = 0.0_wp + x2(1:nx) = x(2:nx+1) + CT(1)%state = 0.5*exp(-((x2 - x_c)/s_c)**2)*sqrt(weight) + ! column 2 + x2 = 0.0_wp + x2(nx+1:2*nx) = x(2:nx+1) + CT(2)%state = 0.5*exp(-((x2 - x_c)/s_c)**2)*sqrt(weight) + + ! Note that we have included the integration weights into the actuator/sensor definitions + + ! RK lyap & riccati + Qc = eye(rk_c) + Rinv = eye(rk_b) + tmpv = 0.0_wp + call get_state(tmpv(:,1:rk_b), B(1:rk_b)) + BBTW_flat(1:N**2) = reshape(matmul(tmpv, transpose(tmpv)), shape(BBTW_flat)) + BRinvBTW_mat(1:N,1:N) = matmul(matmul(tmpv, Rinv), transpose(tmpv)) + call get_state(tmpv(:,1:rk_c), CT(1:rk_c)) + CTCW_flat(1:N**2) = reshape(matmul(tmpv, transpose(tmpv)), shape(CTCW_flat)) + CTQcCW_mat(1:N,1:N) = matmul(matmul(tmpv, Qc), transpose(tmpv)) + + return + end subroutine initialize_parameters + + !-------------------------------------------------------------------- + !----- UTILITIES FOR STATE_VECTOR AND STATE MATRIX TYPES ----- + !-------------------------------------------------------------------- + + subroutine get_state(mat_out, state_in) + !! Utility function to transfer data from a state vector to a real array + real(wp), intent(out) :: mat_out(:,:) + class(abstract_vector_rdp), intent(in) :: state_in(:) + ! internal variables + integer :: k, kdim + mat_out = 0.0_wp + select type (state_in) + type is (state_vector) + kdim = size(state_in) + call assert_shape(mat_out, (/ N, kdim /), 'get_state -> state_vector', 'mat_out') + do k = 1, kdim + mat_out(:,k) = state_in(k)%state + end do + type is (state_matrix) + call assert_shape(mat_out, (/ N, N /), 'get_state -> state_matrix', 'mat_out') + mat_out = reshape(state_in(1)%state, (/ N, N /)) + end select + return + end subroutine get_state + + subroutine set_state(state_out, mat_in) + !! Utility function to transfer data from a real array to a state vector + class(abstract_vector_rdp), intent(out) :: state_out(:) + real(wp), intent(in) :: mat_in(:,:) + ! internal variables + integer :: k, kdim + select type (state_out) + type is (state_vector) + kdim = size(state_out) + call assert_shape(mat_in, (/ N, kdim /), 'set_state -> state_vector', 'mat_in') + call zero_basis(state_out) + do k = 1, kdim + state_out(k)%state = mat_in(:,k) + end do + type is (state_matrix) + call assert_shape(mat_in, (/ N, N /), 'set_state -> state_matrix', 'mat_in') + call zero_basis(state_out) + state_out(1)%state = reshape(mat_in, shape(state_out(1)%state)) + end select + return + end subroutine set_state + + subroutine init_rand(state, ifnorm) + !! Utility function to initialize a state vector with random data + class(abstract_vector_rdp), intent(inout) :: state(:) + logical, optional, intent(in) :: ifnorm + ! internal variables + integer :: k, kdim + logical :: normalize + normalize = optval(ifnorm,.true.) + select type (state) + type is (state_vector) + kdim = size(state) + do k = 1, kdim + call state(k)%rand(ifnorm = normalize) + end do + type is (state_matrix) + kdim = size(state) + do k = 1, kdim + call state(k)%rand(ifnorm = normalize) + end do + end select + return + end subroutine init_rand + + subroutine reconstruct_solution(X, LR_X) + real(wp), intent(out) :: X(:,:) + type(LR_state), intent(in) :: LR_X + + ! 
internals + real(wp) :: wrk(N, LR_X%rk) + + call assert_shape(X, (/ N, N /), 'reconstruct_solution', 'X') + + call get_state(wrk, LR_X%U(1:LR_X%rk)) + X = matmul(wrk, matmul(LR_X%S(1:LR_X%rk,1:LR_X%rk), transpose(wrk))) + + return + end subroutine reconstruct_solution + + !------------------------------------ + !----- INITIAL CONDIIONS ----- + !------------------------------------ + + subroutine generate_random_initial_condition(U, S, rk) + class(state_vector), intent(out) :: U(:) + real(wp), intent(out) :: S(:,:) + integer, intent(in) :: rk + ! internals + class(state_vector), allocatable :: Utmp(:) + integer, allocatable :: perm(:) + ! SVD + real(wp) :: U_svd(rk,rk) + real(wp) :: S_svd(rk) + real(wp) :: V_svd(rk,rk) + integer :: i, info + + if (size(U) < rk) then + write(*,*) 'Input krylov basis size incompatible with requested rank', rk + STOP 1 + else + call zero_basis(U) + do i = 1,rk + call U(i)%rand(.false.) + end do + end if + if (size(S,1) < rk) then + write(*,*) 'Input coefficient matrix size incompatible with requested rank', rk + STOP 1 + else if (size(S,1) /= size(S,2)) then + write(*,*) 'Input coefficient matrix must be square.' + STOP 2 + else + S = 0.0_wp + end if + ! perform QR + allocate(perm(1:rk)); perm = 0 + allocate(Utmp(1:rk), source=U(1:rk)) + call qr(Utmp, S, perm, info, verbosity=.false.) + if (info /= 0) write(*,*) ' [generate_random_initial_condition] Info: Colinear vectors detected in QR, column ', info + ! perform SVD + call svd(S(:,1:rk), S_svd(1:rk), U_svd(:,1:rk), V_svd(1:rk,1:rk)) + S = diag(S_svd) + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, Utmp, U_svd) + call copy_basis(U, Xwrk) + end block + + end subroutine + + !----------------------------- + !----- LOGFILES ----- + !----------------------------- + + subroutine stamp_logfile_header(iunit, problem, rk, tau, Tend, torder) + integer, intent(in) :: iunit + character(*), intent(in) :: problem + integer, intent(in) :: rk + real(wp), intent(in) :: tau + real(wp), intent(in) :: Tend + integer, intent(in) :: torder + + write(iunit,*) '-----------------------' + write(iunit,*) ' GINZBURG LANDAU' + write(iunit,*) '-----------------------' + write(iunit,*) 'nu = ', nu + write(iunit,*) 'gamma = ', gamma + write(iunit,*) 'mu_0 = ', mu_0 + write(iunit,*) 'c_mu = ', c_mu + write(iunit,*) 'mu_2 = ', mu_2 + write(iunit,*) '-----------------------' + write(iunit,*) problem + write(iunit,*) '-----------------------' + write(iunit,*) 'nx = ', nx + write(iunit,*) 'rk_b = ', rk_b + write(iunit,*) 'x_b = ', x_b + write(iunit,*) 's_b = ', s_b + write(iunit,*) 'rk_c = ', rk_c + write(iunit,*) 'x_c = ', x_c + write(iunit,*) 's_c = ', s_c + write(iunit,*) '-----------------------' + write(iunit,*) 'Time Integration: DLRA' + write(iunit,*) '-----------------------' + write(iunit,*) 'Tend =', Tend + write(iunit,*) 'torder =', torder + write(iunit,*) 'tau =', tau + write(iunit,*) 'rk =', rk + write(iunit,*) '---------------------' + write(iunit,*) '---------------------' + return + end subroutine stamp_logfile_header + + !------------------------- + !----- MISC ----- + !------------------------- + + subroutine CALE(res_flat, x_flat, Q_flat, adjoint) + ! residual + real(wp), intent(out) :: res_flat(:) + ! solution + real(wp), intent(in) :: x_flat(:) + ! inhomogeneity + real(wp), intent(in) :: Q_flat(:) + !> Adjoint + logical, optional :: adjoint + logical :: adj + + ! 
internals + real(wp), dimension(N**2) :: x_tmp, AX_flat, XAH_flat + + !> Deal with optional argument + adj = optval(adjoint,.false.) + + res_flat = 0.0_wp; AX_flat = 0.0_wp; XAH_flat = 0.0_wp; x_tmp = 0.0_wp + call GL_mat( AX_flat, x_flat, adjoint = adj, transpose = .false.) + x_tmp = reshape(transpose(reshape(x_flat, (/ N,N /))), shape(x_flat)) + call GL_mat(XAH_flat, x_tmp, adjoint = adj, transpose = .true. ) + ! construct Lyapunov equation + res_flat = AX_flat + XAH_flat + Q_flat + + end subroutine CALE + + subroutine CARE(res_flat, x_flat, CTQcC_flat, BRinvBT_mat, adjoint) + ! residual + real(wp), intent(out) :: res_flat(:) + ! solution + real(wp), intent(in) :: x_flat(:) + ! inhomogeneity + real(wp), intent(in) :: CTQcC_flat(:) + ! inhomogeneity + real(wp), intent(in) :: BRinvBT_mat(:,:) + !> Adjoint + logical, optional :: adjoint + logical :: adj + + ! internals + real(wp), dimension(N**2) :: x_tmp, AX_flat, XAH_flat, NL_flat + real(wp), dimension(N,N) :: x_mat + + !> Deal with optional argument + adj = optval(adjoint,.false.) + + res_flat = 0.0_wp; AX_flat = 0.0_wp; XAH_flat = 0.0_wp; x_tmp = 0.0_wp + call GL_mat( AX_flat, x_flat, adjoint = adj, transpose = .false.) + x_mat = reshape(x_flat, (/ N,N /)) + x_tmp = reshape(transpose(x_mat), shape(x_flat)) + call GL_mat(XAH_flat, x_tmp, adjoint = adj, transpose = .true. ) + NL_flat = reshape(matmul(x_mat, matmul(BRinvBTW_mat, x_mat)), shape(NL_flat)) + ! construct Lyapunov equation + res_flat = AX_flat + XAH_flat + CTQcC_flat + NL_flat + + end subroutine CARE + +end module Ginzburg_Landau_Utils \ No newline at end of file diff --git a/example/DLRA_ginzburg_landau/main.f90 b/example/DLRA_ginzburg_landau/main.f90 new file mode 100644 index 0000000..d7dd589 --- /dev/null +++ b/example/DLRA_ginzburg_landau/main.f90 @@ -0,0 +1,252 @@ +program demo + ! Standard Library. + use stdlib_optval, only : optval + use stdlib_linalg, only : eye, diag + use stdlib_math, only : all_close, logspace + use stdlib_io_npy, only : save_npy, load_npy + use stdlib_logger, only : information_level, warning_level, debug_level, error_level, none_level + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_Logger + use LightKrylov_AbstractVectors + use LightKrylov_ExpmLib + use LightKrylov_Utils + ! LightROM + use LightROM_AbstractLTIsystems + use LightROM_Utils + use LightROM_LyapunovSolvers + use LightROM_LyapunovUtils + ! GInzburg-Landau + use Ginzburg_Landau_Base + use Ginzburg_Landau_Operators + use Ginzburg_Landau_Utils + use Ginzburg_Landau_Tests + implicit none + + ! DLRA + logical, parameter :: verb = .true. + ! + logical :: run_test + ! + character*128 :: oname + character*128 :: onameU + character*128 :: onameS + ! rk_B & rk_C are set in ginzburg_landau_base.f90 + + integer :: nrk, ntau, rk, torder + real(wp) :: tau, Tend, Ttot + ! vector of dt values + real(wp), allocatable :: tauv(:) + ! vector of rank values + integer, allocatable :: rkv(:), TOv(:) + + ! Exponential propagator (RKlib). + type(GL_operator), allocatable :: A + type(exponential_prop), allocatable :: prop + + ! LTI system + type(lti_system) :: LTI + + ! Initial condition + type(state_vector) :: U0(1:rkmax) + real(wp) :: S0(rkmax,rkmax) + ! matrix + real(wp) :: U0_in(2*nx, rkmax) + + ! OUTPUT + real(wp) :: U_out(2*nx,rkmax) + real(wp) :: X_out(2*nx,2*nx) + real(wp) :: lagsvd(rkmax) + + ! Information flag. + integer :: info + + ! 
Counters + integer :: i, j, k, irep, nrep, istep, nsteps + integer, allocatable :: perm(:) + real(wp) :: Tmax, etime + + logical :: ifsave, ifload, ifverb, iflogs + + call logger%configure(level=error_level, time_stamp=.false.) + + !---------------------------------- + !----- INITIALIZATION ----- + !---------------------------------- + + ! Initialize mesh and system parameters A, B, CT + if (verb) write(*,*) 'Initialize parameters' + call initialize_parameters() + + ! Initialize propagator + if (verb) write(*,*) 'Initialize exponential propagator' + prop = exponential_prop(1.0_wp) + + ! Initialize LTI system + A = GL_operator() + if (verb) write(*,*) 'Initialize LTI system (A, prop, B, CT, _)' + LTI = lti_system() + call LTI%initialize_lti_system(A, prop, B, CT) + + ! Define initial condition of the form X0 + U0 @ S0 @ U0.T SPD + if (verb) write(*,*) ' Define initial condition' + call generate_random_initial_condition(U0, S0, rk_X0) + call get_state(U_out, U0) + + !---------------------------------- + ! + ! DLRA CONVERGENCE TEST FOR LYAPUNOV EQUATION + ! + !---------------------------------- + + run_test = .false. + if (run_test) then + nrk = 8; allocate(rkv(1:nrk)); rkv = (/ 2, 6, 10, 14, 20, 40, 80, 128, 256 /) + ntau = 3; allocate(tauv(1:ntau)); tauv = logspace(-4.0, -3.0, ntau) + allocate(TOv(2)); TOv = (/ 1, 2 /) + Tend = 0.01_wp + ! run DLRA + ifsave = .true. ! save X_rk to disk (LightROM/local) + ifverb = .true. ! verbosity + call run_lyap_convergence_test(LTI, U0, S0, Tend, tauv, rkv, TOv, ifverb) + deallocate(rkv) + deallocate(tauv) + deallocate(TOv) + end if + + !---------------------------------- + ! + ! DLRA TEST FOR LYAPUNOV EQUATION + ! + !---------------------------------- + + run_test = .false. + if (run_test) then + !nrk = 6; allocate(rkv(1:nrk)); rkv = (/ 6, 10, 12, 14, 20, 40 /) + !ntau = 5; allocate(tauv(1:ntau)); tauv = (/ 1.0, 0.1, 0.01, 0.001, 0.0001 /) + !allocate(TOv(2)); TOv = (/ 1, 2 /) + nrk = 1; allocate(rkv(1:nrk)); rkv = (/ 12 /) + ntau = 1; allocate(tauv(1:ntau)); tauv = (/ 0.1 /) + allocate(TOv(1)); TOv = (/ 1 /) + Tend = 1.0_wp + nrep = 60 + ! run DLRA + ifsave = .false. ! save X and Y matrices to disk (LightROM/local) + ifverb = .false. ! verbosity + iflogs = .false. ! write logs with convergence and signular value evolution + call run_DLRA_lyapunov_test(LTI, U0, S0, rkv, tauv, TOv, Tend, nrep, ifsave, ifverb, iflogs) + deallocate(rkv) + deallocate(tauv) + deallocate(TOv) + end if + + !---------------------------------- + ! + ! DLRA TEST FOR BALANCING TRANSFORMATION + ! + !---------------------------------- + + run_test = .false. + if (run_test) then + ! Set parameters + rk = 14 + tau = 0.1_wp + torder = 2 + ! integration time + Tmax = 60.0_wp + nrep = 12 + Tend = Tmax/nrep + nsteps = nint(Tend/tau) + ! run DLRA + ifsave = .true. ! save X and Y matrices to disk (LightROM/local) + ifload = .false. ! read X and Y matrices from disk (LightROM/local) + ifverb = .true. ! verbosity + iflogs = .true. ! write logs with convergence and signular value evolution + call run_BT_test(LTI, U0, S0, rk, tau, torder, Tmax, nrep, ifsave, ifload, ifverb, iflogs) + end if + + !---------------------------------- + ! + ! TEST COMPARING THE SPEED OF RK vs KRYLOV EXPONTNTIAL INTEGRATORS + ! + !---------------------------------- + + run_test = .false. + if (run_test) then + ntau = 20; allocate(tauv(1:ntau)); tauv = logspace(-5.0_wp,0.0_wp,ntau) + torder = 1 + call run_kexpm_test(LTI%A, LTI%prop, U0(1), tauv, torder, 1000) + end if + + !---------------------------------- + ! + ! 
DLRA TEST FOR RICCATI EQUATION + ! + !---------------------------------- + + run_test = .false. + if (run_test) then + nrk = 6; allocate(rkv(1:nrk)); rkv = (/ 6, 10, 12, 14, 20, 40 /) + ntau = 5; allocate(tauv(1:ntau)); tauv = (/ 1.0, 0.1, 0.01, 0.001, 0.0001 /) + Tend = 1.0_wp + nrep = 60 + ! run DLRA + ifsave = .true. ! save X and Y matrices to disk (LightROM/local) + ifverb = .true. ! verbosity + iflogs = .true. ! write logs with convergence and signular value evolution + call run_DLRA_riccati_test(LTI, U0, S0, Qc, Rinv, & + & rkv, tauv, Tend, nrep, & + & ifsave, ifverb, iflogs) + deallocate(rkv) + deallocate(tauv) + end if + + !---------------------------------- + ! + ! RANK-ADAPTIVE DLRA LYAPUNOV + ! + !---------------------------------- + + run_test = .false. + if (run_test) then + nrk = 1; allocate(rkv(1:nrk)); rkv = (/ 6 /) + ntau = 1; allocate(tauv(1:ntau)); tauv = (/ 0.1 /) + allocate(TOv(1)); TOv = (/ 1 /) + Tend = 1.0_wp + nrep = 60 + ! run DLRA + ifsave = .false. ! save X and Y matrices to disk (LightROM/local) + ifverb = .false. ! verbosity + iflogs = .false. ! write logs with convergence and signular value evolution + call run_DLRA_rank_adaptive_test(LTI, U0, S0, rkv, tauv, TOv, Tend, nrep, ifsave, ifverb, iflogs) + deallocate(rkv) + deallocate(tauv) + deallocate(TOv) + end if + + !---------------------------------- + ! + ! RANK-ADAPTIVE DLRA LYAPUNOV -- CONVERGENCE with increment norm + ! + !---------------------------------- + + run_test = .true. + if (run_test) then + nrk = 1; allocate(rkv(1:nrk)); rkv = (/ 6 /) + ntau = 1; allocate(tauv(1:ntau)); tauv = (/ 0.1 /) + allocate(TOv(1)); TOv = (/ 1 /) + Tend = 150.0_wp + ! run DLRA + ifsave = .false. ! save X and Y matrices to disk (LightROM/local) + ifverb = .false. ! verbosity + iflogs = .false. ! write logs with convergence and signular value evolution + call logger%configure(level=warning_level) + call run_DLRA_rank_adaptive_test(LTI, U0, S0, rkv, tauv, TOv, Tend, 1, ifsave, ifverb, iflogs) + deallocate(rkv) + deallocate(tauv) + deallocate(TOv) + end if + + return +end program demo \ No newline at end of file diff --git a/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_RKlib.f90 b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_RKlib.f90 index 7a06ddf..08811c8 100644 --- a/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_RKlib.f90 +++ b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_RKlib.f90 @@ -1,32 +1,36 @@ module Laplacian2D_LTI_Lyapunov_RKlib - use Laplacian2D_LTI_Lyapunov_Base - use laplacian2D_LTI_Lyapunov_Operators - !> RKLIB module for time integration. - use rklib_module - !> LightKrylov for linear algebra. - use LightKrylov !> Standard Library. use stdlib_math, only : linspace use stdlib_optval, only : optval use stdlib_linalg, only : eye + !> RKLIB module for time integration. + use rklib_module + !> LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + !> Laplacian + use Laplacian2D_LTI_Lyapunov_Base + use laplacian2D_LTI_Lyapunov_Operators implicit none - private + private :: this_module + + character*128, parameter :: this_module = 'Laplacian2D_LTI_Lyapunov_RKLib' !----------------------------------------------- !----- EXPONENTIAL PROPAGATOR RKLIB ----- !----------------------------------------------- - type, extends(abstract_linop), public :: rklib_exptA_laplacian - real(kind=wp) :: tau ! Integration time. + type, extends(abstract_linop_rdp), public :: rklib_exptA_laplacian + real(wp) :: tau ! Integration time. 
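+      ! Editorial note: matvec of this operator integrates dx/dt = L x with an RKLIB
+      ! Runge-Kutta scheme over the horizon tau, i.e. it applies the exponential propagator
+      ! exp(tau*L) to the input state vector; since the discretized Laplacian is symmetric,
+      ! the adjoint rmatvec can reuse the same solver (hence the "dummy" below).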
contains private procedure, pass(self), public :: matvec => direct_solver_vec procedure, pass(self), public :: rmatvec => direct_solver_vec ! dummy end type rklib_exptA_laplacian - type, extends(abstract_linop), public :: rklib_lyapunov_mat - real(kind=wp) :: tau ! Integration time. + type, extends(abstract_linop_rdp), public :: rklib_lyapunov_mat + real(wp) :: tau ! Integration time. contains private procedure, pass(self), public :: matvec => direct_solver_mat @@ -43,13 +47,13 @@ module Laplacian2D_LTI_Lyapunov_RKlib subroutine rhs(me, t, x, f) !> Time-integrator. - class(rk_class), intent(inout) :: me + class(rk_class), intent(inout) :: me !> Current time. - real(kind=wp) , intent(in) :: t + real(wp), intent(in) :: t !> State vector. - real(kind=wp) , dimension(:), intent(in) :: x + real(wp), dimension(:), intent(in) :: x !> Time-derivative. - real(kind=wp) , dimension(:), intent(out) :: f + real(wp), dimension(:), intent(out) :: f f = 0.0_wp call laplacian(f(1:N), x(1:N)) @@ -61,13 +65,13 @@ subroutine direct_solver_vec(self, vec_in, vec_out) !> Linear Operator. class(rklib_exptA_laplacian), intent(in) :: self !> Input vector. - class(abstract_vector) , intent(in) :: vec_in + class(abstract_vector_rdp), intent(in) :: vec_in !> Output vector. - class(abstract_vector) , intent(out) :: vec_out + class(abstract_vector_rdp), intent(out) :: vec_out !> Time-integrator. type(rks54_class) :: prop - real(kind=wp) :: dt = 1.0_wp + real(wp) :: dt = 1.0_wp select type(vec_in) type is (state_vector) @@ -88,17 +92,17 @@ end subroutine direct_solver_vec subroutine rhs_lyap(me, t, x, f) !> Time-integrator. - class(rk_class), intent(inout) :: me + class(rk_class), intent(inout) :: me !> Current time. - real(kind=wp) , intent(in) :: t + real(wp), intent(in) :: t !> State vector. - real(kind=wp) , dimension(:), intent(in) :: x + real(wp), dimension(:), intent(in) :: x !> Time-derivative. - real(kind=wp) , dimension(:), intent(out) :: f + real(wp), dimension(:), intent(out) :: f !> Internal variables. integer :: i, j, k - real(kind=wp), dimension(N**2) :: dv, dvT + real(wp), dimension(N**2) :: dv, dvT !> Sets the internal variables. dv = 0.0_wp @@ -118,14 +122,14 @@ end subroutine rhs_lyap subroutine direct_solver_mat(self, vec_in, vec_out) !> Linear Operator. - class(rklib_lyapunov_mat), intent(in) :: self + class(rklib_lyapunov_mat), intent(in) :: self !> Input vector. - class(abstract_vector) , intent(in) :: vec_in + class(abstract_vector_rdp), intent(in) :: vec_in !> Output vector. - class(abstract_vector) , intent(out) :: vec_out + class(abstract_vector_rdp), intent(out) :: vec_out !> Time-integrator. type(rks54_class) :: prop - real(kind=wp) :: dt = 0.1_wp + real(wp) :: dt = 0.1_wp select type(vec_in) type is (state_matrix) diff --git a/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_base.f90 b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_base.f90 index be375ed..93b5a04 100644 --- a/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_base.f90 +++ b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_base.f90 @@ -1,82 +1,78 @@ module laplacian2D_LTI_Lyapunov_Base - !> LightKrylov for linear algebra. - use LightKrylov - use LightKrylov_utils, only : assert_shape - use LightROM_AbstractLTIsystems - !> Standard Library. - use stdlib_math, only : linspace + ! Standard Library. use stdlib_optval, only : optval + ! LightKrylov for linear algebra. 
+ use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_Logger + use LightKrylov_Utils, only : assert_shape + use LightKrylov_AbstractVectors ! zero_basis + ! LightROM + use LightROM_AbstractLTIsystems ! LR_state implicit none - private + private :: this_module + character*128, parameter :: this_module = 'Laplacian2D_LTI_Lyapunov_Base' + ! problem parameters - public :: N, nx, dx, dx2, L, rk_b, B, BBT - ! mesh and operator - public :: initialize_mesh - ! utils - public :: get_state, set_state, init_rand + public :: N, nx, dx, dx2, L, rk_b, B, BBT !------------------------------ !----- PARAMETERS ----- !------------------------------ ! --> Mesh related parameters. - real(kind=wp), parameter :: L = 1.0_wp !> Domain length + real(wp), parameter :: L = 1.0_wp !> Domain length integer, parameter :: nx = 4 !> Number of grid points per direction integer, parameter :: N = nx**2 !> total number of grid points - real(kind=wp), parameter :: dx = L/nx !> Grid size. - real(kind=wp), parameter :: dx2= dx**2 !> Grid size. + real(wp), parameter :: dx = L/nx !> Grid size. + real(wp), parameter :: dx2= dx**2 !> Grid size. integer, parameter :: rk_b = 5 !> rank of the RHS + !------------------------------------------------------- + !----- LIGHTKRYLOV SYM LOW RANK STATE TYPE ----- + !------------------------------------------------------- + + type, extends(abstract_sym_low_rank_state_rdp), public :: LR_state + contains + private + procedure, pass(self), public :: initialize_LR_state + end type LR_state + !------------------------------------------- !----- LIGHTKRYLOV VECTOR TYPE ----- !------------------------------------------- - type, extends(abstract_vector), public :: state_vector - real(kind=wp) :: state(N) = 0.0_wp + type, extends(abstract_vector_rdp), public :: state_vector + real(wp) :: state(N) = 0.0_wp contains private procedure, pass(self), public :: zero => vector_zero - procedure, pass(self), public :: dot => vector_dot + procedure, pass(self), public :: rand => vector_rand procedure, pass(self), public :: scal => vector_scal procedure, pass(self), public :: axpby => vector_axpby - procedure, pass(self), public :: rand => vector_rand + procedure, pass(self), public :: dot => vector_dot + procedure, pass(self), public :: get_size => vector_get_size end type state_vector !------------------------------------------- !----- LIGHTKRYLOV VECTOR TYPE ----- !------------------------------------------- - type, extends(abstract_vector), public :: state_matrix - real(kind=wp) :: state(N**2) = 0.0_wp + type, extends(abstract_vector_rdp), public :: state_matrix + real(wp) :: state(N**2) = 0.0_wp contains private procedure, pass(self), public :: zero => matrix_zero - procedure, pass(self), public :: dot => matrix_dot + procedure, pass(self), public :: rand => matrix_rand procedure, pass(self), public :: scal => matrix_scal procedure, pass(self), public :: axpby => matrix_axpby - procedure, pass(self), public :: rand => matrix_rand + procedure, pass(self), public :: dot => matrix_dot + procedure, pass(self), public :: get_size => matrix_get_size end type state_matrix type(state_vector) :: B(rk_b) - real(kind=wp) :: BBT(N**2) - - !----------------------------------------------- - !----- LIGHTKRYLOV LTI SYSTEM TYPE ----- - !----------------------------------------------- - - type, extends(abstract_lti_system), public :: lti_system - end type lti_system - - !------------------------------------------------------- - !----- LIGHTKRYLOV SYM LOW RANK STATE TYPE ----- - 
!------------------------------------------------------- - - type, extends(abstract_sym_low_rank_state), public :: LR_state - contains - private - procedure, pass(self), public :: set_LR_state - end type LR_state + real(wp) :: BBT(N**2) contains @@ -88,9 +84,9 @@ subroutine vector_zero(self) return end subroutine vector_zero - real(kind=wp) function vector_dot(self, vec) result(alpha) - class(state_vector) , intent(in) :: self - class(abstract_vector), intent(in) :: vec + real(wp) function vector_dot(self, vec) result(alpha) + class(state_vector) , intent(in) :: self + class(abstract_vector_rdp), intent(in) :: vec select type(vec) type is (state_vector) alpha = dot_product(self%state, vec%state) @@ -98,17 +94,23 @@ real(kind=wp) function vector_dot(self, vec) result(alpha) return end function vector_dot + integer function vector_get_size(self) result(N) + class(state_vector), intent(in) :: self + N = nx + return + end function vector_get_size + subroutine vector_scal(self, alpha) class(state_vector), intent(inout) :: self - real(kind=wp) , intent(in) :: alpha + real(wp) , intent(in) :: alpha self%state = self%state * alpha return end subroutine vector_scal subroutine vector_axpby(self, alpha, vec, beta) - class(state_vector) , intent(inout) :: self - class(abstract_vector), intent(in) :: vec - real(kind=wp) , intent(in) :: alpha, beta + class(state_vector) , intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: vec + real(wp) , intent(in) :: alpha, beta select type(vec) type is (state_vector) self%state = alpha*self%state + beta*vec%state @@ -121,7 +123,7 @@ subroutine vector_rand(self, ifnorm) logical, optional, intent(in) :: ifnorm ! internals logical :: normalize - real(kind=wp) :: alpha + real(wp) :: alpha normalize = optval(ifnorm,.true.) call random_number(self%state) if (normalize) then @@ -139,9 +141,9 @@ subroutine matrix_zero(self) return end subroutine matrix_zero - real(kind=wp) function matrix_dot(self, vec) result(alpha) - class(state_matrix) , intent(in) :: self - class(abstract_vector), intent(in) :: vec + real(wp) function matrix_dot(self, vec) result(alpha) + class(state_matrix) , intent(in) :: self + class(abstract_vector_rdp), intent(in) :: vec select type(vec) type is(state_matrix) alpha = dot_product(self%state, vec%state) @@ -149,17 +151,23 @@ real(kind=wp) function matrix_dot(self, vec) result(alpha) return end function matrix_dot + integer function matrix_get_size(self) result(N) + class(state_matrix), intent(in) :: self + N = N + return + end function matrix_get_size + subroutine matrix_scal(self, alpha) class(state_matrix), intent(inout) :: self - real(kind=wp) , intent(in) :: alpha + real(wp) , intent(in) :: alpha self%state = self%state * alpha return end subroutine matrix_scal subroutine matrix_axpby(self, alpha, vec, beta) - class(state_matrix) , intent(inout) :: self - class(abstract_vector), intent(in) :: vec - real(kind=wp) , intent(in) :: alpha, beta + class(state_matrix) , intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: vec + real(wp) , intent(in) :: alpha, beta select type(vec) type is(state_matrix) self%state = alpha*self%state + beta*vec%state @@ -172,7 +180,7 @@ subroutine matrix_rand(self, ifnorm) logical, optional, intent(in) :: ifnorm ! internals logical :: normalize - real(kind=wp) :: alpha + real(wp) :: alpha normalize = optval(ifnorm, .true.) 
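      ! Editorial note: optval (from stdlib) returns the optional argument when it is present
      ! and the supplied default otherwise, so normalize == ifnorm if given, else .true.;
      ! the same pattern is used for all optional flags in these examples.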
call random_number(self%state) if (normalize) then @@ -182,107 +190,57 @@ subroutine matrix_rand(self, ifnorm) return end subroutine matrix_rand - !--------------------------------------- - !----- CONSTRUCT THE MESH ----- - !--------------------------------------- - - subroutine initialize_mesh() - implicit none - !> Mesh array. - real(kind=wp), allocatable :: x(:) - integer :: i + !----------------------------------------------------------------------- + !----- TYPE BOUND PROCEDURE FOR SYM LOW RANK REPRESENTATION ----- + !----------------------------------------------------------------------- - !> Construct mesh. - x = linspace(-L/2, L/2, nx) + subroutine initialize_LR_state(self, U, S, rk, rkmax) + class(LR_state), intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: U(:) + real(wp), intent(in) :: S(:,:) + integer, intent(in) :: rk + integer, optional, intent(in) :: rkmax - return - end subroutine initialize_mesh - - !-------------------------------------------------------------------- - !----- UTILITIES FOR STATE_VECTOR AND STATE MATRIX TYPES ----- - !-------------------------------------------------------------------- - - subroutine get_state(mat_out, state_in) - !! Utility function to transfer data from a state vector to a real array - real(kind=wp), intent(out) :: mat_out(:,:) - class(abstract_vector), intent(in) :: state_in(:) - ! internal variables - integer :: k, kdim - mat_out = 0.0_wp - select type (state_in) - type is (state_vector) - kdim = size(state_in) - call assert_shape(mat_out, (/ N, kdim /), 'get_state -> state_vector', 'mat_out') - do k = 1, kdim - mat_out(:,k) = state_in(k)%state - end do - type is (state_matrix) - call assert_shape(mat_out, (/ N, N /), 'get_state -> state_matrix', 'mat_out') - mat_out = reshape(state_in(1)%state, (/ N, N /)) - end select - return - end subroutine get_state - - subroutine set_state(state_out, mat_in) - !! Utility function to transfer data from a real array to a state vector - class(abstract_vector), intent(out) :: state_out(:) - real(kind=wp), intent(in) :: mat_in(:,:) - ! internal variables - integer :: k, kdim - select type (state_out) - type is (state_vector) - kdim = size(state_out) - call assert_shape(mat_in, (/ N, kdim /), 'set_state -> state_vector', 'mat_in') - call mat_zero(state_out) - do k = 1, kdim - state_out(k)%state = mat_in(:,k) - end do - type is (state_matrix) - call assert_shape(mat_in, (/ N, N /), 'set_state -> state_matrix', 'mat_in') - call mat_zero(state_out) - state_out(1)%state = reshape(mat_in, shape(state_out(1)%state)) - end select - return - end subroutine set_state - - subroutine init_rand(state, ifnorm) - !! Utility function to initialize a state vector with random data - class(abstract_vector), intent(inout) :: state(:) - logical, optional, intent(in) :: ifnorm - ! internal variables - integer :: k, kdim - logical :: normalize - normalize = optval(ifnorm,.true.) - select type (state) + ! internals + real(wp), allocatable :: R(:, :) + integer :: i, n, rka, info + + n = size(U) + call assert_shape(S, [n,n], "initialize_LR_state", "S") + + ! optional size argument + if (present(rkmax)) then + self%rk = rkmax - 1 + rka = rkmax + else + self%rk = rk + rka = rk + 1 + end if + + select type (U) type is (state_vector) - kdim = size(state) - do k = 1, kdim - call state(k)%rand(ifnorm = normalize) - end do - type is (state_matrix) - kdim = size(state) - do k = 1, kdim - call state(k)%rand(ifnorm = normalize) - end do + ! 
allocate & initialize + allocate(self%U(rka), source=U(1)); call zero_basis(self%U) + allocate(self%S(rka,rka)); self%S = 0.0_wp + ! copy inputs + if (self%rk > n) then ! copy the full IC into self%U + call copy_basis(self%U(1:n), U) + self%S(1:n,1:n) = S + else ! fill the first self%rk columns of self%U with the first self%rk columns of the IC + call copy_basis(self%U(1:self%rk), U(1:self%rk)) + self%S(1:self%rk,1:self%rk) = S(1:self%rk,1:self%rk) + end if + ! top up basis (to rka for rank-adaptivity) with orthonormal columns if needed + if (rka > n) then + do i = n+1, rka + call self%U(i)%rand() + end do + allocate(R(rka,rka)); R = 0.0_wp + call qr(self%U, R, info) + call check_info(info, 'qr', module=this_module, procedure='initialize_LR_state') + end if end select return - end subroutine init_rand - - !------------------------------------------------------------ - !----- UTILITIES FOR SYM LOW RANK REPRESENTATION ----- - !------------------------------------------------------------ - - subroutine set_LR_state(self, U, S) - class(LR_state), intent(inout) :: self - real(kind=wp), intent(in) :: U(:,:) - real(kind=wp), intent(in) :: S(:,:) - ! internals - integer :: rk - rk = size(U,2) - call assert_shape(S, (/ rk, rk /), 'set_LR_state', 'S') - call set_state(self%U, U) - self%S = S - return - end subroutine set_LR_state + end subroutine initialize_LR_state end module laplacian2D_LTI_Lyapunov_Base \ No newline at end of file diff --git a/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_operators.f90 b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_operators.f90 index 055de1c..5c540e2 100644 --- a/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_operators.f90 +++ b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_operators.f90 @@ -1,22 +1,37 @@ module Laplacian2D_LTI_Lyapunov_Operators - use Laplacian2D_LTI_Lyapunov_Base - !> LightKrylov for linear algebra. - use LightKrylov - use LightKrylov_utils - !> Standard Library. - use stdlib_math, only : linspace + ! Standard Library. use stdlib_optval, only : optval - use stdlib_linalg, only : eye + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_Utils, only : assert_shape + ! LightROM + use LightROM_AbstractLTIsystems ! abstract_lti_system + ! Laplacian + use Laplacian2D_LTI_Lyapunov_Base implicit none - public :: CALE, laplacian, laplacian_mat - public :: build_operator, reconstruct_TQ + private :: this_module + character*128, parameter :: this_module = 'Laplacian2D_LTI_Lyapunov_Operators' + ! 
operator + public :: laplacian, laplacian_mat + + + !----------------------------------------------- + !----- LIGHTKRYLOV LTI SYSTEM TYPE ----- + !----------------------------------------------- + + type, extends(abstract_lti_system_rdp), public :: lti_system + contains + private + procedure, pass(self), public :: initialize_lti_system + end type lti_system !----------------------------------- !----- LAPLACE OPERATOR ----- !----------------------------------- - type, extends(abstract_linop), public :: laplace_operator + type, extends(abstract_linop_rdp), public :: laplace_operator contains private procedure, pass(self), public :: matvec => direct_matvec_laplace @@ -25,20 +40,15 @@ module Laplacian2D_LTI_Lyapunov_Operators contains - function CALE(X,A,Q) result(Y) - real(kind=wp), dimension(n,n) :: X, A, Q, Y - Y = matmul(transpose(A), X) + matmul(X, A) + Q - end function CALE - !----- TYPE-BOUND PROCEDURE FOR LAPLACE OPERATOR ----- subroutine direct_matvec_laplace(self, vec_in, vec_out) !> Linear Operator. - class(laplace_operator),intent(in) :: self + class(laplace_operator), intent(in) :: self !> Input vector. - class(abstract_vector) , intent(in) :: vec_in + class(abstract_vector_rdp), intent(in) :: vec_in !> Output vector. - class(abstract_vector) , intent(out) :: vec_out + class(abstract_vector_rdp), intent(out) :: vec_out select type(vec_in) type is (state_vector) select type(vec_out) @@ -56,9 +66,9 @@ end subroutine direct_matvec_laplace subroutine laplacian(vec_out, vec_in) !> State vector. - real(kind=wp) , dimension(:), intent(in) :: vec_in + real(wp), dimension(:), intent(in) :: vec_in !> Time-derivative. - real(kind=wp) , dimension(:), intent(out) :: vec_out + real(wp), dimension(:), intent(out) :: vec_out !> Internal variables. integer :: i, j, in @@ -96,16 +106,16 @@ end subroutine laplacian subroutine laplacian_mat(flat_mat_out, flat_mat_in, transpose) !> State vector. - real(kind=wp) , dimension(:), intent(in) :: flat_mat_in + real(wp), dimension(:), intent(in) :: flat_mat_in !> Time-derivative. - real(kind=wp) , dimension(:), intent(out) :: flat_mat_out + real(wp), dimension(:), intent(out) :: flat_mat_out !> Transpose logical, optional :: transpose logical :: trans !> Internal variables. integer :: j - real(kind=wp), dimension(N,N) :: mat, dmat + real(wp), dimension(N,N) :: mat, dmat !> Deal with optional argument trans = optval(transpose,.false.) @@ -130,60 +140,84 @@ subroutine laplacian_mat(flat_mat_out, flat_mat_in, transpose) return end subroutine laplacian_mat - subroutine build_operator(A) - !! Build the two-dimensional Laplace operator explicitly - real(kind=wp), intent(out) :: A(N,N) - integer :: i, j, k - - A = -4.0_wp/dx2*eye(N) - do i = 1, nx - do j = 1, nx - 1 - k = (i-1)*nx + j - A(k + 1, k) = 1.0_wp/dx2 - A(k, k + 1) = 1.0_wp/dx2 - end do - end do - do i = 1, N-nx - A(i, i + nx) = 1.0_wp/dx2 - A(i + nx, i) = 1.0_wp/dx2 - end do - return - end subroutine build_operator + !-------------------------------------- + !----- EXP(tA) SUBROUTINE ----- + !-------------------------------------- + + subroutine exptA(vec_out, A, vec_in, tau, info, trans) + !! Subroutine for the exponential propagator that conforms with the abstract interface + !! defined in expmlib.f90 + class(abstract_vector_rdp), intent(out) :: vec_out + !! Output vector + class(abstract_linop_rdp), intent(inout) :: A + !! Linear operator + class(abstract_vector_rdp), intent(in) :: vec_in + !! Input vector. + real(wp), intent(in) :: tau + !! Integration horizon + integer, intent(out) :: info + !! 
Information flag + logical, optional, intent(in) :: trans + logical :: transpose + !! Direct or Adjoint? + + ! optional argument + transpose = optval(trans, .false.) + + ! time integrator + select type (vec_in) + type is (state_vector) + select type (vec_out) + type is (state_vector) + select type (A) + type is (laplace_operator) + call k_exptA(vec_out, A, vec_in, tau, info, transpose) + end select + end select + end select - subroutine reconstruct_TQ(T, Q, A, D, E, tw) - !! Reconstruct tridiagonal matrix T and orthogonal projector Q from dsytd2 output (A, D, E) - real(kind=wp), intent(out) :: T(N,N) - real(kind=wp), intent(out) :: Q(N,N) - real(kind=wp), intent(in) :: A(N,N) - real(kind=wp), intent(in) :: D(N) - real(kind=wp), intent(in) :: E(N-1) - real(kind=wp), intent(in) :: tw(N-1) + end subroutine exptA - ! internal variables - real(wp) :: Hi(N,N) - real(wp) :: vec(N,1) - integer :: i - - ! Build orthogonal Q = H(1) @ H(2) @ ... @ H(n-1) - Q = eye(N) - do i = 1, N - 1 - vec = 0.0_wp - vec(i+1,1) = 1.0_wp - vec(i+2:N,1) = A(i+2:N,i) - Hi = eye(N) - tw(i) * matmul( vec, transpose(vec) ) - Q = matmul( Q, Hi ) - end do + !-------------------------------------------------------- + !----- TYPE BOUND PROCEDURES FOR LTI SYSTEMS ----- + !-------------------------------------------------------- - ! Build tridiagonal T - T = 0.0_wp - do i = 1, N - T(i,i) = D(i) - end do - do i = 1, N - 1 - T(i,i+1) = E(i) - T(i+1,i) = E(i) - end do + subroutine initialize_lti_system(self, A, B, CT, D) + class(lti_system), intent(inout) :: self + class(abstract_linop_rdp), intent(in) :: A + class(abstract_vector_rdp), intent(in) :: B(:) + class(abstract_vector_rdp), intent(in) :: CT(:) + real(wp), optional, intent(in) :: D(:,:) + + ! internal variables + integer :: rk_b, rk_c - end subroutine reconstruct_TQ + ! Operator + select type (A) + type is (laplace_operator) + allocate(self%A, source=A) + end select + ! Input + select type (B) + type is (state_vector) + rk_b = size(B) + allocate(self%B(1:rk_b), source=B(1:rk_b)) + end select + ! Output + select type (CT) + type is (state_vector) + rk_c = size(CT) + allocate(self%CT(1:rk_c), source=CT(1:rk_c)) + end select + ! Throughput + allocate(self%D(1:rk_c, 1:rk_b)) + if (present(D)) then + call assert_shape(D, (/ rk_c, rk_b /), 'initialize_lti_system', 'D') + self%D = D + else + self%D = 0.0_wp + end if + return + end subroutine initialize_lti_system end module Laplacian2D_LTI_Lyapunov_Operators \ No newline at end of file diff --git a/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_utils.f90 b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_utils.f90 new file mode 100644 index 0000000..345ada0 --- /dev/null +++ b/example/DLRA_laplacian2D_lti_lyapunov/laplacian2D_lti_lyapunov_utils.f90 @@ -0,0 +1,229 @@ +module Laplacian2D_LTI_Lyapunov_Utils + ! Standard Library. + use stdlib_math, only : linspace + use stdlib_optval, only : optval + use stdlib_linalg, only : eye, diag, svd + ! RKLIB module for time integration. + use rklib_module + ! LightKrylov for linear algebra. + use LightKrylov, only : wp => dp + use LightKrylov_AbstractVectors ! linear_combination + ! Laplacian + use Laplacian2D_LTI_Lyapunov_Base + use laplacian2D_LTI_Lyapunov_Operators + implicit none + + private:: this_module + ! mesh + public :: initialize_mesh + ! utilities for state matrix + public :: get_state, set_state, init_rand + ! initial conditions + public :: generate_random_initial_condition + ! 
misc + public :: CALE, build_operator, reconstruct_TQ + + character*128, parameter :: this_module = 'Laplacian2D_LTI_Lyapunov_Utils' + +contains + + !--------------------------------------- + !----- CONSTRUCT THE MESH ----- + !--------------------------------------- + + subroutine initialize_mesh() + implicit none + !> Mesh array. + real(wp), allocatable :: x(:) + integer :: i + + !> Construct mesh. + x = linspace(-L/2, L/2, nx) + + return + end subroutine initialize_mesh + + !-------------------------------------------------------------------- + !----- UTILITIES FOR STATE_VECTOR AND STATE MATRIX TYPES ----- + !-------------------------------------------------------------------- + + subroutine get_state(mat_out, state_in) + !! Utility function to transfer data from a state vector to a real array + real(wp), intent(out) :: mat_out(:,:) + class(abstract_vector_rdp), intent(in) :: state_in(:) + ! internal variables + integer :: k, kdim + mat_out = 0.0_wp + select type (state_in) + type is (state_vector) + kdim = size(state_in) + call assert_shape(mat_out, (/ N, kdim /), 'get_state -> state_vector', 'mat_out') + do k = 1, kdim + mat_out(:,k) = state_in(k)%state + end do + type is (state_matrix) + call assert_shape(mat_out, (/ N, N /), 'get_state -> state_matrix', 'mat_out') + mat_out = reshape(state_in(1)%state, (/ N, N /)) + end select + return + end subroutine get_state + + subroutine set_state(state_out, mat_in) + !! Utility function to transfer data from a real array to a state vector + class(abstract_vector_rdp), intent(out) :: state_out(:) + real(wp), intent(in) :: mat_in(:,:) + ! internal variables + integer :: k, kdim + select type (state_out) + type is (state_vector) + kdim = size(state_out) + call assert_shape(mat_in, (/ N, kdim /), 'set_state -> state_vector', 'mat_in') + call zero_basis(state_out) + do k = 1, kdim + state_out(k)%state = mat_in(:,k) + end do + type is (state_matrix) + call assert_shape(mat_in, (/ N, N /), 'set_state -> state_matrix', 'mat_in') + call zero_basis(state_out) + state_out(1)%state = reshape(mat_in, shape(state_out(1)%state)) + end select + return + end subroutine set_state + + subroutine init_rand(state, ifnorm) + !! Utility function to initialize a state vector with random data + class(abstract_vector_rdp), intent(inout) :: state(:) + logical, optional, intent(in) :: ifnorm + ! internal variables + integer :: k, kdim + logical :: normalize + normalize = optval(ifnorm,.true.) + select type (state) + type is (state_vector) + kdim = size(state) + do k = 1, kdim + call state(k)%rand(ifnorm = normalize) + end do + type is (state_matrix) + kdim = size(state) + do k = 1, kdim + call state(k)%rand(ifnorm = normalize) + end do + end select + return + end subroutine init_rand + + !-------------------------------------- + !----- INITIAL CONDITIONS ----- + !-------------------------------------- + + subroutine generate_random_initial_condition(U, S, rk) + class(state_vector), intent(out) :: U(:) + real(wp), intent(out) :: S(:,:) + integer, intent(in) :: rk + ! internals + class(state_vector), allocatable :: Utmp(:) + integer, allocatable :: perm(:) + ! SVD + real(wp) :: U_svd(rk,rk) + real(wp) :: S_svd(rk) + real(wp) :: V_svd(rk,rk) + integer :: i, info + + if (size(U) < rk) then + write(*,*) 'Input krylov basis size incompatible with requested rank', rk + STOP 1 + else + call init_rand(U, .false.) 
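+      ! Editorial note (summary of the construction that follows): the basis U is first filled
+      ! with unnormalized random vectors; a pivoted QR of a copy of U then yields an
+      ! orthonormal basis Utmp and the triangular factor stored in S; an SVD of that factor
+      ! gives S = diag(svals) and a rotation U_svd that is absorbed back into U via
+      ! linear_combination. The resulting X0 = U @ S @ U.T is therefore symmetric positive
+      ! semi-definite with rank at most rk.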
+ end if + if (size(S,1) < rk) then + write(*,*) 'Input coefficient matrix size incompatible with requested rank', rk + STOP 1 + else if (size(S,1) /= size(S,2)) then + write(*,*) 'Input coefficient matrix must be square.' + STOP 2 + else + S = 0.0_wp + end if + ! perform QR + allocate(perm(1:rk)); perm = 0 + allocate(Utmp(1:rk), source=U(1:rk)) + call qr(Utmp, S, perm, info, verbosity=.false.) + if (info /= 0) write(*,*) ' [generate_random_initial_condition] Info: Colinear vectors detected in QR, column ', info + ! perform SVD + call svd(S(:,1:rk), S_svd(1:rk), U_svd(:,1:rk), V_svd(1:rk,1:rk)) + S = diag(S_svd) + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, Utmp, U_svd) + call copy_basis(U, Xwrk) + end block + + end subroutine + + !------------------------ + !----- MISC ----- + !------------------------ + + function CALE(X,A,Q) result(Y) + real(wp), dimension(n,n) :: X, A, Q, Y + Y = matmul(transpose(A), X) + matmul(X, A) + Q + end function CALE + + subroutine build_operator(A) + !! Build the two-dimensional Laplace operator explicitly + real(wp), intent(out) :: A(N,N) + integer :: i, j, k + + A = -4.0_wp/dx2*eye(N) + do i = 1, nx + do j = 1, nx - 1 + k = (i-1)*nx + j + A(k + 1, k) = 1.0_wp/dx2 + A(k, k + 1) = 1.0_wp/dx2 + end do + end do + do i = 1, N-nx + A(i, i + nx) = 1.0_wp/dx2 + A(i + nx, i) = 1.0_wp/dx2 + end do + return + end subroutine build_operator + + subroutine reconstruct_TQ(T, Q, A, D, E, tw) + !! Reconstruct tridiagonal matrix T and orthogonal projector Q from dsytd2 output (A, D, E) + real(wp), intent(out) :: T(N,N) + real(wp), intent(out) :: Q(N,N) + real(wp), intent(in) :: A(N,N) + real(wp), intent(in) :: D(N) + real(wp), intent(in) :: E(N-1) + real(wp), intent(in) :: tw(N-1) + + ! internal variables + real(wp) :: Hi(N,N) + real(wp) :: vec(N,1) + integer :: i + + ! Build orthogonal Q = H(1) @ H(2) @ ... @ H(n-1) + Q = eye(N) + do i = 1, N - 1 + vec = 0.0_wp + vec(i+1,1) = 1.0_wp + vec(i+2:N,1) = A(i+2:N,i) + Hi = eye(N) - tw(i) * matmul( vec, transpose(vec) ) + Q = matmul( Q, Hi ) + end do + + ! Build tridiagonal T + T = 0.0_wp + do i = 1, N + T(i,i) = D(i) + end do + do i = 1, N - 1 + T(i,i+1) = E(i) + T(i+1,i) = E(i) + end do + + end subroutine reconstruct_TQ + +end module Laplacian2D_LTI_Lyapunov_Utils \ No newline at end of file diff --git a/example/DLRA_laplacian2D_lti_lyapunov/main.f90 b/example/DLRA_laplacian2D_lti_lyapunov/main.f90 index 65c6096..99abb45 100644 --- a/example/DLRA_laplacian2D_lti_lyapunov/main.f90 +++ b/example/DLRA_laplacian2D_lti_lyapunov/main.f90 @@ -1,31 +1,37 @@ program demo + ! Standard Library + use stdlib_optval, only : optval + use stdlib_linalg, only : eye + use stdlib_math, only : all_close, logspace + use stdlib_io_npy, only : save_npy + use stdlib_logger, only : error_level + ! LightKrylov for linear algebra use LightKrylov - use LightKrylov_expmlib - use LightKrylov_utils - + use LightKrylov, only : wp => dp + use LightKrylov_Logger + use LightKrylov_ExpmLib + use LightKrylov_Utils + ! LightROM use LightROM_AbstractLTIsystems - use LightROM_utils - + use LightROM_Utils use LightROM_LyapunovSolvers use LightROM_LyapunovUtils - + ! 
Laplacian use Laplacian2D_LTI_Lyapunov_Base use Laplacian2D_LTI_Lyapunov_Operators use Laplacian2D_LTI_Lyapunov_RKlib - - use stdlib_optval, only : optval - use stdlib_linalg, only : eye - use stdlib_math, only : all_close, logspace - use stdlib_io_npy, only : save_npy + use Laplacian2D_LTI_Lyapunov_Utils implicit none + character*128, parameter :: this_module = 'Laplacian2D_LTI_Lyapunov_Main' + !---------------------------------------------------------- !----- LYAPUNOV EQUATION FOR LAPLACE OPERATOR ----- !---------------------------------------------------------- ! DLRA integer, parameter :: rkmax = 14 - integer, parameter :: rk_X0 = 10 + integer, parameter :: rk_X0 = 14 logical, parameter :: verb = .false. logical, parameter :: save = .false. character*128 :: oname @@ -43,10 +49,11 @@ program demo ! LTI system type(lti_system) :: LTI + real(wp), allocatable :: D(:,:) integer :: p ! Laplacian - type(laplace_operator), allocatable :: A + type(laplace_operator), allocatable :: A ! LR representation type(LR_state) :: X @@ -59,8 +66,10 @@ program demo real(wp) :: X_RKlib_ref(N,N) ! Initial condition - real(wp) :: U0(N, rkmax) + type(state_vector) :: U0(rkmax) real(wp) :: S0(rkmax,rkmax) + ! Matrix + real(wp) :: U0_in(N,rkmax) real(wp) :: X0(N,N) ! OUTPUT @@ -84,14 +93,15 @@ program demo real(wp) :: T(N,N), Q(N,N), Z(N,N), Vdata(N,N), Wdata(N,N), Ydata(N,N) real(wp) :: scale integer :: isgn - ! SVD - real(wp) :: U_svd(N,N) - real(wp) :: S_svd(rkmax) - real(wp) :: V_svd(rkmax,rkmax) ! timer integer :: clock_rate, clock_start, clock_stop + ! DLRA opts + type(dlra_opts) :: opts + + call logger%configure(level=error_level); write(*,*) 'Logging set to error_level.' + call system_clock(count_rate=clock_rate) write(*,*) '---------------------------------------------' @@ -120,25 +130,18 @@ program demo BBTdata = -matmul(Bdata(:,1:rk_b), transpose(Bdata(:,1:rk_b))) BBT(1:N**2) = -reshape(BBTdata, shape(BBT)) - p = 1 + ! Define LTI system LTI = lti_system() - allocate(LTI%A, source=A) - allocate(LTI%B(1:rk_b), source=B(1:rk_b)); - allocate(LTI%CT(1:p), source=B(1)); call mat_zero(LTI%CT) - allocate(LTI%D(1:p,1:rk_b)); LTI%D = 0.0_wp - - ! Define initial condition - call random_number(U0(:, 1:rk_X0)) - ! Compute SVD to get low-rank representation - call svd(U0(:,1:rk_X0), U_svd(:,1:N), S_svd(1:rk_X0), V_svd(1:rk_X0,1:rk_X0)) - S0 = 0.0_wp - do i = 1,rk_X0 - S0(i,i) = S_svd(i) - end do - U0(:,1:rk_X0) = U_svd(:,1:rk_X0) + call LTI%initialize_lti_system(A, B, B) + call zero_basis(LTI%CT) + + ! Define initial condition of the form X0 + U0 @ S0 @ U0.T SPD + if (verb) write(*,*) ' Define initial condition' + call generate_random_initial_condition(U0, S0, rk_X0) + call get_state(U_out, U0) ! Compute the full initial condition X0 = U_in @ S0 @ U_in.T - X0 = matmul( U0(:,1:rk_X0), matmul(S0(1:rk_X0,1:rk_X0), transpose(U0(:,1:rk_X0)))) + X0 = matmul( U_out(:,1:rk_X0), matmul(S0(1:rk_X0,1:rk_X0), transpose(U_out(:,1:rk_X0)))) !------------------ ! COMPUTE EXACT SOLUTION OF THE LYAPUNOV EQUATION WITH LAPACK @@ -175,6 +178,10 @@ program demo write(*,'(A40,F10.4," s")') '--> X_ref. Elapsed time:', real(clock_stop-clock_start)/real(clock_rate) write(*,*) + ! sanity check + X0 = CALE(Xref, Adata, BBT) + write(*,*) ' Direct problem:', norm2(X0)/N + !------------------ ! 
COMPUTE SOLUTION WITH RK FOR DIFFERENT INTEGRATION TIMES AND COMPARE TO STUART-BARTELS !------------------ @@ -229,9 +236,6 @@ program demo do i = 1, nrk rk = rkv(i) - allocate(U(1:rk)); call mat_zero(U) - allocate(X%U(1:rk), source=U(1:rk)) - allocate(X%S(1:rk,1:rk)) write(*,'(A10,I1)') ' torder = ', torder do j = ndt, 1, -1 @@ -239,11 +243,12 @@ program demo if (verb) write(*,*) ' dt = ', dt, 'Tend = ', Tend ! Reset input - call X%set_LR_state(U0(:,1:rk), S0(1:rk,1:rk)) + call X%initialize_LR_state(U0, S0, rk) ! run step + opts = dlra_opts(mode=torder, verbose=verb) call system_clock(count=clock_start) ! Start Timer - call numerical_low_rank_splitting_lyapunov_integrator(X, LTI, Tend, dt, torder, info) + call projector_splitting_DLRA_lyapunov_integrator(X, LTI%A, LTI%B, Tend, dt, info, exptA=exptA, options=opts) call system_clock(count=clock_stop) ! Stop Timer ! Reconstruct solution @@ -254,6 +259,9 @@ program demo & rk, torder, dt, Tend, & & norm2(X_RKlib_ref - X_out)/N, & & real(clock_stop-clock_start)/real(clock_rate) + + deallocate(X%U) + deallocate(X%S) end do if (save) then @@ -261,10 +269,6 @@ program demo call save_npy(oname, X_out) end if - deallocate(X%U); - deallocate(X%S); - deallocate(U); - end do end do diff --git a/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_RKlib.f90 b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_RKlib.f90 index 6ff38f3..52071cd 100644 --- a/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_RKlib.f90 +++ b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_RKlib.f90 @@ -1,32 +1,35 @@ module Laplacian2D_LTI_Riccati_RKlib - use Laplacian2D_LTI_Riccati_Base - use laplacian2D_LTI_Riccati_Operators - !> RKLIB module for time integration. - use rklib_module - !> LightKrylov for linear algebra. - use LightKrylov !> Standard Library. use stdlib_math, only : linspace use stdlib_optval, only : optval use stdlib_linalg, only : eye + !> RKLIB module for time integration. + use rklib_module + !> LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + !> Laplacian + use Laplacian2D_LTI_Riccati_Base + use laplacian2D_LTI_Riccati_Operators implicit none - private + private :: this_module + character*128, parameter :: this_module = 'Laplacian2D_LTI_Lyapunov_Base' !----------------------------------------------- !----- EXPONENTIAL PROPAGATOR RKLIB ----- !----------------------------------------------- - type, extends(abstract_linop), public :: rklib_exptA_laplacian - real(kind=wp) :: tau ! Integration time. + type, extends(abstract_linop_rdp), public :: rklib_exptA_laplacian + real(wp) :: tau ! Integration time. contains private procedure, pass(self), public :: matvec => direct_solver_vec procedure, pass(self), public :: rmatvec => direct_solver_vec ! dummy end type rklib_exptA_laplacian - type, extends(abstract_linop), public :: rklib_riccati_mat - real(kind=wp) :: tau ! Integration time. + type, extends(abstract_linop_rdp), public :: rklib_riccati_mat + real(wp) :: tau ! Integration time. contains private procedure, pass(self), public :: matvec => direct_solver_riccati_mat @@ -43,13 +46,13 @@ module Laplacian2D_LTI_Riccati_RKlib subroutine rhs(me, t, x, f) !> Time-integrator. - class(rk_class), intent(inout) :: me + class(rk_class), intent(inout) :: me !> Current time. - real(kind=wp) , intent(in) :: t + real(wp) , intent(in) :: t !> State vector. - real(kind=wp) , dimension(:), intent(in) :: x + real(wp) , dimension(:), intent(in) :: x !> Time-derivative. 
- real(kind=wp) , dimension(:), intent(out) :: f + real(wp) , dimension(:), intent(out) :: f f = 0.0_wp call laplacian(f(1:N), x(1:N)) @@ -61,13 +64,13 @@ subroutine direct_solver_vec(self, vec_in, vec_out) !> Linear Operator. class(rklib_exptA_laplacian), intent(in) :: self !> Input vector. - class(abstract_vector) , intent(in) :: vec_in + class(abstract_vector_rdp), intent(in) :: vec_in !> Output vector. - class(abstract_vector) , intent(out) :: vec_out + class(abstract_vector_rdp), intent(out) :: vec_out !> Time-integrator. type(rks54_class) :: prop - real(kind=wp) :: dt = 1.0_wp + real(wp) :: dt = 1.0_wp select type(vec_in) type is (state_vector) @@ -88,18 +91,18 @@ end subroutine direct_solver_vec subroutine rhs_riccati(me, t, x, f) !> Time-integrator. - class(rk_class), intent(inout) :: me + class(rk_class), intent(inout) :: me !> Current time. - real(kind=wp) , intent(in) :: t + real(wp) , intent(in) :: t !> State vector. - real(kind=wp) , dimension(:), intent(in) :: x + real(wp) , dimension(:), intent(in) :: x !> Time-derivative. - real(kind=wp) , dimension(:), intent(out) :: f + real(wp) , dimension(:), intent(out) :: f !> Internal variables. integer :: i, j, k - real(kind=wp), dimension(N,N) :: xm - real(kind=wp), dimension(N**2) :: dv, dvT + real(wp), dimension(N,N) :: xm + real(wp), dimension(N**2) :: dv, dvT !> Sets the internal variables. dv = 0.0_wp @@ -116,14 +119,14 @@ end subroutine rhs_riccati subroutine direct_solver_riccati_mat(self, vec_in, vec_out) !> Linear Operator. - class(rklib_riccati_mat), intent(in) :: self + class(rklib_riccati_mat), intent(in) :: self !> Input vector. - class(abstract_vector) , intent(in) :: vec_in + class(abstract_vector_rdp), intent(in) :: vec_in !> Output vector. - class(abstract_vector) , intent(out) :: vec_out + class(abstract_vector_rdp), intent(out) :: vec_out !> Time-integrator. type(rks54_class) :: prop - real(kind=wp) :: dt = 0.1_wp + real(wp) :: dt = 0.1_wp select type(vec_in) type is (state_matrix) diff --git a/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_base.f90 b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_base.f90 index fbfd815..d0db6a7 100644 --- a/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_base.f90 +++ b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_base.f90 @@ -1,54 +1,46 @@ module Laplacian2D_LTI_Riccati_Base - !> LightKrylov for linear algebra. - use LightKrylov - use LightKrylov_utils, only : assert_shape - use LightROM_AbstractLTIsystems - !> Standard Library. - use stdlib_math, only : linspace + ! Standard Library. use stdlib_optval, only : optval - use stdlib_linalg, only : eye + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_Logger + use LightKrylov_Utils, only : assert_shape + use LightKrylov_AbstractVectors + ! LightROM + use LightROM_AbstractLTIsystems ! LR_state implicit none - private + private :: this_module + character*128, parameter :: this_module = 'Laplacian2D_LTI_Riccati_Base' ! problem parameters - public :: N, nx, dx, dx2, L, rk_b, rk_c + public :: N, nx, dx, dx2, L, rk_b, rk_c ! problem definition - public :: B, CT, Qc, Rinv + public :: B, CT, Qc, Rinv ! derived data - public :: Bdata, CTdata, CTQcC, BRinvBTdata, CTQcCdata - ! initialisation - public :: initialize_problem - ! utils - public :: get_state, set_state, init_rand + public :: Bdata, CTdata, CTQcC, BRinvBTdata, CTQcCdata !------------------------------ !----- PARAMETERS ----- !------------------------------ ! 
--> Mesh related parameters. - real(kind=wp), parameter :: L = 1.0_wp !> Domain length - integer , parameter :: nx = 4 !> Number of grid points per direction - integer , parameter :: N = nx**2 !> total number of grid points - real(kind=wp), parameter :: dx = L/nx !> Grid size. - real(kind=wp), parameter :: dx2= dx**2 !> Grid size. - integer, parameter :: rk_b = 1 !> rank of the RHS - integer, parameter :: rk_c = 1 !> rank of Q = CTC - - !----------------------------------------------- - !----- LIGHTKRYLOV LTI SYSTEM TYPE ----- - !----------------------------------------------- - - type, extends(abstract_lti_system), public :: lti_system - end type lti_system + real(wp), parameter :: L = 1.0_wp !> Domain length + integer, parameter :: nx = 4 !> Number of grid points per direction + integer, parameter :: N = nx**2 !> total number of grid points + real(wp), parameter :: dx = L/nx !> Grid size. + real(wp), parameter :: dx2= dx**2 !> Grid size. + integer, parameter :: rk_b = 1 !> rank of the RHS + integer, parameter :: rk_c = 1 !> rank of Q = CTC !------------------------------------------------------- !----- LIGHTKRYLOV SYM LOW RANK STATE TYPE ----- !------------------------------------------------------- - type, extends(abstract_sym_low_rank_state), public :: LR_state + type, extends(abstract_sym_low_rank_state_rdp), public :: LR_state contains private - procedure, pass(self), public :: set_LR_state + procedure, pass(self), public :: initialize_LR_state end type LR_state @@ -56,8 +48,8 @@ module Laplacian2D_LTI_Riccati_Base !----- LIGHTKRYLOV VECTOR TYPE ----- !------------------------------------------- - type, extends(abstract_vector), public :: state_vector - real(kind=wp) :: state(N) = 0.0_wp + type, extends(abstract_vector_rdp), public :: state_vector + real(wp) :: state(N) = 0.0_wp contains private procedure, pass(self), public :: zero => vector_zero @@ -65,14 +57,15 @@ module Laplacian2D_LTI_Riccati_Base procedure, pass(self), public :: scal => vector_scal procedure, pass(self), public :: axpby => vector_axpby procedure, pass(self), public :: rand => vector_rand + procedure, pass(self), public :: get_size => vector_get_size end type state_vector !------------------------------------------- !----- LIGHTKRYLOV VECTOR TYPE ----- !------------------------------------------- - type, extends(abstract_vector), public :: state_matrix - real(kind=wp) :: state(N**2) = 0.0_wp + type, extends(abstract_vector_rdp), public :: state_matrix + real(wp) :: state(N**2) = 0.0_wp contains private procedure, pass(self), public :: zero => matrix_zero @@ -80,21 +73,19 @@ module Laplacian2D_LTI_Riccati_Base procedure, pass(self), public :: scal => matrix_scal procedure, pass(self), public :: axpby => matrix_axpby procedure, pass(self), public :: rand => matrix_rand + procedure, pass(self), public :: get_size => matrix_get_size end type state_matrix type(state_vector) :: B(rk_b) type(state_vector) :: CT(rk_c) - real(kind=wp) :: Qc(rk_c,rk_c) - real(kind=wp) :: Rinv(rk_b,rk_b) + real(wp) :: Qc(rk_c,rk_c) + real(wp) :: Rinv(rk_b,rk_b) - real(kind=wp) :: Bdata(N,rk_b) - real(kind=wp) :: CTdata(N,rk_c) - real(kind=wp) :: Bwrk(N,rk_b) - real(kind=wp) :: CTQcC(N**2) - real(kind=wp) :: CTQcCdata(N,N) - real(kind=wp) :: BRinvBTdata(N,N) - - + real(wp) :: Bdata(N,rk_b) + real(wp) :: CTdata(N,rk_c) + real(wp) :: CTQcC(N**2) + real(wp) :: CTQcCdata(N,N) + real(wp) :: BRinvBTdata(N,N) contains @@ -106,9 +97,9 @@ subroutine vector_zero(self) return end subroutine vector_zero - real(kind=wp) function vector_dot(self, vec) 
result(alpha) - class(state_vector) , intent(in) :: self - class(abstract_vector), intent(in) :: vec + real(wp) function vector_dot(self, vec) result(alpha) + class(state_vector), intent(in) :: self + class(abstract_vector_rdp), intent(in) :: vec select type(vec) type is (state_vector) alpha = dot_product(self%state, vec%state) @@ -116,17 +107,23 @@ real(kind=wp) function vector_dot(self, vec) result(alpha) return end function vector_dot + integer function vector_get_size(self) result(N) + class(state_vector), intent(in) :: self + N = nx + return + end function vector_get_size + subroutine vector_scal(self, alpha) class(state_vector), intent(inout) :: self - real(kind=wp) , intent(in) :: alpha + real(wp), intent(in) :: alpha self%state = self%state * alpha return end subroutine vector_scal subroutine vector_axpby(self, alpha, vec, beta) - class(state_vector) , intent(inout) :: self - class(abstract_vector), intent(in) :: vec - real(kind=wp) , intent(in) :: alpha, beta + class(state_vector), intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: vec + real(wp), intent(in) :: alpha, beta select type(vec) type is (state_vector) self%state = alpha*self%state + beta*vec%state @@ -139,7 +136,7 @@ subroutine vector_rand(self, ifnorm) logical, optional, intent(in) :: ifnorm ! internals logical :: normalize - real(kind=wp) :: alpha + real(wp) :: alpha normalize = optval(ifnorm,.true.) call random_number(self%state) if (normalize) then @@ -157,9 +154,9 @@ subroutine matrix_zero(self) return end subroutine matrix_zero - real(kind=wp) function matrix_dot(self, vec) result(alpha) - class(state_matrix) , intent(in) :: self - class(abstract_vector), intent(in) :: vec + real(wp) function matrix_dot(self, vec) result(alpha) + class(state_matrix), intent(in) :: self + class(abstract_vector_rdp), intent(in) :: vec select type(vec) type is(state_matrix) alpha = dot_product(self%state, vec%state) @@ -167,17 +164,23 @@ real(kind=wp) function matrix_dot(self, vec) result(alpha) return end function matrix_dot + integer function matrix_get_size(self) result(N) + class(state_matrix), intent(in) :: self + N = N + return + end function matrix_get_size + subroutine matrix_scal(self, alpha) class(state_matrix), intent(inout) :: self - real(kind=wp) , intent(in) :: alpha + real(wp), intent(in) :: alpha self%state = self%state * alpha return end subroutine matrix_scal subroutine matrix_axpby(self, alpha, vec, beta) - class(state_matrix) , intent(inout) :: self - class(abstract_vector), intent(in) :: vec - real(kind=wp) , intent(in) :: alpha, beta + class(state_matrix), intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: vec + real(wp), intent(in) :: alpha, beta select type(vec) type is(state_matrix) self%state = alpha*self%state + beta*vec%state @@ -190,7 +193,7 @@ subroutine matrix_rand(self, ifnorm) logical, optional, intent(in) :: ifnorm ! internals logical :: normalize - real(kind=wp) :: alpha + real(wp) :: alpha normalize = optval(ifnorm, .true.) call random_number(self%state) if (normalize) then @@ -198,131 +201,59 @@ subroutine matrix_rand(self, ifnorm) call self%scal(1.0/alpha) endif return - end subroutine matrix_rand - - !--------------------------------------- - !----- CONSTRUCT THE MESH ----- - !--------------------------------------- - - subroutine initialize_problem(magQ, magR) - implicit none - real(kind=wp), intent(in) :: magQ, magR - real(kind=wp), allocatable :: x(:) - integer :: i - - !> Construct mesh. - x = linspace(-L/2, L/2, nx) - - ! 
Define C, Qc & compute CTQcC - Qc = magQ*eye(rk_c) - - call init_rand(CT, ifnorm = .false.) - call get_state(CTdata, CT) - CTQcCdata = matmul(CTdata, matmul( Qc, transpose(CTdata))) - CTQcC(1:N**2) = reshape(CTQcCdata, shape(CTQcC)) - - ! Define B, Rinv & compule BRinvBT - if (magR .lt. atol) then - Rinv = 0.0_wp - else - Rinv = 1/magR*eye(rk_b) - endif + end subroutine matrix_rand - call init_rand(B, ifnorm = .false.) - Bdata = 0.0_wp - call get_state(Bdata, B) - Bwrk = 0.0_wp - Bwrk = matmul(Bdata, Rinv) - BRinvBTdata = matmul( Bwrk, transpose(Bdata) ) - - return - end subroutine initialize_problem - - !-------------------------------------------------------------------- - !----- UTILITIES FOR STATE_VECTOR AND STATE MATRIX TYPES ----- - !-------------------------------------------------------------------- - - subroutine get_state(mat_out, state_in) - !! Utility function to transfer data from a state vector to a real array - real(kind=wp), intent(out) :: mat_out(:,:) - class(abstract_vector), intent(in) :: state_in(:) - ! internal variables - integer :: k, kdim - mat_out = 0.0_wp - select type (state_in) - type is (state_vector) - kdim = size(state_in) - call assert_shape(mat_out, (/ N, kdim /), 'get_state -> state_vector', 'mat_out') - do k = 1, kdim - mat_out(:,k) = state_in(k)%state - end do - type is (state_matrix) - call assert_shape(mat_out, (/ N, N /), 'get_state -> state_matrix', 'mat_out') - mat_out = reshape(state_in(1)%state, (/ N, N /)) - end select - return - end subroutine get_state - - subroutine set_state(state_out, mat_in) - !! Utility function to transfer data from a real array to a state vector - class(abstract_vector), intent(out) :: state_out(:) - real(kind=wp), intent(in) :: mat_in(:,:) - ! internal variables - integer :: k, kdim - select type (state_out) - type is (state_vector) - kdim = size(state_out) - call assert_shape(mat_in, (/ N, kdim /), 'set_state -> state_vector', 'mat_in') - call mat_zero(state_out) - do k = 1, kdim - state_out(k)%state = mat_in(:,k) - end do - type is (state_matrix) - call assert_shape(mat_in, (/ N, N /), 'set_state -> state_matrix', 'mat_in') - call mat_zero(state_out) - state_out(1)%state = reshape(mat_in, shape(state_out(1)%state)) - end select - return - end subroutine set_state - - subroutine init_rand(state, ifnorm) - !! Utility function to initialize a state vector with random data - class(abstract_vector), intent(inout) :: state(:) - logical, optional, intent(in) :: ifnorm - ! internal variables - integer :: k, kdim - logical :: normalize - normalize = optval(ifnorm,.true.) 
- select type (state) - type is (state_vector) - kdim = size(state) - do k = 1, kdim - call state(k)%rand(ifnorm = normalize) - end do - type is (state_matrix) - kdim = size(state) - do k = 1, kdim - call state(k)%rand(ifnorm = normalize) - end do - end select - return - end subroutine init_rand + !----------------------------------------------------------------------- + !----- TYPE BOUND PROCEDURE FOR SYM LOW RANK REPRESENTATION ----- + !----------------------------------------------------------------------- - !------------------------------------------------------------ - !----- UTILITIES FOR SYM LOW RANK REPRESENTATION ----- - !------------------------------------------------------------ + subroutine initialize_LR_state(self, U, S, rk, rkmax) + class(LR_state), intent(inout) :: self + class(abstract_vector_rdp), intent(in) :: U(:) + real(wp), intent(in) :: S(:,:) + integer, intent(in) :: rk + integer, optional, intent(in) :: rkmax - subroutine set_LR_state(self, U, S) - class(LR_state), intent(inout) :: self - real(kind=wp), intent(in) :: U(:,:) - real(kind=wp), intent(in) :: S(:,:) ! internals - integer :: rk - rk = size(U,2) - call assert_shape(S, (/ rk, rk /), 'set_LR_state', 'S') - call set_state(self%U, U) - self%S = S + real(wp), allocatable :: R(:, :) + integer :: i, n, rka, info + + n = size(U) + call assert_shape(S, [n,n], "initialize_LR_state", "S") + + ! optional size argument + if (present(rkmax)) then + self%rk = rkmax - 1 + rka = rkmax + else + self%rk = rk + rka = rk + 1 + end if + + select type (U) + type is (state_vector) + ! allocate & initialize + allocate(self%U(rka), source=U(1)); call zero_basis(self%U) + allocate(self%S(rka,rka)); self%S = 0.0_wp + ! copy inputs + if (self%rk > n) then ! copy the full IC into self%U + call copy_basis(self%U(1:n), U) + self%S(1:n,1:n) = S + else ! fill the first self%rk columns of self%U with the first self%rk columns of the IC + call copy_basis(self%U(1:self%rk), U(1:self%rk)) + self%S(1:self%rk,1:self%rk) = S(1:self%rk,1:self%rk) + end if + ! top up basis (to rka for rank-adaptivity) with orthonormal columns if needed + if (rka > n) then + do i = n+1, rka + call self%U(i)%rand() + end do + allocate(R(rka,rka)); R = 0.0_wp + call qr(self%U, R, info) + call check_info(info, 'qr', module=this_module, procedure='initialize_LR_state') + end if + end select return - end subroutine set_LR_state + end subroutine initialize_LR_state end module Laplacian2D_LTI_Riccati_Base \ No newline at end of file diff --git a/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_operators.f90 b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_operators.f90 index 8525709..d2ff397 100644 --- a/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_operators.f90 +++ b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_operators.f90 @@ -1,22 +1,38 @@ module Laplacian2D_LTI_Riccati_Operators - use Laplacian2D_LTI_Riccati_Base - !> LightKrylov for linear algebra. - use LightKrylov - use LightKrylov_utils - !> Standard Library. - use stdlib_math, only : linspace + ! Standard Library. use stdlib_optval, only : optval use stdlib_linalg, only : eye + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_Utils ! svd + ! LightROM + use LightROM_AbstractLTIsystems ! abstract_lti_system + ! Laplacian + use Laplacian2D_LTI_Riccati_Base implicit none + private :: this_module ! 
operator - public :: CARE, build_operator, laplacian, laplacian_mat + public :: build_operator, laplacian, laplacian_mat, exptA + + character*128, parameter :: this_module = 'Laplacian2D_LTI_Riccati_Operators' + + !----------------------------------------------- + !----- LIGHTKRYLOV LTI SYSTEM TYPE ----- + !----------------------------------------------- + + type, extends(abstract_lti_system_rdp), public :: lti_system + contains + private + procedure, pass(self), public :: initialize_lti_system + end type lti_system !----------------------------------- !----- LAPLACE OPERATOR ----- !----------------------------------- - type, extends(abstract_linop), public :: laplace_operator + type, extends(abstract_linop_rdp), public :: laplace_operator contains private procedure, pass(self), public :: matvec => direct_matvec_laplace @@ -25,20 +41,15 @@ module Laplacian2D_LTI_Riccati_Operators contains - function CARE(X,A,Q,BRinvBT) result(Y) - real(kind=wp), dimension(n,n) :: X, A, Q, BRinvBT, Y - Y = matmul(transpose(A), X) + matmul(X, A) + Q - matmul(X, matmul(BRinvBT, X)) - end function CARE - !----- TYPE-BOUND PROCEDURE FOR LAPLACE OPERATOR ----- subroutine direct_matvec_laplace(self, vec_in, vec_out) !> Linear Operator. - class(laplace_operator),intent(in) :: self + class(laplace_operator), intent(in) :: self !> Input vector. - class(abstract_vector) , intent(in) :: vec_in + class(abstract_vector_rdp), intent(in) :: vec_in !> Output vector. - class(abstract_vector) , intent(out) :: vec_out + class(abstract_vector_rdp), intent(out) :: vec_out select type(vec_in) type is (state_vector) select type(vec_out) @@ -55,7 +66,7 @@ end subroutine direct_matvec_laplace subroutine build_operator(A) !! Build the two-dimensional Laplace operator explicitly - real(kind=wp), intent(out) :: A(N,N) + real(wp), intent(out) :: A(N,N) integer i, j, k A = -4.0_wp/dx2*eye(N) @@ -76,9 +87,9 @@ end subroutine build_operator subroutine laplacian(vec_out, vec_in) !> State vector. - real(kind=wp) , dimension(:), intent(in) :: vec_in + real(wp), dimension(:), intent(in) :: vec_in !> Time-derivative. - real(kind=wp) , dimension(:), intent(out) :: vec_out + real(wp), dimension(:), intent(out) :: vec_out !> Internal variables. integer :: i, j, in @@ -116,16 +127,16 @@ end subroutine laplacian subroutine laplacian_mat(flat_mat_out, flat_mat_in, transpose) !> State vector. - real(kind=wp) , dimension(:), intent(in) :: flat_mat_in + real(wp), dimension(:), intent(in) :: flat_mat_in !> Time-derivative. - real(kind=wp) , dimension(:), intent(out) :: flat_mat_out + real(wp), dimension(:), intent(out) :: flat_mat_out !> Transpose logical, optional :: transpose logical :: trans !> Internal variables. integer :: j - real(kind=wp), dimension(N,N) :: mat, dmat + real(wp), dimension(N,N) :: mat, dmat !> Deal with optional argument trans = optval(transpose,.false.) @@ -150,4 +161,84 @@ subroutine laplacian_mat(flat_mat_out, flat_mat_in, transpose) return end subroutine laplacian_mat + !-------------------------------------- + !----- EXP(tA) SUBROUTINE ----- + !-------------------------------------- + + subroutine exptA(vec_out, A, vec_in, tau, info, trans) + !! Subroutine for the exponential propagator that conforms with the abstract interface + !! defined in expmlib.f90 + class(abstract_vector_rdp), intent(out) :: vec_out + !! Output vector + class(abstract_linop_rdp), intent(inout) :: A + !! Linear operator + class(abstract_vector_rdp), intent(in) :: vec_in + !! Input vector. + real(wp), intent(in) :: tau + !! 
Integration horizon + integer, intent(out) :: info + !! Information flag + logical, optional, intent(in) :: trans + logical :: transpose + !! Direct or Adjoint? + + ! optional argument + transpose = optval(trans, .false.) + + ! time integrator + select type (vec_in) + type is (state_vector) + select type (vec_out) + type is (state_vector) + select type (A) + type is (laplace_operator) + call k_exptA(vec_out, A, vec_in, tau, info, transpose) + end select + end select + end select + + end subroutine exptA + + !-------------------------------------------------------- + !----- TYPE BOUND PROCEDURES FOR LTI SYSTEMS ----- + !-------------------------------------------------------- + + subroutine initialize_lti_system(self, A, B, CT, D) + class(lti_system), intent(inout) :: self + class(abstract_linop_rdp), intent(in) :: A + class(abstract_vector_rdp), intent(in) :: B(:) + class(abstract_vector_rdp), intent(in) :: CT(:) + real(wp), optional, intent(in) :: D(:,:) + + ! internal variables + integer :: rk_b, rk_c + + ! Operator + select type (A) + type is (laplace_operator) + allocate(self%A, source=A) + end select + ! Input + select type (B) + type is (state_vector) + rk_b = size(B) + allocate(self%B(1:rk_b), source=B(1:rk_b)) + end select + ! Output + select type (CT) + type is (state_vector) + rk_c = size(CT) + allocate(self%CT(1:rk_c), source=CT(1:rk_c)) + end select + ! Throughput + allocate(self%D(1:rk_c, 1:rk_b)) + if (present(D)) then + call assert_shape(D, (/ rk_c, rk_b /), 'initialize_lti_system', 'D') + self%D = D + else + self%D = 0.0_wp + end if + return + end subroutine initialize_lti_system + end module Laplacian2D_LTI_Riccati_Operators \ No newline at end of file diff --git a/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_utils.f90 b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_utils.f90 new file mode 100644 index 0000000..c22a2d1 --- /dev/null +++ b/example/DLRA_laplacian2D_lti_riccati/laplacian2D_lti_riccati_utils.f90 @@ -0,0 +1,199 @@ +module Laplacian2D_LTI_Riccati_Utils + ! Standard Library. + use stdlib_math, only : linspace + use stdlib_optval, only : optval + use stdlib_linalg, only : diag, svd + ! RKLIB module for time integration. + use rklib_module + ! LightKrylov for linear algebra. + use LightKrylov + use LightKrylov, only : wp => dp + use LightKrylov_AbstractVectors ! zero_basis + use LightKrylov_Utils, only : assert_shape + ! Laplacian + use Laplacian2D_LTI_Riccati_Base + use laplacian2D_LTI_Riccati_Operators + implicit none + + private :: this_module + ! initialisation + public :: initialize_problem + ! utils for state vector/matrix + public :: get_state, set_state, init_rand + ! initial condition + public :: generate_random_initial_condition + ! misc + public :: CARE + + character*128, parameter :: this_module = 'Laplacian2D_LTI_Riccati_Utils' + +contains + + !--------------------------------------- + !----- CONSTRUCT THE MESH ----- + !--------------------------------------- + + subroutine initialize_problem(magQ, magR) + implicit none + real(wp), intent(in) :: magQ, magR + ! internals + real(wp), allocatable :: x(:) + real(wp) :: Bwrk(N,rk_b) + integer :: i + + !> Construct mesh. + x = linspace(-L/2, L/2, nx) + + ! Define C, Qc & compute CTQcC + Qc = magQ*eye(rk_c) + + call init_rand(CT, ifnorm = .false.) + call get_state(CTdata, CT) + CTQcCdata = matmul(CTdata, matmul( Qc, transpose(CTdata))) + CTQcC(1:N**2) = reshape(CTQcCdata, shape(CTQcC)) + + ! Define B, Rinv & compule BRinvBT + if (magR .lt. 
atol_dp) then + Rinv = 0.0_wp + else + Rinv = 1/magR*eye(rk_b) + endif + + call init_rand(B, ifnorm = .false.) + Bdata = 0.0_wp + call get_state(Bdata, B) + Bwrk = 0.0_wp + Bwrk = matmul(Bdata, Rinv) + BRinvBTdata = matmul( Bwrk, transpose(Bdata) ) + + return + end subroutine initialize_problem + + !-------------------------------------------------------------------- + !----- UTILITIES FOR STATE_VECTOR AND STATE MATRIX TYPES ----- + !-------------------------------------------------------------------- + + subroutine get_state(mat_out, state_in) + !! Utility function to transfer data from a state vector to a real array + real(wp), intent(out) :: mat_out(:,:) + class(abstract_vector_rdp), intent(in) :: state_in(:) + ! internal variables + integer :: k, kdim + mat_out = 0.0_wp + select type (state_in) + type is (state_vector) + kdim = size(state_in) + call assert_shape(mat_out, (/ N, kdim /), 'get_state -> state_vector', 'mat_out') + do k = 1, kdim + mat_out(:,k) = state_in(k)%state + end do + type is (state_matrix) + call assert_shape(mat_out, (/ N, N /), 'get_state -> state_matrix', 'mat_out') + mat_out = reshape(state_in(1)%state, (/ N, N /)) + end select + return + end subroutine get_state + + subroutine set_state(state_out, mat_in) + !! Utility function to transfer data from a real array to a state vector + class(abstract_vector_rdp), intent(out) :: state_out(:) + real(wp), intent(in) :: mat_in(:,:) + ! internal variables + integer :: k, kdim + select type (state_out) + type is (state_vector) + kdim = size(state_out) + call assert_shape(mat_in, (/ N, kdim /), 'set_state -> state_vector', 'mat_in') + call zero_basis(state_out) + do k = 1, kdim + state_out(k)%state = mat_in(:,k) + end do + type is (state_matrix) + call assert_shape(mat_in, (/ N, N /), 'set_state -> state_matrix', 'mat_in') + call zero_basis(state_out) + state_out(1)%state = reshape(mat_in, shape(state_out(1)%state)) + end select + return + end subroutine set_state + + subroutine init_rand(state, ifnorm) + !! Utility function to initialize a state vector with random data + class(abstract_vector_rdp), intent(inout) :: state(:) + logical, optional, intent(in) :: ifnorm + ! internal variables + integer :: k, kdim + logical :: normalize + normalize = optval(ifnorm,.true.) + select type (state) + type is (state_vector) + kdim = size(state) + do k = 1, kdim + call state(k)%rand(ifnorm = normalize) + end do + type is (state_matrix) + kdim = size(state) + do k = 1, kdim + call state(k)%rand(ifnorm = normalize) + end do + end select + return + end subroutine init_rand + + !-------------------------------------- + !----- INITIAL CONDITIONS ----- + !-------------------------------------- + + subroutine generate_random_initial_condition(U, S, rk) + class(state_vector), intent(out) :: U(:) + real(wp), intent(out) :: S(:,:) + integer, intent(in) :: rk + ! internals + class(state_vector), allocatable :: Utmp(:) + integer, allocatable :: perm(:) + ! SVD + real(wp) :: U_svd(rk,rk) + real(wp) :: S_svd(rk) + real(wp) :: V_svd(rk,rk) + integer :: i, info + + if (size(U) < rk) then + write(*,*) 'Input krylov basis size incompatible with requested rank', rk + STOP 1 + else + call init_rand(U, .false.) + end if + if (size(S,1) < rk) then + write(*,*) 'Input coefficient matrix size incompatible with requested rank', rk + STOP 1 + else if (size(S,1) /= size(S,2)) then + write(*,*) 'Input coefficient matrix must be square.' + STOP 2 + else + S = 0.0_wp + end if + ! 
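+      ! The steps below construct a random symmetric positive semi-definite low-rank
+      ! initial condition X0 = U @ S @ U.T: a pivoted QR orthonormalises the random basis,
+      ! and the SVD of the triangular factor diagonalises S before the basis is rotated accordingly.
+      !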
perform QR + allocate(perm(1:rk)); perm = 0 + allocate(Utmp(1:rk), source=U(1:rk)) + call qr(Utmp, S, perm, info, verbosity=.false.) + if (info /= 0) write(*,*) ' [generate_random_initial_condition] Info: Colinear vectors detected in QR, column ', info + ! perform SVD + call svd(S(:,1:rk), S_svd(1:rk), U_svd(:,1:rk), V_svd(1:rk,1:rk)) + S = diag(S_svd) + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, Utmp, U_svd) + call copy_basis(U, Xwrk) + end block + + end subroutine + + !------------------------ + !----- MISC ----- + !------------------------ + + function CARE(X,A,Q,BRinvBT) result(Y) + real(wp), dimension(n,n) :: X, A, Q, BRinvBT, Y + Y = matmul(transpose(A), X) + matmul(X, A) + Q - matmul(X, matmul(BRinvBT, X)) + end function CARE + +end module Laplacian2D_LTI_Riccati_Utils \ No newline at end of file diff --git a/example/DLRA_laplacian2D_lti_riccati/main.f90 b/example/DLRA_laplacian2D_lti_riccati/main.f90 index 9ce3e76..90e1ba9 100644 --- a/example/DLRA_laplacian2D_lti_riccati/main.f90 +++ b/example/DLRA_laplacian2D_lti_riccati/main.f90 @@ -1,28 +1,31 @@ program demo + ! Standard Library + use stdlib_optval, only : optval + use stdlib_linalg, only : eye, diag + use stdlib_math, only : all_close, logspace + use stdlib_io_npy, only : save_npy + ! LightKrylov for Linear Algebra use LightKrylov - use LightKrylov_expmlib - use LightKrylov_utils - + use LightKrylov, only : wp => dp + use LightKrylov_AbstractVectors + use LightKrylov_ExpmLib + use LightKrylov_Utils + ! LightROM use LightROM_AbstractLTIsystems - use LightROM_utils - + use LightROM_Utils use LightROM_LyapunovSolvers use LightROM_LyapunovUtils - use LightROM_RiccatiSolvers - + use LightROM_RiccatiSolvers, only : projector_splitting_DLRA_riccati_integrator + ! Laplacian use Laplacian2D_LTI_Riccati_Base use Laplacian2D_LTI_Riccati_Operators use Laplacian2D_LTI_Riccati_RKlib - - use stdlib_optval, only : optval - use stdlib_linalg, only : eye - use stdlib_math, only : all_close, logspace - use stdlib_io_npy, only : save_npy + use Laplacian2D_LTI_Riccati_Utils implicit none ! DLRA integer, parameter :: rkmax = 14 - integer, parameter :: rk_X0 = 10 + integer, parameter :: rk_X0 = 14 logical, parameter :: verb = .false. logical, parameter :: save = .false. character*128 :: oname @@ -49,14 +52,16 @@ program demo type(state_vector), allocatable :: U(:) real(wp) , allocatable :: S(:,:) - !> STATE MATRIX (RKlib) + ! STATE MATRIX (RKlib) type(state_matrix) :: X_mat_RKlib(2) real(wp), allocatable :: X_RKlib(:,:,:) real(wp) :: X_RKlib_ref(N,N) ! Initial condition - real(wp) :: U0(N, rkmax) + type(state_vector) :: U0(rkmax) real(wp) :: S0(rkmax,rkmax) + ! matrix + real(wp) :: U0_in(N,rkmax) real(wp) :: X0(N,N) ! OUTPUT @@ -70,27 +75,25 @@ program demo ! PROBLEM DEFINITION real(wp) :: Adata(N,N) - ! SVD - real(wp) :: U_svd(N,N) - real(wp) :: S_svd(rkmax) - real(wp) :: V_svd(rkmax,rkmax) - ! LAPACK SOLUTION RICATTI - real(kind=wp) :: Hdata(2*N,2*N) - real(kind=wp) :: wr(2*N), wi(2*N) - real(kind=wp) :: VR(2*N,2*N) + real(wp) :: Hdata(2*N,2*N) + real(wp) :: wr(2*N), wi(2*N) + real(wp) :: VR(2*N,2*N) integer, parameter :: lwork = 1040 - real(kind=wp) :: work(lwork) - real(kind=wp) :: UR(2*N,N) - real(kind=wp) :: UI(2*N,N) + real(wp) :: work(lwork) + real(wp) :: UR(2*N,N) + real(wp) :: UI(2*N,N) logical :: flag - real(kind=wp) :: F(N,N) - real(kind=wp) :: Ginv(N,N) - real(kind=wp) :: Xref(N,N) + real(wp) :: F(N,N) + real(wp) :: Ginv(N,N) + real(wp) :: Xref(N,N) integer :: icnt ! 
timer integer :: clock_rate, clock_start, clock_stop + + ! DLRA opts + type(dlra_opts) :: opts call system_clock(count_rate=clock_rate) @@ -99,11 +102,8 @@ program demo ! Define LTI system LTI = lti_system() - allocate(LTI%A, source=A) - allocate(LTI%B(1:rk_b), source=B(1:rk_b)) - allocate(LTI%CT(1:rk_c), source=CT(1:rk_c)) - allocate(LTI%D(1:rk_c,1:rk_b)); LTI%D = 0.0_wp - + call LTI%initialize_lti_system(A, B, CT) + write(*,*) write(*,*) 'RICCATI EQUATION FOR THE 2D LAPLACE OPERATOR:' write(*,*) @@ -169,27 +169,21 @@ program demo call inv(Ginv) Xref = matmul(F, Ginv) - ! sanity check - !call print_mat(N,N,Xref) - !X0 = CARE(Xref, Adata, CTQcCdata, BRinvBTdata) - call system_clock(count=clock_stop) ! Stop Timer write(*,'(A40,F10.4," s")') '--> X_ref. Elapsed time:', real(clock_stop-clock_start)/real(clock_rate) write(*,*) - ! Define initial condition - U0 = 0.0_wp - call random_number(U0(:, 1:rk_X0)) - ! Compute SVD to get low-rank representation - call svd(U0(:,1:rk_X0), U_svd(:,1:N), S_svd(1:rk_X0), V_svd(1:rk_X0,1:rk_X0)) - S0 = 0.0_wp - do i = 1,rk_X0 - S0(i,i) = S_svd(i) - end do - U0(:,1:rk_X0) = U_svd(:,1:rk_X0) + ! sanity check + X0 = CARE(Xref, Adata, CTQcCdata, BRinvBTdata) + write(*,*) ' Direct problem:', norm2(X0)/N + + ! Define initial condition of the form X0 + U0 @ S0 @ U0.T SPD + if (verb) write(*,*) ' Define initial condition' + call generate_random_initial_condition(U0, S0, rk_X0) + call get_state(U_out, U0) ! Compute the full initial condition X0 = U_in @ S0 @ U_in.T - X0 = matmul( U0(:,1:rk_X0), matmul(S0(1:rk_X0,1:rk_X0), transpose(U0(:,1:rk_X0)))) + X0 = matmul( U_out(:,1:rk_X0), matmul(S0(1:rk_X0,1:rk_X0), transpose(U_out(:,1:rk_X0)))) write(*,*) write(*,*) 'II. Compute approximate solution of the differential Riccati equation using RKlib:' @@ -239,14 +233,10 @@ program demo X = LR_state() - do torder = 1, 2 + do torder = 1, 1 ! 2 do i = 1, nrk rk = rkv(i) - allocate(U(1:rk)); call mat_zero(U) - allocate(S(1:rk,1:rk)); S = 0.0_wp - allocate(X%U(1:rk), source=U(1:rk)) - allocate(X%S(1:rk,1:rk)); write(*,'(A10,I1)') ' torder = ', torder do j = ndt, 1, -1 @@ -254,11 +244,16 @@ program demo if (verb) write(*,*) ' dt = ', dt, 'Tend = ', Tend ! Reset input - call X%set_LR_state(U0(:,1:rk), S0(1:rk,1:rk)) + call X%initialize_LR_state(U0, S0, rk) + + ! set options + opts = dlra_opts(mode=torder, verbose=verb) ! run step call system_clock(count=clock_start) ! Start Timer - call numerical_low_rank_splitting_riccati_integrator(X, LTI, Qc, Rinv, Tend, dt, torder, info) + call projector_splitting_DLRA_riccati_integrator(X, LTI%A, LTI%B, LTI%CT, Qc, Rinv, & + & Tend, dt, torder, info, & + & exptA=exptA, iftrans=.false., options=opts) call system_clock(count=clock_stop) ! Stop Timer ! Reconstruct solution @@ -269,6 +264,9 @@ program demo & rk, torder, dt, Tend, & & norm2(X_RKlib_ref - X_out)/N, & & real(clock_stop-clock_start)/real(clock_rate) + + deallocate(X%U) + deallocate(X%S) end do if (save) then @@ -276,11 +274,6 @@ program demo call save_npy(oname, X_out) end if - deallocate(X%U); - deallocate(X%S); - deallocate(U); - deallocate(S); - end do end do deallocate(rkv); deallocate(dtv); @@ -289,8 +282,8 @@ program demo call initialize_problem(1.0_wp, 1.0_wp) ! Reset LTI system - call mat_copy(LTI%B, B) - call mat_copy(LTI%CT, CT) + call copy_basis(LTI%B, B) + call copy_basis(LTI%CT, CT) write(*,*) write(*,*) 'RICCATI EQUATION FOR THE 2D LAPLACE OPERATOR:' @@ -356,26 +349,21 @@ program demo Xref = matmul(F, Ginv) ! 
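! note: CARE(Xref, Adata, CTQcCdata, BRinvBTdata) evaluates the algebraic Riccati residual
! at the reference solution Xref; the scaled norm printed below should be close to machine zero
! if the reference solve succeeded.
!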
sanity check - !call print_mat(N,N,Xref) - !X0 = CARE(Xref, Adata, CTQcCdata, BRinvBTdata) + X0 = CARE(Xref, Adata, CTQcCdata, BRinvBTdata) + write(*,*) ' Direct problem:', norm2(X0)/N call system_clock(count=clock_stop) ! Stop Timer write(*,'(A40,F10.4," s")') '--> X_ref. Elapsed time:', real(clock_stop-clock_start)/real(clock_rate) write(*,*) ! Define initial condition - U0 = 0.0_wp - call random_number(U0(:, 1:rk_X0)) - ! Compute SVD to get low-rank representation - call svd(U0(:,1:rk_X0), U_svd(:,1:N), S_svd(1:rk_X0), V_svd(1:rk_X0,1:rk_X0)) - S0 = 0.0_wp - do i = 1,rk_X0 - S0(i,i) = S_svd(i) - end do - U0(:,1:rk_X0) = U_svd(:,1:rk_X0) + if (verb) write(*,*) 'Define initial condition' + call generate_random_initial_condition(U0, S0, rk_X0) + call get_state(U_out, U0) ! Compute the full initial condition X0 = U_in @ S0 @ U_in.T - X0 = matmul( U0(:,1:rk_X0), matmul(S0(1:rk_X0,1:rk_X0), transpose(U0(:,1:rk_X0)))) + X0 = matmul( U_out(:,1:rk_X0), matmul(S0(1:rk_X0,1:rk_X0), transpose(U_out(:,1:rk_X0)))) + write(*,*) write(*,*) 'II. Compute approximate solution of the differential Riccati equation using RKlib:' @@ -421,13 +409,10 @@ program demo X = LR_state() - do torder = 1, 2 + do torder = 1, 1 !2 do i = 1, nrk rk = rkv(i) - allocate(U(1:rk)); call mat_zero(U) - allocate(X%U(1:rk), source=U(1:rk)) - allocate(X%S(1:rk,1:rk)); write(*,'(A10,I1)') ' torder = ', torder do j = ndt, 1, -1 @@ -435,11 +420,16 @@ program demo if (verb) write(*,*) ' dt = ', dt, 'Tend = ', Tend ! Reset input - call X%set_LR_state(U0(:,1:rk), S0(1:rk,1:rk)) + call X%initialize_LR_state(U0, S0, rk) + + ! set options + opts = dlra_opts(mode=torder, verbose=verb) ! run step call system_clock(count=clock_start) ! Start Timer - call numerical_low_rank_splitting_riccati_integrator(X, LTI, Qc, Rinv, Tend, dt, torder, info) + call projector_splitting_DLRA_riccati_integrator(X, LTI%A, LTI%B, LTI%CT, Qc, Rinv, & + & Tend, dt, torder, info, & + & exptA=exptA, iftrans=.false., options=opts) call system_clock(count=clock_stop) ! Stop Timer ! Reconstruct solution @@ -450,6 +440,9 @@ program demo & rk, torder, dt, Tend, & & norm2(X_RKlib_ref - X_out)/N, & & real(clock_stop-clock_start)/real(clock_rate) + + deallocate(X%U) + deallocate(X%S) end do if (save) then @@ -457,10 +450,6 @@ program demo call save_npy(oname, X_out) end if - deallocate(X%U); - deallocate(X%S); - deallocate(U); - end do end do diff --git a/example/ginzburg_landau/demo_gl.f90 b/example/ginzburg_landau/demo_gl.f90 deleted file mode 100644 index 8f78bf8..0000000 --- a/example/ginzburg_landau/demo_gl.f90 +++ /dev/null @@ -1,5 +0,0 @@ -program demo -implicit none - -print *, "Put some examples in here!" 
-end program demo diff --git a/fpm.toml b/fpm.toml index 391d91b..89afa0c 100644 --- a/fpm.toml +++ b/fpm.toml @@ -23,12 +23,11 @@ source-form = "free" [dependencies] stdlib = "*" -LightKrylov = { path = "../LightKrylov" } -#{ git = "https://github.com/nekStab/LightKrylov.git", branch = "dev"} +#LightKrylov = { path = "../LightKrylov" } +LightKrylov = { git = "https://github.com/nekStab/LightKrylov.git", branch = "dev"} [dev-dependencies] -test-drive.git = "https://github.com/fortran-lang/test-drive" -test-drive.tag = "v0.4.0" +test-drive.git = "https://github.com/nekStab/test-drive" [[example]] @@ -45,3 +44,12 @@ source-dir = "example/DLRA_laplacian2D_lti_riccati" [example.dependencies] rklib = { git="https://github.com/jacobwilliams/rklib.git" } + +[[example]] + +name = "DLRA_ginzburg_landau" +source-dir = "example/DLRA_ginzburg_landau" + +[example.dependencies] +rklib = { git="https://github.com/jacobwilliams/rklib.git" } +fortime = { git = "https://github.com/gha3mi/fortime.git" } diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 0000000..9eee5ca --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,13 @@ +# Gfortran. +#python deployment.py + +fpm test --compiler "gfortran" --flag "-O0 -g3 -fbacktrace -Wall -Wextra -fcheck=all -pedantic -Wconversion -fbounds-check -ffpe-trap=zero,overflow,underflow" --verbose +#fpm test --compiler "gfortran" --flag "-O3 -march=native -mtune=native" + +# Ifort +#fpm test --compiler "ifort" +#fpm test --compiler "ifort" --flag "-O3 -xhost" + +# Ifx +#fpm test --compiler "ifx" +#fpm test --compiler "ifx" --flag "-O3 -xhost" diff --git a/src/AbstractLTIsystems.f90 b/src/AbstractLTIsystems.f90 index c8f051f..fa12867 100644 --- a/src/AbstractLTIsystems.f90 +++ b/src/AbstractLTIsystems.f90 @@ -1,8 +1,7 @@ module LightROM_AbstractLTIsystems !> Use the abstract linear operator types defined in LightKrylov. - use LightKrylov, only : abstract_linop, abstract_vector + use LightKrylov, only : abstract_linop_rdp, abstract_vector_rdp, wp => dp implicit none - include "dtypes.h" private @@ -15,46 +14,35 @@ module LightROM_AbstractLTIsystems end type abstract_dynamical_system !> Abstract continuous LTI system. - type, extends(abstract_dynamical_system), abstract, public :: abstract_lti_system + type, extends(abstract_dynamical_system), abstract, public :: abstract_lti_system_rdp !> Dynamics matrix. - class(abstract_linop), allocatable :: A + class(abstract_linop_rdp), allocatable :: A + !> Exponential propagator. + class(abstract_linop_rdp), allocatable :: prop !> Input-to-state matrix. - class(abstract_vector), allocatable :: B(:) + class(abstract_vector_rdp), allocatable :: B(:) !> State-to-output matrix. - class(abstract_vector), allocatable :: CT(:) + class(abstract_vector_rdp), allocatable :: CT(:) !> Feedthrough matrix. - real(kind=wp) , allocatable :: D(:, :) + real(wp), allocatable :: D(:, :) contains - end type abstract_lti_system + end type abstract_lti_system_rdp !> Abstract discrete LTI system. - type, extends(abstract_dynamical_system), abstract, public :: abstract_dlti_system + type, extends(abstract_dynamical_system), abstract, public :: abstract_dlti_system_rdp !> Dynamic matrix. - class(abstract_linop), allocatable :: A + class(abstract_linop_rdp), allocatable :: A !> Input-to-state matrix. - class(abstract_vector), allocatable :: B(:) + class(abstract_vector_rdp), allocatable :: B(:) !> State-to-output matrix. - class(abstract_vector), allocatable :: CT(:) + class(abstract_vector_rdp), allocatable :: CT(:) !> Feedthrough matrix. 
- real(kind=wp) , allocatable :: D(:, :) + real(wp), allocatable :: D(:, :) !> Sampling period. - real(kind=wp) :: dt = 1.0_wp + real(wp) :: dt = 1.0_wp contains private - end type abstract_dlti_system - - !> Abstract reduced-order continuous LTI system. - type, extends(abstract_dynamical_system), abstract, public :: abstract_ROM_lti_system - !> Dynamics matrix. - real(kind=wp) , allocatable :: A(:, :) - !> Input-to-state matrix. - real(kind=wp) , allocatable :: B(:, :) - !> State-to-output matrix. - real(kind=wp) , allocatable :: C(:, :) - !> Feedthrough matrix. - real(kind=wp) , allocatable :: D(:, :) - contains - end type abstract_ROM_lti_system + end type abstract_dlti_system_rdp !-------------------------------------------------------------------- !----- ABSTRACT LOW RANK REPRESENTATION TYPE DEFINITION ----- @@ -65,13 +53,17 @@ module LightROM_AbstractLTIsystems end type abstract_low_rank_representation !> Abstract symmetric low-rank representation. - type, extends(abstract_low_rank_representation), abstract, public :: abstract_sym_low_rank_state + type, extends(abstract_low_rank_representation), abstract, public :: abstract_sym_low_rank_state_rdp !> Low-Rank basis. - class(abstract_vector), allocatable :: U(:) + class(abstract_vector_rdp), allocatable :: U(:) !> Coefficients - real(kind=wp) , allocatable :: S(:, :) + real(wp), allocatable :: S(:, :) + !> Current approximation rank + integer :: rk = 1 + !> Has rank been initialized? (for rank-adaptive DLRA) + logical :: rank_is_initialised = .false. contains - end type abstract_sym_low_rank_state + end type abstract_sym_low_rank_state_rdp contains diff --git a/src/LightROM.f90 b/src/LightROM.f90 index ac995ea..3f1d6bf 100644 --- a/src/LightROM.f90 +++ b/src/LightROM.f90 @@ -5,11 +5,11 @@ module LightROM private !> Global variables. - public :: greetings, wp, atol, rtol + public :: greetings_LightROM, wp, atol, rtol contains - subroutine greetings() + subroutine greetings_LightROM() write(*, *) write(*, *) write(*, *) "-------------------------------------------------" @@ -38,6 +38,6 @@ subroutine greetings() write(*, *) "-------------------------------------------------" write(*, *) write(*, *) - end subroutine greetings + end subroutine greetings_LightROM end module LightROM diff --git a/src/LyapunovSolvers.f90 b/src/LyapunovSolvers.f90 index 5895815..3289db7 100644 --- a/src/LyapunovSolvers.f90 +++ b/src/LyapunovSolvers.f90 @@ -1,34 +1,77 @@ module LightROM_LyapunovSolvers !! This module provides the implementation of the Krylov-based solvers for the Differential Lyapunov !! equation based on the dynamic low-rank approximation and operator splitting. - + ! Standard library + use stdlib_linalg, only : eye, diag, svd, svdvals + use stdlib_optval, only : optval ! LightKrylov modules use LightKrylov - use LightKrylov_expmlib + use LightKrylov, only: wp => dp + use LightKrylov_Logger + use LightKrylov_AbstractVectors + use LightKrylov_ExpmLib use LightKrylov_BaseKrylov ! LightROM modules use LightROM_AbstractLTIsystems use LightROM_LyapunovUtils - use LightROM_utils - ! Standard library - use stdlib_linalg, only : eye - use stdlib_optval, only : optval + use LightROM_Utils + implicit none ! 
global scratch arrays - class(abstract_vector) , allocatable :: U1(:) - class(abstract_vector), allocatable :: Uwrk(:) - class(abstract_vector) , allocatable :: BBTU(:) - real(kind=wp), allocatable :: Swrk(:,:) - - private - public :: numerical_low_rank_splitting_lyapunov_integrator - public :: M_forward_map, G_forward_map_lyapunov, K_step_lyapunov, S_step_lyapunov, L_step_lyapunov + class(abstract_vector_rdp), allocatable :: U1(:) + class(abstract_vector_rdp), allocatable :: Uwrk(:) + class(abstract_vector_rdp), allocatable :: BBTU(:) + real(wp), allocatable :: Swrk(:,:) + + ! lagged solution for computation of increment norm + class(abstract_vector_rdp), allocatable :: U_lag(:) + real(wp), allocatable :: S_lag(:,:) + + ! svd + real(wp), allocatable :: ssvd(:) + real(wp), allocatable :: Usvd(:,:), VTsvd(:,:) + + ! module name + private :: this_module + character*128, parameter :: this_module = 'LightROM_LyapunovSolvers' + + public :: projector_splitting_DLRA_lyapunov_integrator + public :: M_forward_map + public :: G_forward_map_lyapunov + public :: K_step_lyapunov + public :: S_step_lyapunov + public :: L_step_lyapunov + + interface projector_splitting_DLRA_lyapunov_integrator + module procedure projector_splitting_DLRA_lyapunov_integrator_rdp + end interface + + interface M_forward_map + module procedure M_forward_map_rdp + end interface + + interface G_forward_map_lyapunov + module procedure G_forward_map_lyapunov_rdp + end interface + + interface K_step_lyapunov + module procedure K_step_lyapunov_rdp + end interface + + interface S_step_lyapunov + module procedure S_step_lyapunov_rdp + end interface + + interface L_step_lyapunov + module procedure L_step_lyapunov_rdp + end interface contains - subroutine numerical_low_rank_splitting_lyapunov_integrator(X,LTI,Tend,tau,torder,info,exptA,iftrans) - !! Numerical integrator for the matrix-valued differential Lyapunov equation of the form + subroutine projector_splitting_DLRA_lyapunov_integrator_rdp(X, A, B, Tend, tau, info, & + & exptA, iftrans, options) + !! Main driver for the numerical integrator for the matrix-valued differential Lyapunov equation of the form !! !! $$ \dot{\mathbf{X}} = \mathbf{A} \mathbf{X} + \mathbf{X} \mathbf{A}^T + \mathbf{B} \mathbf{B}^T $$ !! @@ -40,51 +83,54 @@ subroutine numerical_low_rank_splitting_lyapunov_integrator(X,LTI,Tend,tau,torde !! !! $$ \mathbf{0} = \mathbf{A} \mathbf{X} + \mathbf{X} \mathbf{A}^T + \mathbf{B} \mathbf{B}^T $$ !! - !! The algorithm is based on three main ideas: + !! The algorithm is based on four main ideas: !! - !! - The operator splitting scheme proposed by Lubich & Oseledets (2014) that splits the - !! right-hand side of the differential equation into a linear stiff part that is solved - !! explicitly and a possibly non-linear non-stiff part which is solved numerically. The - !! two operators are then composed to obtain the integrator for the full Lyapunov equation. - !! - The Dynamic Low-Rank Approximation for the solution of general matrix differential - !! equations proposed by Nonnenmacher & Lubich (2007) which seeks to integrate only the - !! leading low-rank factors of the solution to a large system by updating the matrix - !! factorization. The dynamical low-rank approximation scheme for the low-rank factors - !! of the solution is itself solved using a projector-splitting technique to cheaply - !! maintain orthonormality or the low-rank basis without explicit SVDs. - !! - This algorithm has been applied to the Lyapunov and Riccati equations by Mena et al. - !! 
(2018) with improvements taking advantage of the symmetry of the problem/solution.
+ !! - Dynamic Low-Rank Approximation (DLRA). DLRA is a method for the solution of general matrix differential
+ !! equations proposed by Nonnenmacher & Lubich (2007) which seeks to integrate only the leading low-rank
+ !! factors of the solution to a large system by updating an appropriate matrix factorization. The time-integration
+ !! is achieved by splitting the step into three sequential substeps, each updating a part of the factorization
+ !! taking advantage of and maintaining the orthogonality of the left and right low-rank bases of the factorization.
+ !! - Projector-Splitting Integration (PSI). The projector-splitting scheme proposed by Lubich & Oseledets (2014)
+ !! for the solution of DLRA splits the right-hand side of the differential equation into a linear stiff part
+ !! that is integrated exactly and a (possibly non-linear) non-stiff part which is integrated numerically.
+ !! The two operators are then composed to obtain the integrator for the full differential equation.
+ !! The advantage of the projector splitting integration is that it maintains orthonormality of the basis
+ !! of the low-rank approximation to the solution without requiring SVDs of the full matrix.
+ !! - The third element is the application of the general framework of projector-splitting integration for
+ !! dynamical low-rank approximation to the Lyapunov equations by Mena et al. (2018). As the solutions
+ !! to the Lyapunov equation are by construction SPD, this fact can be taken advantage of to reduce the
+ !! computational cost of the integration and, in particular, doing away with one QR factorization per timestep
+ !! while maintaining symmetry of the resulting matrix factorization.
+ !! - The final element is the addition of the capability of dynamic rank adaptivity for the projector-splitting
+ !! integrator proposed by Hochbruck et al. (2023). At the cost of integrating a supplementary solution vector,
+ !! the rank of the solution is dynamically adapted to ensure that the corresponding additional singular value
+ !! stays below a chosen threshold.
!!
!! **Algorithmic Features**
!!
- !! - Separate integration of the stiff inhomogeneous part of the Lyapunov equation and the
- !! non-stiff inhomogeneity
- !! - Rank preserving time-integration that maintains orthonormality of the factorization
- !! basis
+ !! - Separate integration of the stiff inhomogeneous part of the Lyapunov equation and the non-stiff inhomogeneity
+ !! - Rank preserving time-integration that maintains orthonormality of the factorization basis
+ !! - Alternatively, dynamical rank-adaptivity based on the instantaneous singular values
!! - The stiff part of the problem is solved using a time-stepper approach to approximate
!! the action of the exponential propagator
!!
!! **Advantages**
!!
- !! - Rank of the approximate solution is user defined
- !! - The timesteps of the stiff and non-stiff parts of the code are independent
+ !! - Rank of the approximate solution is user defined or chosen adaptively based on the solution
!! - The integrator is adjoint-free
- !! - The operator of the homogeneous part and the inhomogeneity are not needed explicitly
- !! i.e. the algorithm is amenable to solution using Krylov methods (in particular for
- !! the solution of the stiff part of the problem)
- !! - No SVDs are necessary for this alogorithm
- !! - Lie and Strang splitting implemented allowing for first and second order integration
- !! in time
+ !! - The operator of the homogeneous part and the inhomogeneity are not needed explicitly i.e. the algorithm
+ !! is amenable to solution using Krylov methods (in particular for the solution of the stiff part of the problem)
+ !! - No SVDs of the full solution are required for this algorithm
+ !! - Lie and Strang splitting implemented allowing for first and second order integration in time
!!
!! ** Limitations**
!!
- !! - Rank of the approximate solution is user defined. The appropriateness of this
- !! approximation is not considered
- !! - The current implementation does not require an adjoint integrator. This means that
- !! the temporal order of the basic operator splitting scheme is limited to 1 (Lie-Trotter
- !! splitting) or at most 2 (Strang splitting). Higher order integrators are possible, but
- !! require at least some backward integration (via the adjoint) in BOTH parts of the splitting.
- !! (see Sheng-Suzuki and Goldman-Kaper theorems)
+ !! - Rank of the approximate solution is user defined. The appropriateness of this approximation is not considered.
+ !! This does not apply to the rank-adaptive version of the integrator.
+ !! - The current implementation does not require an adjoint integrator. This means that the temporal order of the
+ !! basic operator splitting scheme is limited to 1 (Lie-Trotter splitting) or at most 2 (Strang splitting).
+ !! Higher order integrators are possible, but require at least some backward integration (via the adjoint)
+ !! in BOTH parts of the splitting (see Sheng-Suzuki and Goldman-Kaper theorems).
!!
!! **References**
!!
@@ -95,297 +141,758 @@ subroutine numerical_low_rank_splitting_lyapunov_integrator(X,LTI,Tend,tau,torde
!! - Mena, H., Ostermann, A., Pfurtscheller, L.-M., Piazzola, C. (2018). "Numerical low-rank
!! approximation of matrix differential equations", Journal of Computational and Applied Mathematics,
!! 340, 602-614
- class(abstract_sym_low_rank_state), intent(inout) :: X
+ !! - Hochbruck, M., Neher, M., Schrammer, S. (2023). "Rank-adaptive dynamical low-rank integrators for
+ !! first-order and second-order matrix differential equations", BIT Numerical Mathematics 63:9
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
!! Low-Rank factors of the solution.
- class(abstract_lti_system), intent(in) :: LTI
- !! LTI dynamical system defining the problem.
- real(kind=wp), intent(in) :: Tend
+ class(abstract_linop_rdp), intent(inout) :: A
+ !! Linear operator
+ class(abstract_vector_rdp), intent(in) :: B(:)
+ !! Low-Rank inhomogeneity.
+ real(wp), intent(in) :: Tend
!! Integration time horizon.
- real(kind=wp), intent(inout) :: tau
+ real(wp), intent(inout) :: tau
!! Desired time step. The actual time-step will be computed so as to reach Tend in an integer number
!! of steps.
- integer, intent(in) :: torder
- !! Order of time integration. Only 1st (Lie splitting) and 2nd (Strang splitting) orders are implemented.
- integer, intent(out) :: info
+ integer, intent(out) :: info
!! Information flag
- procedure(abstract_exptA), optional :: exptA
+ procedure(abstract_exptA_rdp), optional :: exptA
!! Routine for computation of the exponential propagator (default: Krylov-based exponential operator).
- logical, optional, intent(in) :: iftrans
+ logical, optional, intent(in) :: iftrans
+ logical :: trans
!! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used.
+ type(dlra_opts), optional, intent(in) :: options
+ type(dlra_opts) :: opts
+ !! Options for solver configuration
- ! Internal variables
- integer :: istep, nsteps, iostep
- real(kind=wp) :: T
- logical, parameter :: verbose = .false.
- logical :: trans
- procedure(abstract_exptA), pointer :: p_exptA => null()
-
- ! Optional argument
+ ! Internal variables
+ integer :: istep, nsteps, chkstep
+ integer :: rk_reduction_lock ! 'timer' to disable rank reduction
+ real(wp) :: T ! simulation time
+ real(wp) :: nrm, nrmX ! increment and solution norm
+ real(wp) :: El ! aggregate error estimate
+ real(wp) :: err_est ! current error estimate
+ real(wp) :: tol ! current tolerance
+ logical :: verbose, converged
+ character*128 :: msg
+ procedure(abstract_exptA_rdp), pointer :: p_exptA => null()
+
+ ! Optional arguments
trans = optval(iftrans, .false.)
+
+ ! Options
+ if (present(options)) then
+ opts = options
+ else ! default
+ opts = dlra_opts()
+ end if
+
+ ! set tolerance and verbosity
+ tol = opts%tol
+ verbose = opts%verbose
+
if (present(exptA)) then
p_exptA => exptA
else
- p_exptA => k_exptA
- endif
+ p_exptA => k_exptA_rdp
+ end if
- T = 0.0_wp
+ ! Initialize
+ T = 0.0_wp
+ rk_reduction_lock = 10
+ converged = .false.
! Compute number of steps
nsteps = floor(Tend/tau)
-
- iostep = nsteps/10
- if ( iostep .eq. 0 ) then
- iostep = 10
+ if (verbose) then
+ write(msg,*) 'Integration over', nsteps, 'steps with dt = ', tau
+ call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+ end if
+
+ ! Determine IO step
+ chkstep = get_chkstep(opts, verbose, tau)
+
+ if ( opts%mode > 2 ) then
+ write(msg, *) "Time-integration order for the operator splitting of d > 2 &
+ & requires adjoint solves and is not implemented. Resetting mode = 2."
+ call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+ opts%mode = 2
+ else if ( opts%mode < 1 ) then
+ write(msg, *) "Invalid time-integration order specified: ", opts%mode
+ call stop_error(trim(msg), module=this_module, &
+ & procedure='projector_splitting_DLRA_lyapunov_integrator_rdp')
endif
+ ! determine initial rank if rank-adaptive
+ if (opts%if_rank_adaptive) then
+ if (.not. X%rank_is_initialised) then
+ call set_initial_rank(X, A, B, tau, opts%mode, p_exptA, trans, 1e-6_wp, verbose=.true.)
+ end if
+ if (opts%use_err_est) then
+ err_est = 0.0_wp
+ El = 0.0_wp
+ call compute_splitting_error(err_est, X, A, B, tau, opts%mode, exptA, trans)
+ tol = err_est / sqrt(256_wp - real(X%rk + 1))
+ if (verbose) then
+ write(msg, *) 'Initialization complete: rk = ', X%rk, ', local error estimate: ', tol
+ call logger%log_message(trim(msg), module=this_module, procedure='RA-DLRA')
+ end if
+ end if
+ end if
+
dlra : do istep = 1, nsteps
! dynamical low-rank approximation solver
- call numerical_low_rank_splitting_lyapunov_step(X, LTI, tau, torder, info, p_exptA, trans)
-
+ if (opts%if_rank_adaptive) then
+ call rank_adaptive_PS_DLRA_lyapunov_step_rdp(X, A, B, tau, opts%mode, info, rk_reduction_lock, &
+ & p_exptA, trans, verbose, tol)
+
+ if ( opts%use_err_est ) then
+ if ( mod(istep, opts%err_est_step) == 0 ) then
+ call compute_splitting_error(err_est, X, A, B, tau, opts%mode, exptA, trans)
+ El = El + err_est
+ tol = El / sqrt(256_wp - real(X%rk + 1))
+ if (verbose) then
+ write(msg, '(3X,I3,A,E8.2)') istep, ': recomputed error estimate: ', tol
+ call logger%log_message(trim(msg), module=this_module, procedure='RA-DLRA')
+ end if
+ else
+ El = El + err_est
+ end if
+ end if
+ !
+ else
+ call projector_splitting_DLRA_lyapunov_step_rdp(X, A, B, tau, opts%mode, info, &
+ & p_exptA, trans, verbose)
+ end if
+
+ ! update time
T = T + tau
+
+ ! save lag data
+ if ( mod(istep + 1, chkstep) == 0 .or. istep == nsteps -1 ) then
+ ! allocate lag data (we do it here so we do not need to store the data size and can pass the whole array)
+ allocate(U_lag(X%rk), source=X%U(:X%rk)) ! U_lag = X%U
+ allocate(S_lag(X%rk, X%rk)); S_lag = X%S(:X%rk,:X%rk)
+ if (verbose) then
+ write(msg, *) 'Solution saved for increment norm computation.'
+ call logger%log_debug(trim(msg), module=this_module, procedure='DLRA')
+ endif
+ end if
+
! here we can do some checks such as whether we have reached steady state
- if ( mod(istep,iostep) .eq. 0 ) then
+ if ( mod(istep, chkstep) == 0 .or. istep == nsteps ) then
if (verbose) then
- write(*, *) "INFO : ", ISTEP, " steps of DLRA computed. T = ",T
+ write(msg, '(3X,I3,A,F6.3)') istep, " steps of DLRA computed. T = ", T
+ call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
endif
+ call compute_increment_norm(nrm, X%U(:X%rk), X%S(:X%rk,:X%rk), U_lag, S_lag)
+ nrmX = compute_norm(X)
+ write(msg, '(A,I4,A,F6.2,A,E10.4,A,E10.4,A,E10.4)') "Step ", istep, ", T = ", T, &
+ & ": dX = ", nrm, ' X = ', nrmX, ' dX/X = ', nrm/nrmX
+ call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+ deallocate(U_lag); deallocate(S_lag)
+ ! Check convergence
+ converged = is_converged(nrm, nrmX, opts)
+ if (converged) then
+ write(msg, *) "Step ", istep, "Solution converged!"
+ call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+ exit dlra
+ else ! if final step
+ if (istep == nsteps) then
+ write(msg, *) "Step ", istep, "Solution not converged!"
+ call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+ end if
+ end if
endif
enddo dlra
+ if (allocated(U1)) deallocate(U1)
+ if (allocated(Uwrk)) deallocate(Uwrk)
+ if (allocated(BBTU)) deallocate(BBTU)
+
return
- end subroutine numerical_low_rank_splitting_lyapunov_integrator
+ end subroutine projector_splitting_DLRA_lyapunov_integrator_rdp
- !-----------------------------
- !----- UTILITIES -----
- !-----------------------------
+ !-----------------------
+ !----- PSI -----
+ !-----------------------
- subroutine numerical_low_rank_splitting_lyapunov_step(X, LTI, tau, torder, info, exptA, iftrans)
- class(abstract_sym_low_rank_state), intent(inout) :: X
+ subroutine projector_splitting_DLRA_lyapunov_step_rdp(X, A, B, tau, mode, info, exptA, trans, verbose)
+ !! Driver for the time-stepper defining the splitting logic for each step of the
+ !! projector-splitting integrator
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
!! Low-Rank factors of the solution.
- class(abstract_lti_system), intent(in) :: LTI
- !! LTI dynamical system defining the problem.
- real(kind=wp), intent(in) :: tau
+ class(abstract_linop_rdp), intent(inout) :: A
+ !! Linear operator
+ class(abstract_vector_rdp), intent(in) :: B(:)
+ !! Low-Rank inhomogeneity.
+ real(wp), intent(in) :: tau
!! Time step.
- integer, intent(in) :: torder
- !! Order of time integration. Only 1st (Lie splitting) and 2nd (Strang splitting) orders are implemented.
- integer, intent(out) :: info
+ integer, intent(in) :: mode
+ !! Time integration mode. Only 1st (Lie splitting - mode 1) and 2nd (Strang splitting - mode 2)
+ !! orders are implemented.
+ integer, intent(out) :: info
!! Information flag
- procedure(abstract_exptA) :: exptA
+ procedure(abstract_exptA_rdp) :: exptA
!! Routine for computation of the exponential propagator (default: Krylov-based exponential operator).
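+ !! (The propagator is invoked as `call exptA(U_out, A, U_in, tau, info, trans)`; cf. its use in `M_forward_map_rdp` below.)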
- logical, optional, intent(in) :: iftrans + logical, intent(in) :: trans !! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used. + logical, intent(in) :: verbose + !! Verbosity ! Internal variables - integer :: istep, nsteps, integrator - logical :: trans + integer :: istep, nsteps + character*128 :: msg - ! Optional argument - trans = optval(iftrans, .false.) - - if ( torder .eq. 1 ) then - integrator = 1 - else if ( torder .eq. 2 ) then - integrator = 2 - else if ( torder .gt. 2 ) then - write(*,*) "INFO : Time-integration order for the operator splitting of d > 2 & - &requires adjoint solves and is not implemented." - write(*,*) " Resetting torder = 2." - info = 1 - integrator = 2 - else - write(*,*) "INFO : Invalid time-integration order specified." - info = -1 - integrator = 2 - endif - - select case (integrator) + select case (mode) case (1) ! Lie-Trotter splitting - call M_forward_map( X, LTI, tau, info, exptA, trans) - call G_forward_map_lyapunov(X, LTI, tau, info) + call M_forward_map( X, A, tau, info, exptA, trans) + call G_forward_map_lyapunov(X, B, tau, info) case (2) ! Strang splitting - call M_forward_map( X, LTI, 0.5*tau, info, exptA, trans) - call G_forward_map_lyapunov(X, LTI, tau, info) - call M_forward_map( X, LTI, 0.5*tau, info, exptA, trans) + call M_forward_map( X, A, 0.5*tau, info, exptA, trans) + call G_forward_map_lyapunov(X, B, tau, info) + call M_forward_map( X, A, 0.5*tau, info, exptA, trans) end select return - end subroutine numerical_low_rank_splitting_lyapunov_step + end subroutine projector_splitting_DLRA_lyapunov_step_rdp - subroutine M_forward_map(X, LTI, tau, info, exptA, iftrans) - class(abstract_sym_low_rank_state), intent(inout) :: X + !----------------------------- + ! + ! RANK-ADAPTIVE PSI + ! + !----------------------------- + + subroutine rank_adaptive_PS_DLRA_lyapunov_step_rdp(X, A, B, tau, mode, info, rk_reduction_lock, exptA, trans, verbose, tol) + !! Wrapper for projector_splitting_DLRA_lyapunov_step_rdp adding the logic for rank-adaptivity + class(abstract_sym_low_rank_state_rdp), intent(inout) :: X !! Low-Rank factors of the solution. - class(abstract_lti_system), intent(in) :: LTI - !! LTI dynamical system defining the problem. - real(kind=wp), intent(in) :: tau + class(abstract_linop_rdp), intent(inout) :: A + !! Linear operator + class(abstract_vector_rdp), intent(in) :: B(:) + !! Low-Rank inhomogeneity. + real(wp), intent(in) :: tau !! Time step. - integer, intent(out) :: info + integer, intent(in) :: mode + !! Time integration mode. Only 1st (Lie splitting - mode 1) and 2nd (Strang splitting - mode 2) + !! orders are implemented. + integer, intent(out) :: info !! Information flag - procedure(abstract_exptA) :: exptA + integer, intent(inout) :: rk_reduction_lock + !! 'timer' to disable rank reduction + procedure(abstract_exptA_rdp) :: exptA !! Routine for computation of the exponential propagator (default: Krylov-based exponential operator). - logical, optional, intent(in) :: iftrans + logical, intent(in) :: trans + !! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used. + logical, intent(in) :: verbose + !! Toggle verbosity + real(wp), intent(in) :: tol + + ! Internal variables + integer :: istep, rk, irk + logical :: accept_step, found + real(wp), allocatable :: coef(:) + real(wp) :: norm + character*128 :: msg + + integer, parameter :: max_step = 5 ! might not be needed + + ! 
Allocate memory for SVD + if (.not.allocated( Usvd)) allocate( Usvd(size(X%U),size(X%U))) + if (.not.allocated( ssvd)) allocate( ssvd(size(X%U))) + if (.not.allocated(VTsvd)) allocate(VTsvd(size(X%U),size(X%U))) + + ! ensure that we are integrating one more rank than we use for approximation + X%rk = X%rk + 1 + rk = X%rk ! this is only to make the code more readable + + accept_step = .false. + istep = 1 + do while ( .not. accept_step .and. istep < max_step ) + ! run a regular step + call projector_splitting_DLRA_lyapunov_step_rdp(X, A, B, tau, mode, info, exptA, trans, verbose) + ! compute singular values of X%S + Usvd = 0.0_wp; ssvd = 0.0_wp; VTsvd = 0.0_wp + call svd(X%S(:rk,:rk), ssvd(:rk), Usvd(:rk,:rk), VTsvd(:rk,:rk)) + found = .false. + tol_chk: do irk = 1, rk + if ( ssvd(irk) < tol ) then + found = .true. + exit tol_chk + end if + end do tol_chk + if (.not. found) irk = irk - 1 + + ! choose action + if (.not. found) then ! none of the singular values is below tolerance + ! increase rank and run another step + if (rk == size(X%U)) then ! cannot increase rank without reallocating X%U and X%S + write(msg, *) 'Cannot increase rank, rkmax is reached. Increase rkmax and restart!' + call stop_error(trim(msg), module=this_module, procedure='rank_adaptive_PS_DLRA_lyapunov_step_rdp') + else + write(msg,'(A,I3)') 'rk =', rk + 1 + call logger%log_warning(trim(msg), module=this_module, procedure='RA-DLRA') + + X%rk = X%rk + 1 + rk = X%rk ! this is only to make the code more readable + ! set coefficients to zero (for redundancy) + X%S(:rk, rk) = 0.0_wp + X%S( rk,:rk) = 0.0_wp + ! add random vector ... + call X%U(rk)%rand(.false.) + ! ... and orthonormalize + call orthogonalize_against_basis(X%U(rk), X%U(:rk-1), info, if_chk_orthonormal=.false.) + call check_info(info, 'orthogonalize_against_basis', module=this_module, & + & procedure='rank_adaptive_PS_DLRA_lyapunov_step_rdp') + call X%U(rk)%scal(1.0_wp / X%U(rk)%norm()) + + rk_reduction_lock = 10 ! avoid rank oscillations + + end if + else ! the rank of the solution is sufficient + accept_step = .true. + + if (irk /= rk .and. rk_reduction_lock == 0) then ! we should decrease the rank + ! decrease rank + + ! rotate basis onto principal axes + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, X%U(:rk), Usvd(:rk,:rk)) + call copy_basis(X%U(:rk), Xwrk) + end block + X%S(:rk,:rk) = diag(ssvd(:rk)) + + rk = max(irk, rk - 2) ! reduce by at most 2 + + write(msg, '(A,I3)') 'rk =', rk + call logger%log_warning(trim(msg), module=this_module, procedure='RA-DLRA') + end if + + end if ! found + istep = istep + 1 + end do ! while .not. accept_step + if (verbose) then + write(msg,'(A,I3,A,I2,A,E14.8,A,I2)') 'rk = ', X%rk-1, ': s_', irk,' = ', & + & ssvd(irk), ', rank_lock: ', rk_reduction_lock + call logger%log_message(trim(msg), module=this_module, procedure='RA-DLRA') + end if + + ! decrease rk_reduction_lock + if (rk_reduction_lock > 0) rk_reduction_lock = rk_reduction_lock - 1 + + ! reset to the rank of the approximation which we use outside of the integrator + X%rk = rk - 1 + + return + end subroutine rank_adaptive_PS_DLRA_lyapunov_step_rdp + + subroutine set_initial_rank(X, A, B, tau, mode, exptA, trans, tol, verbose, rk_init, nsteps) + class(abstract_sym_low_rank_state_rdp), intent(inout) :: X + !! Low-Rank factors of the solution. + class(abstract_linop_rdp), intent(inout) :: A + !! Linear operator + class(abstract_vector_rdp), intent(in) :: B(:) + !! Low-Rank inhomogeneity. + real(wp), intent(in) :: tau + !! 
Time step.
+ integer, intent(in) :: mode
+ !! Time integration mode. Only 1st (Lie splitting - mode 1) and 2nd (Strang splitting - mode 2) orders are implemented.
+ procedure(abstract_exptA_rdp) :: exptA
+ !! Routine for computation of the exponential propagator (default: Krylov-based exponential operator).
+ logical, intent(in) :: trans
+ !! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used.
+ real(wp), intent(in) :: tol
+ !! Tolerance on the last singular value to determine rank
+ logical, intent(in) :: verbose
+ !! verbosity
+ integer, optional, intent(in) :: rk_init
+ !! Smallest tested rank
+ integer, optional, intent(in) :: nsteps
+ !! Number of steps to run before checking the singular values
+ integer :: n
+
+ ! internal
+ integer :: i, irk, info, rkmax
+ class(abstract_vector_rdp), allocatable :: Utmp(:)
+ real(wp), allocatable :: Stmp(:,:)
+ logical :: found, accept_rank
+ character*128 :: msg
+
+ ! optional arguments
+ X%rk = optval(rk_init, 1)
+ n = optval(nsteps, 5)
+ rkmax = size(X%U)
+
+ ! Allocate memory for SVD
+ if (.not.allocated( Usvd)) allocate( Usvd(rkmax,rkmax))
+ if (.not.allocated( ssvd)) allocate( ssvd(rkmax))
+ if (.not.allocated(VTsvd)) allocate(VTsvd(rkmax,rkmax))
+
+ info = 0
+ accept_rank = .false.
+
+ ! save initial condition
+ allocate(Utmp(rkmax), source=X%U)
+ allocate(Stmp(rkmax,rkmax)); Stmp = X%S
+
+ do while (.not. accept_rank .and. X%rk <= rkmax)
+ ! run integrator
+ do i = 1,n
+ call projector_splitting_DLRA_lyapunov_step_rdp(X, A, B, tau, mode, info, exptA, trans, verbose)
+ end do
+
+ ! check if singular values are resolved
+ Usvd = 0.0_wp; ssvd = 0.0_wp; VTsvd = 0.0_wp
+ call svd(X%S(:X%rk,:X%rk), ssvd(:X%rk), Usvd(:X%rk,:X%rk), VTsvd(:X%rk,:X%rk))
+ found = .false.
+ tol_chk: do irk = 1, X%rk
+ if ( ssvd(irk) < tol ) then
+ found = .true.
+ exit tol_chk
+ end if
+ end do tol_chk
+ if (.not. found) irk = irk - 1
+ if (verbose) then
+ write(msg,'(A,I2,A,E8.2)') ' rk = ', X%rk, ' s_r =', ssvd(X%rk)
+ call logger%log_message(trim(msg), module=this_module, procedure='set_initial_rank')
+ end if
+ if (found) then
+ accept_rank = .true.
+ X%rk = irk
+ write(msg,'(A,I2,A,E10.4)') ' Accepted rank: r = ', X%rk-1, ', s_{r+1} = ', ssvd(X%rk)
+ call logger%log_message(trim(msg), module=this_module, procedure='set_initial_rank')
+ else
+ X%rk = 2*X%rk
+ end if
+
+ ! reset initial conditions
+ call copy_basis(X%U, Utmp)
+ X%S = Stmp
+ end do
+
+ if (X%rk > rkmax) then
+ write(msg, *) 'Maximum rank reached but singular values are not converged. Increase rkmax and restart.'
+ call stop_error(trim(msg), module=this_module, procedure='set_initial_rank')
+ end if
+
+ ! reset to the rank of the approximation which we use outside of the integrator & mark rank as initialized
+ X%rk = X%rk - 1
+ X%rank_is_initialised = .true.
+
+ end subroutine set_initial_rank
+
+ subroutine compute_splitting_error(err_est, X, A, B, tau, mode, exptA, trans)
+ !! This function estimates the splitting error of the integrator as a function of the chosen timestep.
+ !! This error estimation can be integrated over time to give an estimate of the compound error due to
+ !! the splitting approach.
+ !! This error can be used as a tolerance for the rank-adaptivity to ensure that the low-rank truncation
+ !! error is smaller than the splitting error.
+ real(wp), intent(out) :: err_est
+ !! Estimation of the splitting error
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
+ !! Low-Rank factors of the solution.
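+ !! (used as the initial condition for the two trial integrations, one step of size tau and two steps of size tau/2, and restored to its original value on exit)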
+ class(abstract_linop_rdp), intent(inout) :: A
+ !! Linear operator
+ class(abstract_vector_rdp), intent(in) :: B(:)
+ !! Low-Rank inhomogeneity.
+ real(wp), intent(in) :: tau
+ !! Time step.
+ integer, intent(in) :: mode
+ !! Time integration mode. Only 1st (Lie splitting - mode 1) and 2nd (Strang splitting - mode 2) orders are implemented.
+ procedure(abstract_exptA_rdp) :: exptA
+ !! Routine for computation of the exponential propagator (default: Krylov-based exponential operator).
+ logical, intent(in) :: trans
+ !! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used.
+
+ ! internals
+ ! save current state to reset it later
+ class(abstract_vector_rdp), allocatable :: Utmp(:)
+ real(wp), allocatable :: Stmp(:,:)
+ ! first solution to compute the difference against
+ class(abstract_vector_rdp), allocatable :: U1(:)
+ real(wp), allocatable :: S1(:,:)
+ ! projected bases
+ real(wp), allocatable :: V1(:,:), V2(:,:)
+ ! projected difference
+ real(wp), allocatable :: D(:,:)
+ integer :: rx, r, info
+
+ ! svd
+ real(wp), allocatable :: ssvd(:)
+ real(wp), allocatable :: Usvd(:,:), VTsvd(:,:)
+
+ rx = X%rk
+ r = 2*rx
+
+ ! save current state
+ allocate(Utmp(rx), source=X%U(:rx))
+ allocate(Stmp(rx,rx)); Stmp = X%S(:rx,:rx)
+
+ ! tau step
+ call projector_splitting_DLRA_lyapunov_step_rdp(X, A, B, tau, mode, info, exptA, trans, verbose=.false.)
+ ! save result
+ allocate(U1(rx), source=X%U(:rx))
+ allocate(S1(rx,rx)); S1 = X%S(:rx,:rx)
+
+ ! reset current state
+ call copy_basis(X%U(:rx), Utmp)
+ X%S(:rx,:rx) = Stmp
+
+ ! tau/2 steps
+ call projector_splitting_DLRA_lyapunov_step_rdp(X, A, B, 0.5*tau, mode, info, exptA, trans, verbose=.false.)
+ call projector_splitting_DLRA_lyapunov_step_rdp(X, A, B, 0.5*tau, mode, info, exptA, trans, verbose=.false.)
+
+ ! compute common basis
+ call project_onto_common_basis(V1, V2, U1(:rx), X%U(:rx))
+
+ ! project second low-rank state onto common basis and construct difference
+ allocate(D(r,r)); D = 0.0_wp
+ D( :rx, :rx) = S1 - matmul(V1, matmul(X%S(:rx,:rx), transpose(V1)))
+ D(rx+1:r , :rx) = - matmul(V2, matmul(X%S(:rx,:rx), transpose(V1)))
+ D( :rx, rx+1:r ) = - matmul(V1, matmul(X%S(:rx,:rx), transpose(V2)))
+ D(rx+1:r , rx+1:r ) = - matmul(V2, matmul(X%S(:rx,:rx), transpose(V2)))
+
+ ! svd
+ allocate( Usvd(r,r))
+ allocate( ssvd(r))
+ allocate(VTsvd(r,r))
+ call svd(D, ssvd, Usvd, VTsvd)
+
+ ! compute local error based on Frobenius norm of difference
+ err_est = 2.0_wp**mode / (2.0_wp**mode - 1.0_wp) * sqrt( sum( ssvd ** 2 ) )
+
+ ! reset current state
+ call copy_basis(X%U(:rx), Utmp)
+ X%S(:rx,:rx) = Stmp
+
+ end subroutine compute_splitting_error
+
+ subroutine compute_increment_norm(nrm, U, S, U_lag, S_lag)
+ !! This function computes the norm of the solution increment in a cheap way avoiding the
+ !! construction of the full low-rank solutions.
+ real(wp), intent(out) :: nrm
+ !! Increment norm of current timestep
+ class(abstract_vector_rdp), intent(in) :: U(:)
+ !! Low-rank basis of current solution
+ real(wp), intent(in) :: S(:,:)
+ !! Coefficients of current solution
+ class(abstract_vector_rdp), intent(in) :: U_lag(:)
+ !! Low-rank basis of lagged solution
+ real(wp), intent(in) :: S_lag(:,:)
+ !! Coefficients of lagged solution
+
+ ! internals
+ real(wp), dimension(:,:), allocatable :: D, V1, V2
+ real(wp), dimension(:), allocatable :: svals
+ integer :: r, rl
+
+ r = size(U)
+ rl = size(U_lag)
+
+ ! compute common basis
+ call project_onto_common_basis(V1, V2, U_lag, U)
+
+ ! project second low-rank state onto common basis and construct difference
+ allocate(D(r+rl,r+rl)); D = 0.0_wp
+ D( :rl , :rl ) = S_lag - matmul(V1, matmul(S, transpose(V1)))
+ D(rl+1:rl+r, :rl ) = - matmul(V2, matmul(S, transpose(V1)))
+ D( :rl , rl+1:rl+r) = - matmul(V1, matmul(S, transpose(V2)))
+ D(rl+1:rl+r, rl+1:rl+r) = - matmul(V2, matmul(S, transpose(V2)))
+
+ ! compute Frobenius norm of difference
+ svals = svdvals(D)
+ nrm = sqrt(sum(svals**2)) !/ U%get_size()
+
+ return
+ end subroutine compute_increment_norm
+
+ subroutine M_forward_map_rdp(X, A, tau, info, exptA, iftrans)
+ !! This subroutine computes the solution of the stiff linear part of the
+ !! differential equation exactly using the matrix exponential.
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
+ !! Low-Rank factors of the solution.
+ class(abstract_linop_rdp), intent(inout) :: A
+ !! Linear operator.
+ real(wp), intent(in) :: tau
+ !! Time step.
+ integer, intent(out) :: info
+ !! Information flag
+ procedure(abstract_exptA_rdp) :: exptA
+ !! Routine for computation of the exponential propagator (default: Krylov-based exponential operator).
+ logical, optional, intent(in) :: iftrans
+ logical :: trans
!! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used.
! Internal variables
- logical :: trans
- class(abstract_vector), allocatable :: Uwrk ! scratch basis
- real(kind=wp), allocatable :: R(:,:) ! QR coefficient matrix
- integer, allocatable :: perm(:) ! Permutation vector
- real(kind=wp), allocatable :: wrk(:,:)
- integer :: i, rk
+ class(abstract_vector_rdp), allocatable :: exptAU ! scratch basis
+ real(wp), allocatable :: R(:,:) ! QR coefficient matrix
+ integer, allocatable :: perm(:) ! Permutation vector
+ real(wp), allocatable :: wrk(:,:)
+ integer :: i, rk
! Optional argument
trans = optval(iftrans, .false.)
- rk = size(X%U)
- allocate(R(1:rk,1:rk)); R = 0.0_wp
- allocate(perm(1:rk)); perm = 0
- allocate(wrk(1:rk,1:rk)); wrk = 0.0_wp
+ rk = X%rk
+ allocate(R(rk,rk)); R = 0.0_wp
+ allocate(perm(rk)); perm = 0
+ allocate(wrk(rk,rk)); wrk = 0.0_wp
! Apply propagator to initial basis
- if (.not. allocated(Uwrk)) allocate(Uwrk, source=X%U(1))
- call Uwrk%zero()
+ allocate(exptAU, source=X%U(1)); call exptAU%zero()
do i = 1, rk
- call exptA(Uwrk, LTI%A, X%U(i), tau, info, trans)
- call X%U(i)%axpby(0.0_wp, Uwrk, 1.0_wp) ! overwrite old solution
- enddo
+ call exptA(exptAU, A, X%U(i), tau, info, trans)
+ call X%U(i)%axpby(0.0_wp, exptAU, 1.0_wp) ! overwrite old solution
+ end do
! Reorthonormalize in-place
- call qr_factorization(X%U, R, perm, info, ifpivot = .true.)
+ call qr(X%U(:rk), R, perm, info)
+ call check_info(info, 'qr_pivot', module=this_module, procedure='M_forward_map_rdp')
! Update low-rank factors
- call apply_permutation(R, perm, trans = .true.)
+ call apply_inverse_permutation_matrix(R, perm)
! Update coefficient matrix
- wrk = matmul(X%S, transpose(R))
- X%S = matmul(R, wrk)
+ wrk = matmul(X%S(:rk,:rk), transpose(R))
+ X%S(:rk,:rk) = matmul(R, wrk)
return
- end subroutine M_forward_map
-
- subroutine G_forward_map_lyapunov(X, LTI, tau, info)
- class(abstract_sym_low_rank_state), intent(inout) :: X
+ end subroutine M_forward_map_rdp
+
+ subroutine G_forward_map_lyapunov_rdp(X, B, tau, info)
+ !! This subroutine computes the solution of the non-stiff part of the
+ !! differential equation numerically using first-order explicit Euler.
+ !! The update of the full low-rank factorization requires three separate
+ !!
steps called K, S, L. + class(abstract_sym_low_rank_state_rdp), intent(inout) :: X !! Low-Rank factors of the solution. - class(abstract_lti_system), intent(in) :: LTI - !! LTI dynamical system defining the problem. - real(kind=wp), intent(in) :: tau + class(abstract_vector_rdp), intent(in) :: B(:) + !! Low-Rank inhomogeneity. + real(wp), intent(in) :: tau !! Time step. - integer, intent(out) :: info + integer, intent(out) :: info !! Information flag. ! Internal variables - class(abstract_vector), allocatable :: U1(:) - class(abstract_vector), allocatable :: BBTU(:) - integer :: rk - - rk = size(X%U) - if (.not. allocated(U1)) allocate(U1(1:rk), source=X%U(1)) - if (.not. allocated(BBTU)) allocate(BBTU(1:rk), source=X%U(1)) - call mat_zero(U1); call mat_zero(BBTU) - - call K_step_lyapunov(X, U1, BBTU, LTI%B, tau, info) - call S_step_lyapunov(X, U1, BBTU, tau, info) - call L_step_lyapunov(X, U1, LTI%B, tau, info) + class(abstract_vector_rdp), allocatable :: U1(:) + class(abstract_vector_rdp), allocatable :: BBTU(:) + integer :: rk, rkmax + + rk = X%rk + rkmax = size(X%U) + if (.not. allocated(U1)) allocate(U1( rkmax), source=X%U(1)) + if (.not. allocated(BBTU)) allocate(BBTU(rkmax), source=X%U(1)) + call zero_basis(U1); call zero_basis(BBTU) + + call K_step_lyapunov(X, U1(:rk), BBTU(:rk), B, tau, info) + call S_step_lyapunov(X, U1(:rk), BBTU(:rk), tau, info) + call L_step_lyapunov(X, U1(:rk), B, tau, info) ! Copy updated low-rank factors to output - call mat_copy(X%U, U1) + call copy_basis(X%U(:rk), U1(:rk)) return - end subroutine G_forward_map_lyapunov + end subroutine G_forward_map_lyapunov_rdp - subroutine K_step_lyapunov(X, U1, BBTU, B, tau, info) - class(abstract_sym_low_rank_state), intent(inout) :: X + subroutine K_step_lyapunov_rdp(X, U1, BBTU, B, tau, info) + class(abstract_sym_low_rank_state_rdp), intent(inout) :: X !! Low-Rank factors of the solution. - class(abstract_vector), intent(out) :: U1(:) + class(abstract_vector_rdp), intent(out) :: U1(:) !! Intermediate low-rank factor. - class(abstract_vector), intent(out) :: BBTU(:) + class(abstract_vector_rdp), intent(out) :: BBTU(:) !! Precomputed application of the inhomogeneity. - class(abstract_vector), intent(in) :: B(:) + class(abstract_vector_rdp), intent(in) :: B(:) !! Low-Rank inhomogeneity. - real(kind=wp), intent(in) :: tau + real(wp), intent(in) :: tau !! Time step. - integer, intent(out) :: info + integer, intent(out) :: info !! Information flag. ! Internal variables - real(kind=wp), allocatable :: Swrk(:,:) - integer, allocatable :: perm(:) ! Permutation vector - integer :: rk + integer, allocatable :: perm(:) ! Permutation vector + integer :: rk, rkmax info = 0 - rk = size(X%U) - if (.not. allocated(Swrk)) allocate(Swrk(1:rk,1:rk)); - Swrk = 0.0_wp - allocate(perm(1:rk)); perm = 0 - - call mat_mult(U1, X%U, X%S) ! K0 - call apply_outerproduct(BBTU, B, X%U) ! Kdot + rk = X%rk + rkmax = size(X%U) + if (.not. allocated(Swrk)) allocate(Swrk(rkmax,rkmax)); Swrk = 0.0_wp + allocate(perm(rk)); perm = 0 + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, X%U(:rk), X%S(:rk,:rk)) ! K0 + call copy_basis(U1, Xwrk) + end block + call apply_outerprod(BBTU, B, X%U(:rk)) ! Kdot ! Construct intermediate solution U1 - call mat_axpby(U1, 1.0_wp, BBTU, tau) ! K0 + tau*Kdot + call axpby_basis(U1, 1.0_wp, BBTU(:rk), tau) ! K0 + tau*Kdot ! Orthonormalize in-place - call qr_factorization(U1, Swrk, perm, info, ifpivot = .true.) - call apply_permutation(Swrk, perm, trans = .true.) 
- X%S = Swrk
+ call qr(U1(:rk), Swrk(:rk,:rk), perm, info)
+ call check_info(info, 'qr_pivot', module=this_module, procedure='K_step_lyapunov_rdp')
+ call apply_inverse_permutation_matrix(Swrk(:rk,:rk), perm)
+ X%S(:rk,:rk) = Swrk(:rk,:rk)
return
- end subroutine K_step_lyapunov
+ end subroutine K_step_lyapunov_rdp
- subroutine S_step_lyapunov(X, U1, BBTU, tau, info)
- class(abstract_sym_low_rank_state), intent(inout) :: X
+ subroutine S_step_lyapunov_rdp(X, U1, BBTU, tau, info)
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
!! Low-Rank factors of the solution.
- class(abstract_vector), intent(in) :: U1(:)
+ class(abstract_vector_rdp), intent(in) :: U1(:)
!! Intermediate low-rank factor.
- class(abstract_vector), intent(in) :: BBTU(:)
+ class(abstract_vector_rdp), intent(in) :: BBTU(:)
!! Precomputed application of the inhomogeneity.
- real(kind=wp), intent(in) :: tau
+ real(wp), intent(in) :: tau
!! Time step.
- integer, intent(out) :: info
+ integer, intent(out) :: info
!! Information flag.
! Internal variables
- real(kind=wp), allocatable :: Swrk(:,:)
- integer :: rk
+ integer :: rk, rkmax
info = 0
- rk = size(X%U)
- if (.not. allocated(Swrk)) allocate(Swrk(1:rk,1:rk))
- Swrk = 0.0_wp
- call mat_mult(Swrk, U1, BBTU) ! - Sdot
+ rk = X%rk
+ rkmax = size(X%U)
+ if (.not. allocated(Swrk)) allocate(Swrk(rkmax,rkmax)); Swrk = 0.0_wp
+ call innerprod(Swrk(:rk,:rk), U1, BBTU) ! - Sdot
! Construct intermediate coefficient matrix
- call mat_axpby(X%S, 1.0_wp, Swrk, -tau)
+ X%S(:rk,:rk) = X%S(:rk,:rk) - tau*Swrk(:rk,:rk)
return
- end subroutine S_step_lyapunov
+ end subroutine S_step_lyapunov_rdp
- subroutine L_step_lyapunov(X, U1, B, tau, info)
- class(abstract_sym_low_rank_state), intent(inout) :: X
+ subroutine L_step_lyapunov_rdp(X, U1, B, tau, info)
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
!! Low-Rank factors of the solution.
- class(abstract_vector), intent(in) :: U1(:)
+ class(abstract_vector_rdp), intent(in) :: U1(:)
!! Intermediate low-rank factor (from K step).
- class(abstract_vector), intent(in) :: B(:)
+ class(abstract_vector_rdp), intent(in) :: B(:)
!! Low-Rank inhomogeneity.
- real(kind=wp), intent(in) :: tau
+ real(wp), intent(in) :: tau
!! Time step.
- integer, intent(out) :: info
+ integer, intent(out) :: info
!! Information flag.
! Internal variables
- class(abstract_vector), allocatable :: Uwrk(:)
- integer :: rk
+ integer :: rk, rkmax
info = 0
- rk = size(X%U)
- if (.not. allocated(Uwrk)) allocate(Uwrk(1:rk), source=X%U(1))
- call mat_zero(Uwrk)
+ rk = X%rk
+ rkmax = size(X%U)
+ if (.not. allocated(Uwrk)) allocate(Uwrk(rkmax), source=X%U(1)); call zero_basis(Uwrk)
- call mat_mult(Uwrk, X%U, transpose(X%S)) ! L0.T
- call apply_outerproduct(X%U, B, U1) ! Ldot.T
+ block
+ class(abstract_vector_rdp), allocatable :: Xwrk(:)
+ call linear_combination(Xwrk, X%U(:rk), transpose(X%S(:rk,:rk))) ! L0.T
+ call copy_basis(Uwrk(:rk), Xwrk)
+ end block
+ call apply_outerprod(X%U(:rk), B, U1) ! Ldot.T
! Construct solution L1.T
- call mat_axpby(Uwrk, 1.0_wp, X%U, tau)
+ call axpby_basis(Uwrk(:rk), 1.0_wp, X%U(:rk), tau)
!
Update coefficient matrix - call mat_mult(X%S, Uwrk, U1) + call innerprod(X%S(:rk,:rk), Uwrk(:rk), U1) return - end subroutine L_step_lyapunov + end subroutine L_step_lyapunov_rdp -end module lightROM_LyapunovSolvers +end module LightROM_LyapunovSolvers diff --git a/src/LyapunovUtils.f90 b/src/LyapunovUtils.f90 index fb9f9fa..bbec32e 100644 --- a/src/LyapunovUtils.f90 +++ b/src/LyapunovUtils.f90 @@ -1,35 +1,78 @@ module LightROM_LyapunovUtils - Use LightKrylov - Use LightKrylov_expmlib + ! LightKrylov + use LightKrylov + use LightKrylov, only: wp => dp + use LightKrylov_Constants + use LightKrylov_AbstractVectors + ! LightROM + use LightROM_AbstractLTIsystems + implicit none - private + ! module name + private :: this_module + character*128, parameter :: this_module = 'LightROM_LyapunovUtils' + ! Matrix operations for abstract vector types - public :: apply_outerproduct + public :: apply_outerprod + + interface apply_outerprod + module procedure apply_outerprod_vector_rdp + module procedure apply_outerprod_basis_rdp + end interface + +contains - contains + subroutine apply_outerprod_vector_rdp(c,A,b) + !! Computes the matrix product \( \mathbf{c} = \mathbf{Q} \mathbf{b} \) where + !! \( \mathbf{Q} = \mathbf{A} \mathbf{A}^T \) is the outer product of \( \mathbf{A} \) + !! with itself with + !! + !! - \( \mathbf{C} \): `abstract vector` + !! - \( \mathbf{A} \): `abstract vector` type Krylov basis of size (n x m) + !! - \( \mathbf{B} \): `abstract vector` + !! + !! In order to avoid building \( \mathbf{Q} \) (n x n), we compute sequentially + !! \( \mathbf{c} = \mathbf{A} ( \mathbf{A}^T \mathbf{b} ) \) + class(abstract_vector_rdp), intent(out) :: c + class(abstract_vector_rdp), intent(in) :: A(:) + class(abstract_vector_rdp), intent(in) :: b + ! Intermediate basis + real(wp) :: wrk(size(A)) + wrk = zero_rdp + call innerprod(wrk, A, b) + block + class(abstract_vector_rdp), allocatable :: xwrk + call linear_combination(xwrk, A, wrk) + call c%zero(); call c%add(xwrk) + end block + return + end subroutine apply_outerprod_vector_rdp - subroutine apply_outerproduct(C,A,B) - !! Computes the matrix product \( \mathbf{C} = \mathbf{Q} \mathbf{B} \) where - !! \( \mathbf{Q} = \mathbf{A} \mathbf{A}^T \) is the outer product of \( \mathbf{A} \) - !! with itself with - !! - !! - \( \mathbf{C} \): `abstract vector` type Krylov basis of size (n x r) - !! - \( \mathbf{A} \): `abstract vector` type Krylov basis of size (n x m) - !! - \( \mathbf{B} \): `abstract vector` type Krylov basis of size (n x r) - !! - !! In order to avoid building \( \mathbf{Q} \) (n x n), we compute sequentially - !! \( \mathbf{C} = \mathbf{A} ( \mathbf{A}^T \mathbf{B} ) \) - class(abstract_vector) , intent(out) :: C(:) - class(abstract_vector) , intent(in) :: A(:) - class(abstract_vector) , intent(in) :: B(:) - ! Intermediate basis - real(kind=wp), allocatable :: wrk(:,:) - allocate(wrk(1:size(A),1:size(B))) - call mat_mult(wrk, A, B) - call mat_mult(C, A, wrk) - deallocate(wrk) - return - end subroutine apply_outerproduct + subroutine apply_outerprod_basis_rdp(C,A,B) + !! Computes the matrix product \( \mathbf{C} = \mathbf{Q} \mathbf{B} \) where + !! \( \mathbf{Q} = \mathbf{A} \mathbf{A}^T \) is the outer product of \( \mathbf{A} \) + !! with itself with + !! + !! - \( \mathbf{C} \): `abstract vector` type Krylov basis of size (n x r) + !! - \( \mathbf{A} \): `abstract vector` type Krylov basis of size (n x m) + !! - \( \mathbf{B} \): `abstract vector` type Krylov basis of size (n x r) + !! + !! 
In order to avoid building \( \mathbf{Q} \) (n x n), we compute sequentially + !! \( \mathbf{C} = \mathbf{A} ( \mathbf{A}^T \mathbf{B} ) \) + class(abstract_vector_rdp), intent(out) :: C(:) + class(abstract_vector_rdp), intent(in) :: A(:) + class(abstract_vector_rdp), intent(in) :: B(:) + ! Intermediate basis + real(wp) :: wrk(size(A),size(B)) + wrk = zero_rdp + call innerprod(wrk, A, B) + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, A, wrk) + call copy_basis(C, Xwrk) + end block + return + end subroutine apply_outerprod_basis_rdp end module LightROM_LyapunovUtils \ No newline at end of file diff --git a/src/RiccatiSolvers.f90 b/src/RiccatiSolvers.f90 index 013796e..f1cd0f6 100644 --- a/src/RiccatiSolvers.f90 +++ b/src/RiccatiSolvers.f90 @@ -1,43 +1,74 @@ module LightROM_RiccatiSolvers !! This module provides the implementation of the Krylov-based solvers for the Differential Riccati !! equation based on the dynamic low-rank approximation and operator splitting. - + ! Standard Library + use stdlib_optval, only : optval + use stdlib_linalg, only : eye ! LightKrylov modules use LightKrylov - use LightKrylov_expmlib - use lightkrylov_BaseKrylov + use LightKrylov, only: wp => dp + use LightKrylov_Logger + use LightKrylov_AbstractVectors + use LightKrylov_ExpmLib + use Lightkrylov_BaseKrylov ! LightROM modules + use LightROM_Utils use LightROM_LyapunovUtils - use LightROM_LyapunovSolvers + use LightROM_LyapunovSolvers, only : M_forward_map use LightROM_RiccatiUtils - use lightROM_AbstractLTIsystems - ! Standard Library - use stdlib_optval, only : optval - use stdlib_linalg, only : eye + use LightROM_AbstractLTIsystems + implicit none ! global scratch arrays - class(abstract_vector), allocatable :: Uwrk0(:) - class(abstract_vector), allocatable :: Uwrk1(:) - class(abstract_vector), allocatable :: U1(:) - class(abstract_vector), allocatable :: QU(:) - real(kind=wp), allocatable :: Swrk0(:,:) - real(kind=wp), allocatable :: Swrk1(:,:) + class(abstract_vector_rdp), allocatable :: Uwrk0(:) + class(abstract_vector_rdp), allocatable :: Uwrk1(:) + class(abstract_vector_rdp), allocatable :: U1(:) + class(abstract_vector_rdp), allocatable :: QU(:) + real(wp), allocatable :: Swrk0(:,:) + real(wp), allocatable :: Swrk1(:,:) ! global scratch arrays for the predictor step - class(abstract_vector), allocatable :: U0(:) - class(abstract_vector), allocatable :: T0(:) - class(abstract_vector), allocatable :: Ut(:) - class(abstract_vector), allocatable :: Tt(:) - real(kind=wp), allocatable :: S0(:,:) - - private - public :: numerical_low_rank_splitting_riccati_integrator - public :: G_forward_map_riccati, K_step_riccati, S_step_riccati, L_step_riccati + class(abstract_vector_rdp), allocatable :: U0(:) + class(abstract_vector_rdp), allocatable :: T0(:) + class(abstract_vector_rdp), allocatable :: Ut(:) + class(abstract_vector_rdp), allocatable :: Tt(:) + real(wp), allocatable :: S0(:,:) + + private + ! 
module name + private :: this_module + character*128, parameter :: this_module = 'LightROM_RiccatiSolvers' + public :: projector_splitting_DLRA_riccati_integrator + public :: G_forward_map_riccati + public :: K_step_riccati + public :: S_step_riccati + public :: L_step_riccati + + interface projector_splitting_DLRA_riccati_integrator + module procedure projector_splitting_DLRA_riccati_integrator_rdp + end interface + + interface G_forward_map_riccati + module procedure G_forward_map_riccati_rdp + end interface + + interface K_step_riccati + module procedure K_step_riccati_rdp + end interface + + interface S_step_riccati + module procedure S_step_riccati_rdp + end interface + + interface L_step_riccati + module procedure L_step_riccati_rdp + end interface contains - subroutine numerical_low_rank_splitting_riccati_integrator(X,LTI,Qc,Rinv,Tend,tau,torder,info,exptA,iftrans) - !! Numerical integrator for the matrix-valued differential Riccati equation of the form + subroutine projector_splitting_DLRA_riccati_integrator_rdp(X, A, B, CT, Qc, Rinv, Tend, tau, mode, info, & + & exptA, iftrans, options) + !! Main driver for the numerical integrator for the matrix-valued differential Riccati equation of the form !! !! $$\dot{\mathbf{X}} = \mathbf{A} \mathbf{X} + \mathbf{X} \mathbf{A}^T + \mathbf{C}^T \mathbf{Q} \mathbf{C} - \mathbf{X} \mathbf{B} \mathbf{R}^{-1} \mathbf{B}^T \mathbf{X} $$ !! @@ -49,51 +80,54 @@ subroutine numerical_low_rank_splitting_riccati_integrator(X,LTI,Qc,Rinv,Tend,ta !! !! $$\mathbf{0} = \mathbf{A} \mathbf{X} + \mathbf{X} \mathbf{A}^T + \mathbf{C}^T \mathbf{Q} \mathbf{C} - \mathbf{X} \mathbf{B} \mathbf{R}^{-1} \mathbf{B}^T \mathbf{X} $$ !! - !! The algorithm is based on three main ideas: + !! The algorithm is based on four main ideas: !! - !! - The operator splitting scheme proposed by Lubich & Oseledets (2014) that splits the - !! right-hand side of the differential equation into a linear stiff part that is solved - !! explicitly and a possibly non-linear non-stiff part which is solved numerically. The - !! two operators are then composed to obtain the integrator for the full Lyapunov equation. - !! - The Dynamic Low-Rank Approximation for the solution of general matrix differential - !! equations proposed by Nonnenmacher & Lubich (2007) which seeks to integrate only the - !! leading low-rank factors of the solution to a large system by updating the matrix - !! factorization. The dynamical low-rank approximation scheme for the low-rank factors - !! of the solution is itself solved using a projector-splitting technique to cheaply - !! maintain orthonormality or the low-rank basis without explicit SVDs. - !! - This algorithm has been applied to the Lyapunov and Riccati equations by Mena et al. - !! (2018) with improvements taking advantage of the symmetry of the problem/solution. + !! - Dynamic Low-Rank Approximation (DLRA). DLRA is a method for the solution of general matrix differential + !! equations proposed by Nonnenmacher & Lubich (2007) which seeks to integrate only the leading low-rank + !! factors of the solution to a large system by updating an appropriate matrix factorization. The time-integration + !! is achieved by splitting the step into three sequential substeps, each updating a part of the factorization + !! taking advantage of and maintaining the orthogonality of the left and right low-rank bases of the factorization. + !! - Projector-Splitting Integration (PSI). The projector-splitting scheme proposed by Lubich & Oseledets (2014) + !! 
for the solution of DLRA splits the right-hand side of the differential equation into a linear stiff part
+ !! that is integrated exactly and a (possibly non-linear) non-stiff part which is integrated numerically.
+ !! The two operators are then composed to obtain the integrator for the full differential equation.
+ !! The advantage of the projector splitting integration is that it maintains orthonormality of the basis
+ !! of the low-rank approximation to the solution without requiring SVDs of the full matrix.
+ !! - The third element is the application of the general framework of projector-splitting integration for
+ !! dynamical low-rank approximation to the Riccati equations by Mena et al. (2018). As the solutions
+ !! to the Riccati equation are by construction SPD, this fact can be taken advantage of to reduce the
+ !! computational cost of the integration and, in particular, doing away with one QR factorization per timestep
+ !! while maintaining symmetry of the resulting matrix factorization.
+ !! - The final element is the addition of the capability of dynamic rank adaptivity for the projector-splitting
+ !! integrator proposed by Hochbruck et al. (2023). At the cost of integrating a supplementary solution vector,
+ !! the rank of the solution is dynamically adapted to ensure that the corresponding additional singular value
+ !! stays below a chosen threshold.
!!
!! **Algorithmic Features**
!!
- !! - Separate integration of the stiff inhomogeneous part of the Lyapunov equation and the
- !! non-stiff inhomogeneity
- !! - Rank preserving time-integration that maintains orthonormality of the factorization
- !! basis
+ !! - Separate integration of the stiff inhomogeneous part of the Riccati equation and the non-stiff inhomogeneity
+ !! - Rank preserving time-integration that maintains orthonormality of the factorization basis
+ !! - Alternatively, dynamical rank-adaptivity based on the instantaneous singular values
!! - The stiff part of the problem is solved using a time-stepper approach to approximate
!! the action of the exponential propagator
!!
!! **Advantages**
!!
- !! - Rank of the approximate solution is user defined
- !! - The timesteps of the stiff and non-stiff parts of the code are independent
+ !! - Rank of the approximate solution is user defined or chosen adaptively based on the solution
!! - The integrator is adjoint-free
- !! - The operator of the homogeneous part and the inhomogeneity are not needed explicitly
- !! i.e. the algorithm is amenable to solution using Krylov methods (in particular for
- !! the solution of the stiff part of the problem)
- !! - No SVDs are necessary for this alogorithm
- !! - Lie and Strang splitting implemented allowing for first and second order integration
- !! in time
+ !! - The operator of the homogeneous part and the inhomogeneity are not needed explicitly i.e. the algorithm
+ !! is amenable to solution using Krylov methods (in particular for the solution of the stiff part of the problem)
+ !! - No SVDs of the full solution are required for this algorithm
+ !! - Lie and Strang splitting implemented allowing for first and second order integration in time
!!
!! ** Limitations**
!!
- !! - Rank of the approximate solution is user defined. The appropriateness of this
- !! approximation is not considered
- !! - The current implementation does not require an adjoint integrator. This means that
- !! the temporal order of the basic operator splitting scheme is limited to 1 (Lie-Trotter
- !! splitting) or at most 2 (Strang splitting). Higher order integrators are possible, but
- !! require at least some backward integration (via the adjoint) in BOTH parts of the splitting.
- !! (see Sheng-Suzuki and Goldman-Kaper theorems)
+ !! - Rank of the approximate solution is user defined. The appropriateness of this approximation is not considered.
+ !! This does not apply to the rank-adaptive version of the integrator.
+ !! - The current implementation does not require an adjoint integrator. This means that the temporal order of the
+ !! basic operator splitting scheme is limited to 1 (Lie-Trotter splitting) or at most 2 (Strang splitting).
+ !! Higher order integrators are possible, but require at least some backward integration (via the adjoint)
+ !! in BOTH parts of the splitting (see Sheng-Suzuki and Goldman-Kaper theorems).
!!
!! **References**
!!
@@ -104,60 +138,88 @@ subroutine numerical_low_rank_splitting_riccati_integrator(X,LTI,Qc,Rinv,Tend,ta
!! - Mena, H., Ostermann, A., Pfurtscheller, L.-M., Piazzola, C. (2018). "Numerical low-rank
!! approximation of matrix differential equations", Journal of Computational and Applied Mathematics,
!! 340, 602-614
- class(abstract_sym_low_rank_state), intent(inout) :: X
+ !! - Hochbruck, M., Neher, M., Schrammer, S. (2023). "Rank-adaptive dynamical low-rank integrators for
+ !! first-order and second-order matrix differential equations", BIT Numerical Mathematics 63:9
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
!! Low-Rank factors of the solution.
- class(abstract_lti_system), intent(in) :: LTI
- !! LTI dynamical system defining the problem.
- real(kind=wp), intent(in) :: Qc(:,:)
+ class(abstract_linop_rdp), intent(inout) :: A
+ !! Linear operator.
+ class(abstract_vector_rdp), intent(in) :: B(:)
+ !! System input.
+ class(abstract_vector_rdp), intent(in) :: CT(:)
+ !! System output.
+ real(wp), intent(in) :: Qc(:,:)
!! Measurement weights.
- real(kind=wp), intent(in) :: Rinv(:,:)
+ real(wp), intent(in) :: Rinv(:,:)
!! Inverse of the actuation weights.
- real(kind=wp), intent(in) :: Tend
+ real(wp), intent(in) :: Tend
!! Integration time horizon.
- real(kind=wp), intent(inout) :: tau
+ real(wp), intent(inout) :: tau
!! Desired time step. The actual time-step will be computed so as to reach Tend in an integer number
!! of steps.
- integer, intent(in) :: torder
+ integer, intent(in) :: mode
!! Order of time integration. Only 1st (Lie splitting) and 2nd (Strang splitting) orders are implemented.
- integer, intent(out) :: info
+ integer, intent(out) :: info
!! Information flag.
- procedure(abstract_exptA), optional :: exptA
+ procedure(abstract_exptA_rdp), optional :: exptA
!! Routine for computation of the exponential propagator (default: Krylov-based exponential operator).
- logical, optional, intent(in) :: iftrans
+ logical, optional, intent(in) :: iftrans
+ logical :: trans
!! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used.
+ type(dlra_opts), optional, intent(in) :: options
+ type(dlra_opts) :: opts
+ !! Options for solver configuration
! Internal variables
- integer :: istep, nsteps, iostep
- real(kind=wp) :: T
- logical, parameter :: verbose = .false.
- logical :: trans
- procedure(abstract_exptA), pointer :: p_exptA => null()
+ integer :: istep, nsteps
+ logical :: verbose, converged
+ real(wp) :: T
+ character*128 :: msg
+ procedure(abstract_exptA_rdp), pointer :: p_exptA => null()
! Optional arguments
trans = optval(iftrans, .false.)
+
+ ! Options
+ if (present(options)) then
+ opts = options
+ else ! default
+ opts = dlra_opts()
+ end if
+
+ ! set verbosity
+ verbose = opts%verbose
+
if (present(exptA)) then
p_exptA => exptA
else
- p_exptA => k_exptA
+ p_exptA => k_exptA_rdp
endif
- T = 0.0_wp
+ ! Initialize
+ T = 0.0_wp
+ converged = .false.
! Compute number of steps
nsteps = floor(Tend/tau)
- iostep = nsteps/10
- if ( iostep .eq. 0 ) then
- iostep = 10
+ if ( opts%mode > 2 ) then
+ write(msg, *) "Time-integration order for the operator splitting of d > 2 &
+ & requires adjoint solves and is not implemented. Resetting mode = 2."
+ call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+ opts%mode = 2
+ else if ( opts%mode < 1 ) then
+ write(msg, *) "Invalid time-integration order specified: ", opts%mode
+ call stop_error(trim(msg), module=this_module, &
+ & procedure='projector_splitting_DLRA_riccati_integrator_rdp')
endif
dlra : do istep = 1, nsteps
! dynamical low-rank approximation solver
- call numerical_low_rank_splitting_riccati_step(X, LTI, Qc, Rinv, tau, torder, info, p_exptA, trans)
+ call projector_splitting_DLRA_riccati_step_rdp(X, A, B, CT, Qc, Rinv, tau, opts%mode, info, p_exptA, trans)
T = T + tau
!> here we can do some checks such as whether we have reached steady state
- if ( mod(istep,iostep) .eq. 0 ) then
+ if ( mod(istep,opts%chkstep) .eq. 0 ) then
if (verbose) then
write(*, *) "INFO : ", ISTEP, " steps of DLRA computed. T = ",T
endif
@@ -177,60 +239,51 @@ subroutine numerical_low_rank_splitting_riccati_integrator(X,LTI,Qc,Rinv,Tend,ta
if (allocated(S0)) deallocate(S0)
return
- end subroutine numerical_low_rank_splitting_riccati_integrator
+ end subroutine projector_splitting_DLRA_riccati_integrator_rdp
- !-----------------------------
- !----- UTILITIES -----
- !-----------------------------
+ !-----------------------
+ !----- PSI -----
+ !-----------------------
- subroutine numerical_low_rank_splitting_riccati_step(X, LTI, Qc, Rinv, tau, torder, info, exptA, iftrans)
- class(abstract_sym_low_rank_state), intent(inout) :: X
+ subroutine projector_splitting_DLRA_riccati_step_rdp(X, A, B, CT, Qc, Rinv, tau, mode, info, exptA, iftrans)
+ !! Driver for the time-stepper defining the splitting logic for each step of the
+ !! projector-splitting integrator
+ class(abstract_sym_low_rank_state_rdp), intent(inout) :: X
!! Low-Rank factors of the solution.
- class(abstract_lti_system), intent(in) :: LTI
- !! LTI dynamical system defining the problem.
- real(kind=wp), intent(in) :: Qc(:,:)
+ class(abstract_linop_rdp), intent(inout) :: A
+ !! Linear operator.
+ class(abstract_vector_rdp), intent(in) :: B(:)
+ !! System input.
+ class(abstract_vector_rdp), intent(in) :: CT(:)
+ !! System output.
+ real(wp), intent(in) :: Qc(:,:)
!! Measurement weights.
- real(kind=wp), intent(in) :: Rinv(:,:)
+ real(wp), intent(in) :: Rinv(:,:)
!! Inverse of the actuation weights.
- real(kind=wp), intent(inout) :: tau
+ real(wp), intent(inout) :: tau
!! Time step.
- integer, intent(in) :: torder
- !! Order of time integration. Only 1st (Lie splitting) and 2nd (Strang splitting) orders are implemented.
- integer, intent(out) :: info
+ integer, intent(in) :: mode
+ !! Order of time integration. Only 1st (Lie splitting) and 2nd (Strang splitting)
+ !! orders are implemented.
+ integer, intent(out) :: info
!! Information flag.
- procedure(abstract_exptA), optional :: exptA
+ procedure(abstract_exptA_rdp), optional :: exptA
!! Routine for computation of the exponential propagator (default: Krylov-based exponential operator).
- logical, optional, intent(in) :: iftrans + logical, optional, intent(in) :: iftrans !! Determine whether \(\mathbf{A}\) (default `.false.`) or \( \mathbf{A}^T\) (`.true.`) is used. ! Internal variables - integer :: istep, nsteps, integrator, rk - logical :: trans + integer :: istep, nsteps, rk + logical :: trans ! Optional argument trans = optval(iftrans, .false.) - if ( torder .eq. 1 ) then - integrator = 1 - else if ( torder .eq. 2 ) then - integrator = 2 - else if ( torder .gt. 2 ) then - write(*,*) "INFO : Time-integration order for the operator splitting of d > 2 & - &requires adjoint solves and is not implemented." - write(*,*) " Resetting torder = 2." - info = 1 - integrator = 2 - else - write(*,*) "INFO : Invalid time-integration order specified." - info = -1 - integrator = 2 - endif - - select case (integrator) + select case (mode) case (1) ! Lie-Trotter splitting - call M_forward_map (X, LTI, tau, info, exptA, trans) - call G_forward_map_riccati(X, LTI, Qc, Rinv, tau, info) + call M_forward_map (X, A, tau, info, exptA, trans) + call G_forward_map_riccati(X, B, CT, Qc, Rinv, tau, info) case (2) ! Strang splitting ! Prepare arrays for predictor step @@ -241,128 +294,148 @@ subroutine numerical_low_rank_splitting_riccati_step(X, LTI, Qc, Rinv, tau, tord if (.not. allocated(Ut)) allocate(Ut(1:rk), source=X%U(1:rk)) if (.not. allocated(Tt)) allocate(Tt(1:rk), source=X%U(1:rk)) if (.not. allocated(S0)) allocate(S0(1:rk,1:rk)) - call mat_zero(U0); call mat_zero(T0); call mat_zero(Ut); call mat_zero(Tt); S0 = 0.0_wp + call zero_basis(U0); call zero_basis(T0); call zero_basis(Ut); call zero_basis(Tt); S0 = 0.0_wp ! scratch arrays if (.not. allocated(Uwrk0)) allocate(Uwrk0(1:rk), source=X%U(1:rk)) if (.not. allocated(Swrk0)) allocate(Swrk0(1:rk,1:rk)) - call mat_zero(Uwrk0); Swrk0 = 0.0_wp + call zero_basis(Uwrk0); Swrk0 = 0.0_wp ! second order step - call M_forward_map (X, LTI, 0.5*tau, info, exptA, trans) + call M_forward_map (X, A, 0.5*tau, info, exptA, trans) ! Save current state - call mat_copy(U0, X%U); S0 = X%S ! --> save + call copy_basis(U0, X%U); S0 = X%S ! --> save ! Precompute T0 - call mat_mult(Uwrk0, U0, S0) ! K0 = U0 @ S0 - call apply_p_outerproduct_w(Swrk0, U0, Uwrk0, LTI%B, Rinv) ! (U0.T) @ B @ R^(-1) @ B.T @ K0 - call mat_mult(T0, Uwrk0, Swrk0) ! K0 @ Swrk0 + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, U0, S0); call copy_basis(Uwrk0, Xwrk) ! K0 = U0 @ S0 + call apply_premult_outerprod_w(Swrk0, U0, Uwrk0, B, Rinv) ! (U0.T) @ B @ R^(-1) @ B.T @ K0 + call linear_combination(Xwrk, Uwrk0, Swrk0); call copy_basis(T0, Xwrk) ! K0 @ Swrk0 + end block ! First order integration - call G_forward_map_riccati(X, LTI, Qc, Rinv, tau, info, ifpred=.true., T0=T0) + call G_forward_map_riccati(X, B, CT, Qc, Rinv, tau, info, ifpred=.true., T0=T0) ! Precompute Tt - call mat_copy(Ut, X%U) ! --> save - call mat_mult(Uwrk0, X%U, X%S) ! Kt = Ut @ St - call apply_p_outerproduct_w(Swrk0, X%U, Uwrk0, LTI%B, Rinv) ! (Ut.T) @ B @ R^(-1) @ B.T @ Kt - call mat_mult(Tt, Uwrk0, Swrk0) ! Kt @ Swrk0 + call copy_basis(Ut, X%U) ! --> save + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, X%U, X%S); call copy_basis(Uwrk0, Xwrk) ! Kt = Ut @ St + call apply_premult_outerprod_w(Swrk0, X%U, Uwrk0, B, Rinv) ! (Ut.T) @ B @ R^(-1) @ B.T @ Kt + call linear_combination(Xwrk, Uwrk0, Swrk0); call copy_basis(Tt, Xwrk) ! Kt @ Swrk0 + end block ! Reset state - call mat_copy(X%U, U0); X%S = S0 + call copy_basis(X%U, U0); X%S = S0 ! 
Second order integration - call G_forward_map_riccati(X, LTI, Qc, Rinv, tau, info, ifpred=.false., T0=T0, Tt=Tt, U0=U0, Ut=Ut) - call M_forward_map (X, LTI, 0.5*tau, info, exptA, trans) + call G_forward_map_riccati(X, B, CT, Qc, Rinv, tau, info, ifpred=.false., & + & T0=T0, Tt=Tt, U0=U0, Ut=Ut) + call M_forward_map (X, A, 0.5*tau, info, exptA, trans) end select return - end subroutine numerical_low_rank_splitting_riccati_step + end subroutine projector_splitting_DLRA_riccati_step_rdp - subroutine G_forward_map_riccati(X, LTI, Qc, Rinv, tau, info, ifpred, T0, Tt, U0, Ut) - class(abstract_sym_low_rank_state), intent(inout) :: X + subroutine G_forward_map_riccati_rdp(X, B, CT, Qc, Rinv, tau, info, ifpred, T0, Tt, U0, Ut) + !! This subroutine computes the solution of the non-stiff non-linear part of the + !! differential equation numerically using first-order explicit Euler. + !! The update of the full low-rank factorization requires three separate + !! steps called K, S, L. + class(abstract_sym_low_rank_state_rdp), intent(inout) :: X !! Low-Rank factors of the solution. - class(abstract_lti_system), intent(in) :: LTI - !! LTI dynamical system defining the problem. - real(kind=wp), intent(in) :: Qc(:,:) + class(abstract_vector_rdp), intent(in) :: B(:) + !! System input. + class(abstract_vector_rdp), intent(in) :: CT(:) + !! System output. + real(wp), intent(in) :: Qc(:,:) !! Measurement weights. - real(kind=wp), intent(in) :: Rinv(:,:) + real(wp), intent(in) :: Rinv(:,:) !! Inverse of the actuation weights. - real(kind=wp), intent(in) :: tau + real(wp), intent(in) :: tau !! Time step. - integer, intent(out) :: info + integer, intent(out) :: info !! Information flag. - logical, optional, intent(in) :: ifpred + logical, optional, intent(in) :: ifpred !! For Strang splitting: Determine whether we are in the predictor or corrector step - class(abstract_vector), optional, intent(inout) :: T0(:) ! will be reused as Gamma - class(abstract_vector), optional, intent(in) :: Tt(:) - class(abstract_vector), optional, intent(in) :: U0(:) - class(abstract_vector), optional, intent(in) :: Ut(:) + class(abstract_vector_rdp), optional, intent(inout) :: T0(:) ! will be reused as Gamma + class(abstract_vector_rdp), optional, intent(in) :: Tt(:) + class(abstract_vector_rdp), optional, intent(in) :: U0(:) + class(abstract_vector_rdp), optional, intent(in) :: Ut(:) !! Intermediate values ! Internal variables - integer :: rk + integer :: rk rk = size(X%U) if (.not. allocated(U1)) allocate(U1( 1:rk), source=X%U(1)); if (.not. allocated(QU)) allocate(QU( 1:rk), source=X%U(1)); if (.not. allocated(Swrk0)) allocate(Swrk0(1:rk,1:rk)) - call mat_zero(U1); call mat_zero(QU) + call zero_basis(U1); call zero_basis(QU); Swrk0 = 0.0_wp if (present(ifpred)) then ! second order in time if (ifpred) then ! predictor step with precomputed T0 - call K_step_riccati(X, U1, QU, LTI, Qc, Rinv, tau, info, reverse=.false., NL=T0) - call S_step_riccati(X, U1, QU, LTI, Qc, Rinv, tau, info, reverse=.false.) - call L_step_riccati(X, U1, LTI, Qc, Rinv, tau, info) + call K_step_riccati(X, U1, QU, B, CT, Qc, Rinv, tau, info, reverse=.false., NL=T0) + call S_step_riccati(X, U1, QU, B, CT, Qc, Rinv, tau, info, reverse=.false.) + call L_step_riccati(X, U1, B, CT, Qc, Rinv, tau, info) else ! corrector step with precomputed T0, Tt and U0, Ut ! forward steps based on T0, U0 (within X) - call K_step_riccati(X, U1, QU, LTI, Qc, Rinv, 0.5*tau, info, reverse=.false., NL=T0) - call S_step_riccati(X, U1, QU, LTI, Qc, Rinv, 0.5*tau, info, reverse=.false.) 
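   ! For orientation: with mode = 2 the driver applies the symmetric (Strang) composition
   !
   !    call M_forward_map         (X, A, 0.5*tau, info, exptA, trans)   ! stiff half-step
   !    call G_forward_map_riccati (X, B, CT, Qc, Rinv, tau, info)       ! non-stiff full step
   !    call M_forward_map         (X, A, 0.5*tau, info, exptA, trans)   ! stiff half-step
   !
   ! and the predictor/corrector logic in this branch upgrades the G map itself to second
   ! order: the quadratic Riccati term is first predicted (T0, Tt), averaged into Gamma,
   ! and then reused in the reverse S and K sweeps below.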
- call L_step_riccati(X, U1, LTI, Qc, Rinv, tau, info) + call K_step_riccati(X, U1, QU, B, CT, Qc, Rinv, 0.5*tau, info, reverse=.false., NL=T0) + call S_step_riccati(X, U1, QU, B, CT, Qc, Rinv, 0.5*tau, info, reverse=.false.) + call L_step_riccati(X, U1, B, CT, Qc, Rinv, tau, info) ! Compute Gamma = 0.5*(T0 @ (U1.T @ U0) + Tt @ (U1.T @ Ut)) - call mat_zero(QU); Swrk0 = 0.0_wp ! we use QU as a scratch array - call mat_mult(Swrk0, X%U, U0); call mat_mult(QU, T0, Swrk0) - call mat_mult(Swrk0, X%U, Ut); call mat_mult(T0, Tt, Swrk0) ! overwrite T0 with Gamma - call mat_axpby(T0, 0.5_wp, QU, 0.5_wp) + call zero_basis(QU); Swrk0 = 0.0_wp ! we use QU as a scratch array + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call innerprod(Swrk0, X%U, U0) + call linear_combination(Xwrk, T0, Swrk0); call copy_basis(QU, Xwrk) + call innerprod(Swrk0, X%U, Ut) + call linear_combination(Xwrk, Tt, Swrk0); call copy_basis(T0, Xwrk) ! overwrite T0 with Gamma + end block + call axpby_basis(T0, 0.5_wp, QU, 0.5_wp) ! Update X to most recent value - call mat_copy(X%U, U1) + call copy_basis(X%U, U1) ! reverse steps based on Gamma - call S_step_riccati(X, U1, QU, LTI, Qc, Rinv, 0.5*tau, info, reverse=.true., NL=T0) - call K_step_riccati(X, U1, QU, LTI, Qc, Rinv, 0.5*tau, info, reverse=.true., NL=T0) + call S_step_riccati(X, U1, QU, B, CT, Qc, Rinv, 0.5*tau, info, reverse=.true., NL=T0) + call K_step_riccati(X, U1, QU, B, CT, Qc, Rinv, 0.5*tau, info, reverse=.true., NL=T0) end if else ! first order in time - call K_step_riccati( X, U1, QU, LTI, Qc, Rinv, tau, info) - call S_step_riccati( X, U1, QU, LTI, Qc, Rinv, tau, info) - call L_step_riccati( X, U1, LTI, Qc, Rinv, tau, info) + call K_step_riccati( X, U1, QU, B, CT, Qc, Rinv, tau, info) + call S_step_riccati( X, U1, QU, B, CT, Qc, Rinv, tau, info) + call L_step_riccati( X, U1, B, CT, Qc, Rinv, tau, info) end if ! Copy updated low-rank factors to output - call mat_copy(X%U, U1) + call copy_basis(X%U, U1) return - end subroutine G_forward_map_riccati + end subroutine G_forward_map_riccati_rdp - subroutine K_step_riccati(X, U1, QU, LTI, Qc, Rinv, tau, info, reverse, NL) - class(abstract_sym_low_rank_state), intent(inout) :: X + subroutine K_step_riccati_rdp(X, U1, QU, B, CT, Qc, Rinv, tau, info, reverse, NL) + class(abstract_sym_low_rank_state_rdp),intent(inout) :: X !! Low-Rank factors of the solution. - class(abstract_vector), intent(out) :: U1(:) + class(abstract_vector_rdp), intent(out) :: U1(:) !! Intermediate low-rank factor. - class(abstract_vector), intent(inout) :: QU(:) + class(abstract_vector_rdp), intent(inout) :: QU(:) !! Precomputed application of the inhomogeneity. - class(abstract_lti_system), intent(in) :: LTI - !! LTI dynamical system defining the problem. - real(kind=wp), intent(in) :: Qc(:,:) + class(abstract_vector_rdp), intent(in) :: B(:) + !! System input. + class(abstract_vector_rdp), intent(in) :: CT(:) + !! System output. + real(wp), intent(in) :: Qc(:,:) !! Measurement weights. - real(kind=wp), intent(in) :: Rinv(:,:) + real(wp), intent(in) :: Rinv(:,:) !! Inverse of the actuation weights. - real(kind=wp), intent(in) :: tau + real(wp), intent(in) :: tau !! Time step. - integer, intent(out) :: info + integer, intent(out) :: info !! Information flag. - logical, optional, intent(in) :: reverse + logical, optional, intent(in) :: reverse !! For Strang splitting: Determine if we are in forward or reverse branch - class(abstract_vector), optional, intent(in) :: NL(:) + class(abstract_vector_rdp), optional, intent(in) :: NL(:) !! 
Precomputed non-linear term. ! Internal variables - integer, allocatable :: perm(:) ! Permutation vector - integer :: rk - logical :: reverse_order + integer, allocatable :: perm(:) ! Permutation vector + integer :: rk + logical :: reverse_order ! Optional arguments reverse_order = optval(reverse, .false.) @@ -373,62 +446,73 @@ subroutine K_step_riccati(X, U1, QU, LTI, Qc, Rinv, tau, info, reverse, NL) if (.not. allocated(Uwrk0)) allocate(Uwrk0(1:rk), source=X%U(1)); if (.not. allocated(Swrk0)) allocate(Swrk0(1:rk,1:rk)); allocate(perm(1:rk)); perm = 0 - call mat_zero(Uwrk0); Swrk0 = 0.0_wp + call zero_basis(Uwrk0); Swrk0 = 0.0_wp ! Constant part --> QU if (.not. reverse_order) then ! compute QU and pass to S step - call apply_outerproduct_w(QU, X%U, LTI%CT, Qc) + call apply_outerprod_w(QU, X%U, CT, Qc) end if ! Non-linear part --> Uwrk0 - call mat_mult(U1, X%U, X%S) ! K0 = U0 @ S0 + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, X%U, X%S) ! K0 = U0 @ S0 + call copy_basis(U1, Xwrk) + end block if (.not.present(NL)) then - call apply_p_outerproduct_w(Swrk0, X%U, U1, LTI%B, Rinv) ! (U0.T) @ B @ R^(-1) @ B.T @ K0 - call mat_mult(Uwrk0, U1, Swrk0) ! K0 @ Swrk0 + call apply_premult_outerprod_w(Swrk0, X%U, U1, B, Rinv) ! (U0.T) @ B @ R^(-1) @ B.T @ K0 + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, U1, Swrk0) ! K0 @ Swrk0 + call copy_basis(Uwrk0, Xwrk) + end block else ! non-linear term precomputed - call mat_copy(Uwrk0, NL) + call copy_basis(Uwrk0, NL) end if ! Combine to form G( K @ U.T ) @ U --> Uwrk0 - call mat_axpby(Uwrk0, -1.0_wp, QU, 1.0_wp) + call axpby_basis(Uwrk0, -1.0_wp, QU, 1.0_wp) ! Construct intermediate solution U1 - call mat_axpby(U1, 1.0_wp, Uwrk0, tau) ! K0 + tau*Kdot + call axpby_basis(U1, 1.0_wp, Uwrk0, tau) ! K0 + tau*Kdot ! Orthonormalize in-place - call qr_factorization(U1, Swrk0, perm, info, ifpivot = .true.) - call apply_permutation(Swrk0, perm, trans = .true.) + call qr(U1, Swrk0, perm, info) + call check_info(info, 'qr_pivot', module=this_module, procedure='K_step_Riccati_rdp') + call apply_inverse_permutation_matrix(Swrk0, perm) X%S = Swrk0 return - end subroutine K_step_riccati + end subroutine K_step_riccati_rdp - subroutine S_step_riccati(X, U1, QU, LTI, Qc, Rinv, tau, info, reverse, NL) - class(abstract_sym_low_rank_state), intent(inout) :: X + subroutine S_step_riccati_rdp(X, U1, QU, B, CT, Qc, Rinv, tau, info, reverse, NL) + class(abstract_sym_low_rank_state_rdp),intent(inout) :: X !! Low-Rank factors of the solution. - class(abstract_vector), intent(in) :: U1(:) + class(abstract_vector_rdp), intent(in) :: U1(:) !! Intermediate low-rank factor. - class(abstract_vector), intent(inout) :: QU(:) + class(abstract_vector_rdp), intent(inout) :: QU(:) !! Precomputed application of the inhomogeneity. - class(abstract_lti_system), intent(in) :: LTI - !! LTI dynamical system defining the problem. - real(kind=wp), intent(in) :: Qc(:,:) + class(abstract_vector_rdp), intent(in) :: B(:) + !! System input. + class(abstract_vector_rdp), intent(in) :: CT(:) + !! System output. + real(wp), intent(in) :: Qc(:,:) !! Measurement weights. - real(kind=wp), intent(in) :: Rinv(:,:) + real(wp), intent(in) :: Rinv(:,:) !! Inverse of the actuation weights. - real(kind=wp), intent(in) :: tau + real(wp), intent(in) :: tau !! Time step. - integer, intent(out) :: info + integer, intent(out) :: info !! Information flag. 
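   ! For intuition, a dense-matrix analogue of the K step implemented above: explicit
   ! Euler on K = U @ S with the Riccati right-hand side, followed by re-orthonormalization.
   ! This is a standalone sketch with plain arrays and random placeholder data; it assumes
   ! the qr subroutine of stdlib_linalg, whereas the library operates on abstract vectors
   ! via innerprod/linear_combination and a pivoted QR.
   program dense_K_step_sketch
      use stdlib_linalg, only: qr
      implicit none
      integer, parameter :: wp = selected_real_kind(15, 307)
      integer, parameter :: n = 50, rk = 4
      real(wp) :: U(n,rk), S(rk,rk), K(n,rk), U1(n,rk), R(rk,rk)
      real(wp) :: QW(n,n), PW(n,n), tau
      ! QW and PW stand for the dense kernels CT @ Qc @ CT^T and B @ Rinv @ B^T.
      call random_number(U); call random_number(S)
      call random_number(QW); call random_number(PW)
      QW = 0.5_wp*(QW + transpose(QW)); PW = 0.5_wp*(PW + transpose(PW))
      tau = 1.0e-3_wp
      K = matmul(U, S)                                        ! K0 = U0 @ S0
      K = K + tau*(matmul(QW, U) - matmul(K, matmul(matmul(transpose(U), PW), K)))
      call qr(K, U1, R)                                       ! K1 = U1 @ R
      S = R                                                   ! updated coefficient matrix
   end program dense_K_step_sketch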
- logical, optional, intent(in) :: reverse + logical, optional, intent(in) :: reverse !! For Strang splitting: Determine if we are in forward or reverse branch - class(abstract_vector), optional, intent(in) :: NL(:) + class(abstract_vector_rdp), optional, intent(in) :: NL(:) !! Precomputed non-linear term. ! Internal variables - integer :: rk - logical :: reverse_order + integer :: rk + logical :: reverse_order info = 0 @@ -443,45 +527,47 @@ subroutine S_step_riccati(X, U1, QU, LTI, Qc, Rinv, tau, info, reverse, NL) ! Constant part --> Swrk0 if (reverse_order) then ! Compute QU and pass to K step - call apply_outerproduct_w(QU, X%U, LTI%CT, Qc) + call apply_outerprod_w(QU, X%U, CT, Qc) endif - call mat_mult(Swrk0, U1, QU) + call innerprod(Swrk0, U1, QU) ! Non-linear part --> Swrk1 if (.not.present(NL)) then - call apply_p_outerproduct_w(Swrk1, X%U, U1, LTI%B, Rinv) ! U0.T @ B @ R^(-1) @ B.T @ U1 - Swrk1 = matmul(X%S, matmul(Swrk1, X%S)) ! S0 @ (U0.T @ B @ R^(-1) @ B.T @ U1) @ S0 + call apply_premult_outerprod_w(Swrk1, X%U, U1, B, Rinv) ! U0.T @ B @ R^(-1) @ B.T @ U1 + Swrk1 = matmul(X%S, matmul(Swrk1, X%S)) ! S0 @ (U0.T @ B @ R^(-1) @ B.T @ U1) @ S0 else ! Non-linear term precomputed - call mat_mult(Swrk1, U1, NL) + call innerprod(Swrk1, U1, NL) end if ! Combine to form -U1.T @ G( U1 @ S @ U0.T ) @ U0 - call mat_axpby(Swrk0, -1.0_wp, Swrk1, 1.0_wp) + Swrk0 = Swrk1 - Swrk0 ! Construct intermediate coefficient matrix - call mat_axpby(X%S, 1.0_wp, Swrk0, tau) + X%S = X%S + tau*Swrk0 return - end subroutine S_step_riccati + end subroutine S_step_riccati_rdp - subroutine L_step_riccati(X, U1, LTI, Qc, Rinv, tau, info) - class(abstract_sym_low_rank_state), intent(inout) :: X + subroutine L_step_riccati_rdp(X, U1, B, CT, Qc, Rinv, tau, info) + class(abstract_sym_low_rank_state_rdp),intent(inout) :: X !! Low-Rank factors of the solution. - class(abstract_vector), intent(in) :: U1(:) + class(abstract_vector_rdp), intent(in) :: U1(:) !! Intermediate low-rank factor. - class(abstract_lti_system), intent(in) :: LTI - !! LTI dynamical system defining the problem. - real(kind=wp), intent(in) :: Qc(:,:) + class(abstract_vector_rdp), intent(in) :: B(:) + !! System input. + class(abstract_vector_rdp), intent(in) :: CT(:) + !! System output. + real(wp), intent(in) :: Qc(:,:) !! Measurement weights. - real(kind=wp), intent(in) :: Rinv(:,:) + real(wp), intent(in) :: Rinv(:,:) !! Inverse of the actuation weights. - real(kind=wp), intent(in) :: tau + real(wp), intent(in) :: tau !! Time step. - integer, intent(out) :: info + integer, intent(out) :: info !! Information flag. ! Internal variables - integer :: rk + integer :: rk info = 0 @@ -489,27 +575,34 @@ subroutine L_step_riccati(X, U1, LTI, Qc, Rinv, tau, info) if (.not. allocated(Uwrk0)) allocate(Uwrk0(1:rk), source=X%U(1)) if (.not. allocated(Uwrk1)) allocate(Uwrk1(1:rk), source=X%U(1)) if (.not. allocated(Swrk0)) allocate(Swrk0(1:rk,1:rk)) - call mat_zero(Uwrk0); call mat_zero(Uwrk1); Swrk0 = 0.0_wp - - call mat_mult(Uwrk1, X%U, transpose(X%S)) ! L0.T U0 @ S.T + call zero_basis(Uwrk0); call zero_basis(Uwrk1); Swrk0 = 0.0_wp + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, X%U, transpose(X%S)) ! L0.T U0 @ S.T + call copy_basis(Uwrk1, Xwrk) + end block ! Constant part --> Uwrk0 - call apply_outerproduct_w(Uwrk0, U1, LTI%CT, Qc) + call apply_outerprod_w(Uwrk0, U1, CT, Qc) ! Non-linear part --> U - call apply_p_outerproduct_w(Swrk0, U1, Uwrk1, LTI%B, Rinv) ! 
U1.T @ B @ R^(-1) @ B.T @ U0 @ S.T - call mat_mult(X%U, Uwrk1, Swrk0) ! (U0 @ S.T) @ (U1.T @ B @ R^(-1) @ B.T @ U0 @ S.T) + call apply_premult_outerprod_w(Swrk0, U1, Uwrk1, B, Rinv) ! U1.T @ B @ R^(-1) @ B.T @ U0 @ S.T + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, Uwrk1, Swrk0) ! (U0 @ S.T) @ (U1.T @ B @ R^(-1) @ B.T @ U0 @ S.T) + call copy_basis(X%U, Xwrk) + end block ! Combine to form U1.T @ G( U1.T@L.T ) - call mat_axpby(Uwrk0, 1.0_wp, X%U, -1.0_wp) + call axpby_basis(Uwrk0, 1.0_wp, X%U, -1.0_wp) ! Construct solution L1.T - call mat_axpby(Uwrk1, 1.0_wp, Uwrk0, tau) ! L0.T + tau*Ldot.T + call axpby_basis(Uwrk1, 1.0_wp, Uwrk0, tau) ! L0.T + tau*Ldot.T ! Update coefficient matrix - call mat_mult(X%S, Uwrk1, U1) + call innerprod(X%S, Uwrk1, U1) return - end subroutine L_step_riccati + end subroutine L_step_riccati_rdp -end module lightROM_RiccatiSolvers +end module LightROM_RiccatiSolvers diff --git a/src/RiccatiUtils.f90 b/src/RiccatiUtils.f90 index 09547cd..13ab7f7 100644 --- a/src/RiccatiUtils.f90 +++ b/src/RiccatiUtils.f90 @@ -1,111 +1,167 @@ module LightROM_RiccatiUtils use LightKrylov + use LightKrylov, only: wp => dp + use LightKrylov_AbstractVectors + use LightROM_AbstractLTIsystems - use LightKrylov_utils, only : assert_shape + use LightKrylov_Utils, only : assert_shape implicit none - ! scratch arrays - real(kind=wp) , allocatable :: Swrk(:,:) - private - public :: apply_outerproduct_w, apply_p_outerproduct_w, precompute_NL + + public :: apply_outerprod_w + public :: apply_premult_outerprod_w + public :: precompute_NL !------------------------------ !----- INTERFACES ----- !------------------------------ + interface apply_outerprod_w + module procedure apply_outerprod_w_vector_rdp + module procedure apply_outerprod_w_basis_rdp + end interface + + interface apply_premult_outerprod_w + module procedure apply_premult_outerprod_w_vector_rdp + module procedure apply_premult_outerprod_w_basis_rdp + end interface + interface precompute_NL - module procedure precompute_NL_K - module procedure precompute_NL_S + module procedure precompute_NL_K_rdp + module procedure precompute_NL_S_rdp end interface contains - subroutine apply_outerproduct_w(Z, U, B, W) + subroutine apply_outerprod_w_vector_rdp(z, u, B, W) + !! Computes the matrix product \( \mathbf{z} = \mathbf{B} \mathbf{W} \mathbf{B}^T \mathbf{u} \) + class(abstract_vector_rdp), intent(out) :: z + class(abstract_vector_rdp), intent(in) :: u + class(abstract_vector_rdp), intent(in) :: B(:) + real(wp), intent(in) :: W(:,:) + ! internals + real(wp) :: wrk(size(B)) + + call assert_shape(W, (/ size(B), size(B) /), 'apply_outerprod_w_vector_rdp', 'W') + + call innerprod(wrk, B, u) + block + class(abstract_vector_rdp), allocatable :: xwrk + call linear_combination(xwrk, B, matmul(W, wrk)) + call z%zero(); call z%add(xwrk) + end block + + return + end subroutine apply_outerprod_w_vector_rdp + + subroutine apply_outerprod_w_basis_rdp(Z, U, B, W) !! Computes the matrix product \( \mathbf{Z} = \mathbf{B} \mathbf{W} \mathbf{B}^T \mathbf{U} \) - class(abstract_vector), intent(out) :: Z(:) - class(abstract_vector), intent(in) :: U(:) - class(abstract_vector), intent(in) :: B(:) - real(kind=wp), intent(in) :: W(:,:) + class(abstract_vector_rdp), intent(out) :: Z(:) + class(abstract_vector_rdp), intent(in) :: U(:) + class(abstract_vector_rdp), intent(in) :: B(:) + real(wp), intent(in) :: W(:,:) ! 
internals - integer :: p, rk - real(kind=wp), allocatable :: wrk(:,:) + real(wp) :: wrk(size(B),size(U)) + + call assert_shape(W, (/ size(B), size(B) /), 'apply_outerprod_w_basis_rdp', 'W') - p = size(B) - rk = size(U) - allocate(wrk(1:p,1:rk)); wrk = 0.0_wp + call zero_basis(Z) + call innerprod(wrk, B, U) + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, B, matmul(W, wrk)) + call copy_basis(Z, Xwrk) + end block + + return + end subroutine apply_outerprod_w_basis_rdp - call assert_shape(W, (/ p, p /), 'apply_outerproduct_w', 'W') + subroutine apply_premult_outerprod_w_vector_rdp(m, uL, uR, B, W) + !! Computes the matrix product \( \mathbf{M} = \mathbf{U}_L^T \mathbf{B} \mathbf{W} \mathbf{B}^T \mathbf{U}_R \) + real(wp), intent(out) :: m + class(abstract_vector_rdp), intent(in) :: uL + class(abstract_vector_rdp), intent(in) :: uR + class(abstract_vector_rdp), intent(in) :: B(:) + real(wp), intent(in) :: W(:,:) + ! internals + real(wp) :: BTuR(size(B)) + real(wp) :: uLTB(size(B)) + + call assert_shape(W, (/ size(B), size(B) /), 'apply_premult_outerprod_w_vector_rdp', 'W') - call mat_zero(Z) - call mat_mult(wrk, B, U) - call mat_mult(Z, B, matmul(W, wrk)) + BTuR = 0.0_wp; uLTB = 0.0_wp; m = 0.0_wp + call innerprod(BTuR, B, uR) + call innerprod(uLTB, B, uL) + + m = dot_product( uLTB, matmul( W, BTuR ) ) return - end subroutine apply_outerproduct_w + end subroutine apply_premult_outerprod_w_vector_rdp - subroutine apply_p_outerproduct_w(M, UL, UR, B, W) + subroutine apply_premult_outerprod_w_basis_rdp(M, UL, UR, B, W) !! Computes the matrix product \( \mathbf{M} = \mathbf{U}_L^T \mathbf{B} \mathbf{W} \mathbf{B}^T \mathbf{U}_R \) - real(kind=wp), intent(out) :: M(:,:) - class(abstract_vector), intent(in) :: UL(:) - class(abstract_vector), intent(in) :: UR(:) - class(abstract_vector), intent(in) :: B(:) - real(kind=wp), intent(in) :: W(:,:) + real(wp), intent(out) :: M(:,:) + class(abstract_vector_rdp), intent(in) :: UL(:) + class(abstract_vector_rdp), intent(in) :: UR(:) + class(abstract_vector_rdp), intent(in) :: B(:) + real(wp), intent(in) :: W(:,:) ! internals - real(kind=wp) :: BTUR(size(B),size(UR)) - real(kind=wp) :: ULTB(size(UL),size(B)) + real(wp) :: BTUR(size(B),size(UR)) + real(wp) :: ULTB(size(UL),size(B)) - call assert_shape(M, (/ size(UL), size(UR) /), 'apply_p_outerproduct_w', 'M') + call assert_shape(W, (/ size(B), size(B) /), 'apply_premult_outerprod_w_basis_rdp', 'W') + call assert_shape(M, (/ size(UL), size(UR) /), 'apply_premult_outerprod_w_basis_rdp', 'M') BTUR = 0.0_wp; ULTB = 0.0_wp; M = 0.0_wp - call mat_mult(BTUR, B, UR) - call mat_mult(ULTB, UL, B) + call innerprod(BTUR, B, UR) + call innerprod(ULTB, UL, B) M = matmul( ULTB, matmul( W, BTUR ) ) return - end subroutine apply_p_outerproduct_w + end subroutine apply_premult_outerprod_w_basis_rdp - subroutine precompute_NL_K(N, X, K, B, W) + subroutine precompute_NL_K_rdp(N, X, K, B, W) !! Computes the matrix product \( \mathbf{N} = \mathbf{K} \mathbf{U}_L^T \mathbf{B} \mathbf{W} \mathbf{B}^T \mathbf{K} \) - class(abstract_vector), intent(out) :: N(:) - class(abstract_sym_low_rank_state), intent(in) :: X - class(abstract_vector), intent(in) :: K(:) - class(abstract_vector), intent(in) :: B(:) - real(kind=wp), intent(in) :: W(:,:) + class(abstract_vector_rdp), intent(out) :: N(:) + class(abstract_sym_low_rank_state_rdp), intent(in) :: X + class(abstract_vector_rdp), intent(in) :: K(:) + class(abstract_vector_rdp), intent(in) :: B(:) + real(wp), intent(in) :: W(:,:) ! 
internals - integer :: rk - - rk = size(K) - if (.not.allocated(Swrk)) allocate(Swrk(1:rk,1:rk)) - Swrk = 0.0_wp + real(wp) :: wrk(X%rk,X%rk) - call apply_p_outerproduct_w(Swrk, X%U, K, B, W) ! (U.T) @ B @ R^(-1) @ B.T @ K - call mat_mult(N, K, Swrk) ! K @ Swrk + call assert_shape(W, (/ size(B), size(B) /), 'precompute_NL_K_rdp', 'W') + + call apply_premult_outerprod_w(wrk, X%U(1:X%rk), K, B, W) ! (U.T) @ B @ R^(-1) @ B.T @ K + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, K, wrk) ! K @ (U.T @ B @ R^(-1) @ B.T @ K) + call copy_basis(N, Xwrk) + end block return - end subroutine precompute_NL_K + end subroutine precompute_NL_K_rdp - subroutine precompute_NL_S(N, X, U, B, W) + subroutine precompute_NL_S_rdp(N, X, U, B, W) !! Computes the matrix product \( \mathbf{N} = \mathbf{S} \mathbf{U}_L^T \mathbf{B} \mathbf{W} \mathbf{B}^T \mathbf{S} \) - real(kind=wp), intent(out) :: N(:,:) - class(abstract_sym_low_rank_state), intent(in) :: X - class(abstract_vector), intent(in) :: U(:) - class(abstract_vector), intent(in) :: B(:) - real(kind=wp), intent(in) :: W(:,:) + real(wp), intent(out) :: N(:,:) + class(abstract_sym_low_rank_state_rdp), intent(in) :: X + class(abstract_vector_rdp), intent(in) :: U(:) + class(abstract_vector_rdp), intent(in) :: B(:) + real(wp), intent(in) :: W(:,:) ! internals - integer :: rk - - rk = size(U) - if (.not.allocated(Swrk)) allocate(Swrk(1:rk,1:rk)) - Swrk = 0.0_wp + real(wp) :: wrk(X%rk,X%rk) + + call assert_shape(W, (/ size(B), size(B) /), 'precompute_NL_S_rdp', 'W') - call apply_p_outerproduct_w(Swrk, U, X%U, B, W) ! U.T @ B @ R^(-1) @ B.T @ X%U - N = matmul(X%S, matmul(Swrk, X%S)) ! X%S @ (U.T @ B @ R^(-1) @ B.T @ X%U) @ X%S + call apply_premult_outerprod_w(wrk, U, X%U(1:X%rk), B, W) ! U.T @ B @ R^(-1) @ B.T @ X%U + N = matmul(X%S(1:X%rk,1:X%rk), matmul(wrk, X%S(1:X%rk,1:X%rk))) ! X%S @ (U.T @ B @ R^(-1) @ B.T @ X%U) @ X%S return - end subroutine precompute_NL_S + end subroutine precompute_NL_S_rdp end module LightROM_RiccatiUtils \ No newline at end of file diff --git a/src/Utils.f90 b/src/Utils.f90 index 7e701b9..495ed5d 100644 --- a/src/Utils.f90 +++ b/src/Utils.f90 @@ -1,106 +1,142 @@ -module LightROM_utils +module LightROM_Utils + ! stdlib + use stdlib_linalg, only : eye, diag, svd, svdvals, is_symmetric + use stdlib_optval, only : optval + ! LightKrylov for Linear Algebra use LightKrylov - use LightKrylov_utils + use LightKrylov, only : dp, wp => dp + use LightKrylov_Logger + use LightKrylov_AbstractVectors + use LightKrylov_BaseKrylov, only : orthogonalize_against_basis + use LightKrylov_Utils, only : abstract_opts + ! 
LightROM use LightROM_AbstractLTIsystems - - use stdlib_linalg, only : eye, diag - use stdlib_optval, only : optval + implicit none - private - public Approximate_Balanced_Truncation, ROM_Petrov_Galerkin_Projection, ROM_Galerkin_Projection + private :: this_module + character(len=*), parameter :: this_module = 'LightROM_Utils' + + public :: dlra_opts + public :: compute_norm + public :: is_converged + public :: project_onto_common_basis + public :: Balancing_Transformation + public :: ROM_Petrov_Galerkin_Projection + public :: ROM_Galerkin_Projection + + interface Balancing_Transformation + module procedure Balancing_Transformation_rdp + end interface + + interface ROM_Petrov_Galerkin_Projection + module procedure ROM_Petrov_Galerkin_Projection_rdp + end interface + + interface ROM_Galerkin_Projection + module procedure ROM_Galerkin_Projection_rdp + end interface + + interface project_onto_common_basis + module procedure project_onto_common_basis_rdp + end interface + + type, extends(abstract_opts), public :: dlra_opts + !! Options container for the (rank-adaptive) projector-splitting dynalical low-rank approximation + !! integrator + integer :: mode = 1 + !! Time integration mode. Only 1st order (Lie splitting - mode 1) and + !! 2nd order (Strang splitting - mode 2) are implemented. (default: 1) + logical :: verbose = .false. + !! Verbosity control (default: .false.) + integer :: chkstep = 10 + !! Time step interval at which convergence is checked and runtime information is printed (default: 10) + integer :: chktime = 1.0_wp + !! Simulation time interval at which convergence is checked and runtime information is printed (default: 1.0) + logical :: chkctrl_time = .true. + !! IO control: use time instead of timestep control (default: .true.) + real(wp) :: inc_tol = 1e-6_wp + !! Tolerance on the increment norm for convergence (default: 1e-6) + logical :: relative_norm = .true. + !! Tolerance control: Check convergence for dX/X (true) or dX (false)? (default: .true.) + ! + ! RANK-ADPATIVE SPECIFICS + ! + logical :: if_rank_adaptive = .true. + !! Allow rank-adaptivity + real(wp) :: tol = 1e-6_wp + !! Tolerance on the extra singular value to determine rank-adaptation + logical :: use_err_est = .false. + !! Choose whether to base the tolerance on 'tol' or on the splitting error estimate + integer :: err_est_step = 10 + !! Time step interval for recomputing the splitting error estimate (only of use_err_est = .true.) + end type contains - subroutine approximate_Balanced_Truncation(T,S,ST,X,Y) - !! Computes the the biorthogonal balancing transformation \( \mathbf{T}, \mathbf{S}^T \) from the - !! low-rank approximations of the obervability and controlability Gramians, \( \mathbf{W}_o \) and - !! \( \mathbf{W}_c \) respectively, given as: - !! \[ - !! \mathbf{W}_o = \mathbf{X}_o \mathbf{S}_o \mathbf{X}_o^T - !! \quad \text{and} \quad - !! \mathbf{W}_c = \mathbf{Y}_c \mathbf{S}_c \mathbf{Y}_c^T - !! \] + subroutine Balancing_Transformation_rdp(T, S, Tinv, Xc, Yo) + !! Computes the the biorthogonal balancing transformation \( \mathbf{T}, \mathbf{T}^{-1} \) from the + !! low-rank representation of the SVD of the controllability and observability Gramians, \( \mathbf{W}_c \) + !! and \( \mathbf{W}_o \) respectively, given as: + !! \[ \begin{align} + !! \mathbf{W}_c &= \mathbf{X}_c \mathbf{X}_c^T \\ + !! \mathbf{W}_o &= \mathbf{Y}_o \mathbf{Y}_o^T + !! \end{align} \] !! - !! Given the SVD of the cross-Gramians: - !! 
$$ \mathbf{S}_c^T \mathbf{Y}_c^T \mathbf{X}_o \mathbf{S}_o = \mathbf{U} \mathbf{S} \mathbf{V}^T $$ + !! Given the SVD of the cross-Gramian: + !! $$ \mathbf{X}_c^T \mathbf{Y}_o = \mathbf{U} \mathbf{S} \mathbf{V}^T $$ !! the balancing transformation and its inverse are given by: !! \[ \begin{align} !! \mathbf{T} &= \mathbf{X}_o \mathbf{S}_o^{1/2} \mathbf{V} \mathbf{S}^{-1/2} \\ - !! \mathbf{S}^T &= \mathbf{Y}_c \mathbf{S}_c^{1/2} \mathbf{U} \mathbf{S}^{-1/2} + !! \mathbf{Tinv}^T &= \mathbf{Y}_c \mathbf{S}_c^{1/2} \mathbf{U} \mathbf{S}^{-1/2} !! \end{align} \] !! Note: In the current implementation, the numerical rank of the SVD is not considered. - class(abstract_vector), intent(out) :: T(:) + class(abstract_vector_rdp), intent(out) :: T(:) !! Balancing transformation - real(kind=wp), intent(out) :: S(:) + real(wp), intent(out) :: S(:) !! Singular values of the BT - class(abstract_vector), intent(out) :: ST(:) + class(abstract_vector_rdp), intent(out) :: Tinv(:) !! Inverse balancing transformation - class(abstract_sym_low_rank_state), intent(inout) :: X + class(abstract_vector_rdp), intent(in) :: Xc(:) !! Low-rank representation of the Controllability Gramian - class(abstract_sym_low_rank_state), intent(inout) :: Y + class(abstract_vector_rdp), intent(in) :: Yo(:) !! Low-rank representation of the Observability Gramian ! internal variables integer :: i, rkc, rko, rk, rkmin - real(kind=wp), allocatable :: S_svd(:,:) - real(kind=wp), allocatable :: Swrk(:,:) - real(kind=wp), allocatable :: Sigma(:) - real(kind=wp), allocatable :: V(:,:), W(:,:) - class(abstract_vector), allocatable :: Uwrk(:) - - rkc = size(X%U) - rko = size(Y%U) - allocate(S_svd(rkc,rko)) - ! scratch arrays - rk = max(rkc, rko) - rkmin = min(rkc, rko) - allocate(Swrk(rk,rk)) - allocate(Uwrk(rk), source=T(1)) - - ! compute inner product with Gramian bases - call mat_mult(S_svd, Y%U, X%U) - - ! compute Cholesky factors of Y update LR factor with Cholesky factor - Swrk = 0.0_wp - call sqrtm(Swrk(1:rkc,1:rkc), Y%S) - call mat_zero(Uwrk) - call mat_mult(Uwrk(1:rkc), Y%U, Swrk(1:rkc,1:rkc)) - call mat_copy(Y%U, Uwrk(1:rkc)) - ! Update data matrix - S_svd = matmul(Swrk(1:rkc,1:rkc), S_svd) - - ! compute Cholesky factors of X update LR factor with Cholesky factor - Swrk = 0.0_wp - call sqrtm(Swrk(1:rko,1:rko), X%S) - call mat_zero(Uwrk) - call mat_mult(Uwrk(1:rkc), X%U, Swrk(1:rkc,1:rkc)) - call mat_copy(X%U, Uwrk(1:rkc)) - ! Update data matrix - S_svd = matmul(S_svd, Swrk(1:rko,1:rko)) - - ! Compute BT - allocate(V(rkc,rkc)); allocate(W(rko,rko)); allocate(Sigma(rkmin)) - call svd(S_svd, V, Sigma, W) - - ! We truncate in case come singular values are very small - s_inv: do i = 1, rkmin - if (Sigma(i) < atol) then - exit s_inv - rk = i-1 - end if - Sigma(i) = 1/sqrt(Sigma(i)) - enddo s_inv - - call mat_mult( T, X%U, matmul( W(:, 1:rk), diag(sigma(1:rk)))) - call mat_mult(ST, Y%U, matmul(transpose(V(1:rk, :)), diag(sigma(1:rk)))) + real(wp), allocatable :: LRCrossGramian(:,:) + real(wp), allocatable :: Swrk(:,:) + real(wp), allocatable :: Sigma(:) + real(wp), allocatable :: V(:,:), W(:,:) + + rkc = size(Xc) + rko = size(Yo) + rk = max(rkc, rko) + rkmin = min(rkc, rko) + + ! 
compute inner product with Gramian bases and compte SVD + allocate(LRCrossGramian(rkc,rko)); allocate(V(rko,rko)); allocate(W(rkc,rkc)) + call innerprod(LRCrossGramian, Xc, Yo) + call svd(LRCrossGramian, S, V, W) + allocate(Sigma(rkmin)) + do i = 1, rkmin + Sigma(i) = 1/sqrt(S(i)) + enddo + block + class(abstract_vector_rdp), allocatable :: Xwrk(:) + call linear_combination(Xwrk, Yo(1:rkmin), matmul(W(1:rkmin,1:rkmin), diag(Sigma))) + call copy_basis(T(1:rkmin), Xwrk) + call linear_combination(Xwrk, Xc(1:rkmin), matmul(V(1:rkmin,1:rkmin), diag(Sigma))) + call copy_basis(Tinv(1:rkmin), Xwrk) + end block + return - end subroutine + end subroutine Balancing_Transformation_rdp - subroutine ROM_Petrov_Galerkin_Projection(romLTI, LTI, T, ST) - !! Computes the Reduced-Order of the input LTI dynamical system via Petrov-Galerkin projection using - !! the biorthogonal projection bases \( \mathbf{V} \) and \( \mathbf{W} \) with + subroutine ROM_Petrov_Galerkin_Projection_rdp(Ahat, Bhat, Chat, D, LTI, T, Tinv) + !! Computes the Reduced-Order Model of the input LTI dynamical system via Petrov-Galerkin projection + !! using the biorthogonal projection bases \( \mathbf{V} \) and \( \mathbf{W} \) with !! \( \mathbf{W}^T \mathbf{V} = \mathbf{I} \). !! !! Given an LTI system defined by the matrices \( \mathbf{A}, \mathbf{B}, \mathbf{C}, \mathbf{D}\), @@ -112,34 +148,49 @@ subroutine ROM_Petrov_Galerkin_Projection(romLTI, LTI, T, ST) !! \hat{\mathbf{C}} = \mathbf{C} \mathbf{V}, \qquad !! \hat{\mathbf{D}} = \mathbf{D} . !! \] - class(abstract_ROM_lti_system), intent(out) :: romLTI - !! Reduced-order LTI - class(abstract_lti_system), intent(in) :: LTI + real(wp), allocatable, intent(out) :: Ahat(:, :) + !! Reduced-order dynamics matrix. + real(wp), allocatable, intent(out) :: Bhat(:, :) + !! Reduced-order input-to-state matrix. + real(wp), allocatable, intent(out) :: Chat(:, :) + !! Reduced-order state-to-output matrix. + real(wp), allocatable, intent(out) :: D(:, :) + !! Feed-through matrix + class(abstract_lti_system_rdp), intent(in) :: LTI !! Large-scale LTI to project - class(abstract_vector), intent(inout) :: T(:) + class(abstract_vector_rdp), intent(in) :: T(:) !! Balancing transformation - class(abstract_vector), intent(in) :: ST(:) + class(abstract_vector_rdp), intent(in) :: Tinv(:) !! Inverse balancing transformation ! internal variables - integer :: i, rk - class(abstract_vector), allocatable :: Uwrk(:) + integer :: i, rk, rkc, rkb + class(abstract_vector_rdp), allocatable :: Uwrk(:) + real(wp), allocatable :: Cwrk(:, :) - rk = size(T) - allocate(Uwrk(rk), source=T(1)); call mat_zero(Uwrk) + rk = size(T) + rkb = size(LTI%B) + rkc = size(LTI%CT) + allocate(Uwrk(rk), source=T(1)); call zero_basis(Uwrk) + allocate(Ahat(1:rk, 1:rk )); Ahat = 0.0_wp + allocate(Bhat(1:rk, 1:rkb)); Bhat = 0.0_wp + allocate(Cwrk(1:rk, 1:rkc)); Cwrk = 0.0_wp + allocate(Chat(1:rkc,1:rk )); Chat = 0.0_wp + allocate(D(1:size(LTI%D,1),1:size(LTI%D,2))); D = 0.0_wp do i = 1, rk - call LTI%A%matvec(Uwrk(i), T(i)) + call LTI%A%matvec(Tinv(i), Uwrk(i)) end do - call mat_mult(romLTI%A, ST, Uwrk) - call mat_mult(romLTI%B, ST, LTI%B) - call mat_mult(romLTI%C, LTI%CT, T) - romLTI%D = LTI%D + call innerprod(Ahat, T, Uwrk) + call innerprod(Bhat, T, LTI%B) + call innerprod(Cwrk, LTI%CT, Tinv) + Chat = transpose(Cwrk) + D = LTI%D - end subroutine ROM_Petrov_Galerkin_Projection + end subroutine ROM_Petrov_Galerkin_Projection_rdp - subroutine ROM_Galerkin_Projection(romLTI, LTI, T) - !! 
Computes the Reduced-Order of the input LTI dynamical system via Galerkin projection using + subroutine ROM_Galerkin_Projection_rdp(Ahat, Bhat, Chat, D, LTI, T) + !! Computes the Reduced-Order Model of the input LTI dynamical system via Galerkin projection using !! the orthogonal projection basis \( \mathbf{V} \) with \( \mathbf{V}^T \mathbf{V} = \mathbf{I} \). !! !! Given an LTI system defined by the matrices \( \mathbf{A}, \mathbf{B}, \mathbf{C}, \mathbf{D}\), @@ -150,16 +201,148 @@ subroutine ROM_Galerkin_Projection(romLTI, LTI, T) !! \hat{\mathbf{C}} = \mathbf{C} \mathbf{V}, \qquad !! \hat{\mathbf{D}} = \mathbf{D} . !! \] - class(abstract_ROM_lti_system), intent(out) :: romLTI - !! Reduced-order LTI - class(abstract_lti_system), intent(in) :: LTI + real(wp), allocatable, intent(out) :: Ahat(:, :) + !! Reduced-order dynamics matrix. + real(wp), allocatable, intent(out) :: Bhat(:, :) + !! Reduced-order input-to-state matrix. + real(wp), allocatable, intent(out) :: Chat(:, :) + !! Reduced-order state-to-output matrix. + real(wp), allocatable, intent(out) :: D(:, :) + !! Feed-through matrix + class(abstract_lti_system_rdp), intent(in) :: LTI !! Large-scale LTI to project - class(abstract_vector), intent(inout) :: T(:) + class(abstract_vector_rdp), intent(inout) :: T(:) !! Balancing transformation - call ROM_Petrov_Galerkin_Projection(romLTI, LTI, T, T) + call ROM_Petrov_Galerkin_Projection(Ahat, Bhat, Chat, D, LTI, T, T) return - end subroutine ROM_Galerkin_Projection + end subroutine ROM_Galerkin_Projection_rdp + + subroutine project_onto_common_basis_rdp(UTV, VpTV, U, V) + !! Computes the common orthonormal basis of the space spanned by the union of the input Krylov bases + !! \( [ \mathbf{U}, \mathbf{V} ] \) by computing \( \mathbf{V_\perp} \) as an orthonormal basis of + !! \( \mathbf{V} \) lying in the orthogonal complement of \( \mathbf{U} \) given by + !! \[ + !! \mathbf{V_\perp}, R = \text{qr}( \mathbf{V} - \mathbf{U} \mathbf{U}^T \mathbf{V} ) + !! \[ + !! + !! NOTE: The orthonormality of \( \mathbf{U} \) is assumed and not checked. + !! + !! The output is + !! \[ + !! \mathbf{U}^T \mathbf{V}, \qquad \text{and } \qquad \mathbf{V_perp}^T \mathbf{V} + !! \hat{\mathbf{D}} = \mathbf{D} . + !! \] + real(wp), allocatable, intent(out) :: UTV(:,:) + real(wp), allocatable, intent(out) :: VpTV(:,:) + class(abstract_vector_rdp), intent(in) :: U(:) + class(abstract_vector_rdp), intent(in) :: V(:) + + ! internals + class(abstract_vector_rdp), allocatable :: Vp(:) + real(wp), allocatable :: wrk(:,:) + integer :: ru, rv, r, info + + ru = size(U) + rv = size(V) + r = ru + rv + + allocate(Vp(rv), source=V) ! Vp = V + allocate(UTV( ru,rv)); UTV = 0.0_wp + allocate(VpTV(rv,rv)); VpTV = 0.0_wp + + ! orthonormalize second basis against first + call orthogonalize_against_basis(Vp, U, info, if_chk_orthonormal=.false., beta=UTV) + call check_info(info, 'orthogonalize_against_basis', module=this_module, procedure='project_onto_common_basis_rdp') + allocate(wrk(rv,rv)); wrk = 0.0_wp + call qr(Vp, wrk, info) + call check_info(info, 'qr', module=this_module, procedure='project_onto_common_basis_rdp') + + ! compute inner product between second basis and its orthonormalized version + call innerprod(VpTV, Vp, V) + + return + end subroutine project_onto_common_basis_rdp + + real(dp) function compute_norm(X) result(nrm) + !! This function computes the Frobenius norm of a low-rank approximation via an SVD of the (small) coefficient matrix + class(abstract_sym_low_rank_state_rdp), intent(in) :: X + !! 
Low-Rank factors of the solution.
+      real(wp) :: s(X%rk)
+      s = svdvals(X%S(:X%rk,:X%rk))
+      nrm = sqrt(sum(s**2))
+   end function compute_norm
+
+   logical function is_converged(nrm, nrmX, opts) result(converged)
+      !! This function checks the convergence of the solution based on the (relative) increment norm
+      real(wp), intent(in) :: nrm
+      real(wp), optional, intent(in) :: nrmX
+      real(wp) :: nrmX_
+      type(dlra_opts), optional, intent(in) :: opts
+      type(dlra_opts) :: opts_
+
+      ! internals
+      character*128 :: msg
+
+      if (present(opts)) then
+         opts_ = opts
+      else
+         opts_ = dlra_opts()
+      end if
+
+      if (present(nrmX)) then
+         nrmX_ = nrmX
+      else
+         nrmX_ = 1.0_wp
+      end if
+
+      converged = .false.
+
+      if (opts_%relative_norm) then
+         if (nrm/nrmX_ < opts_%inc_tol) converged = .true.
+      else
+         if (nrm < opts_%inc_tol) converged = .true.
+      end if
+
+   end function is_converged
+
+   integer function get_chkstep(opts, verbose, tau) result(chkstep)
+
+      type(dlra_opts), intent(inout) :: opts
+      logical, intent(in) :: verbose
+      real(wp), intent(in) :: tau
+
+      ! internal
+      character(len=128) :: msg
+      type(dlra_opts) :: opts_default
+
+      opts_default = dlra_opts()
+
+      if (opts%chkctrl_time) then
+         if (opts%chktime <= 0.0_wp) then
+            opts%chktime = opts_default%chktime
+            write(msg, *) "Invalid chktime. Reset to default (", opts%chktime,")"
+            call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+         end if
+         chkstep = max(1, NINT(opts%chktime/tau))
+         if (verbose) then
+            write(msg,*) 'Output every', opts%chktime, 'time units (', chkstep, 'steps)'
+            call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+         end if
+      else
+         if (opts%chkstep <= 0) then
+            opts%chkstep = opts_default%chkstep
+            write(msg, *) "Invalid chkstep. Reset to default (", opts%chkstep,")"
+            call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+         end if
+         chkstep = opts%chkstep
+         if (verbose) then
+            write(msg,*) 'Output every', chkstep, 'steps (based on steps).'
+            call logger%log_message(trim(msg), module=this_module, procedure='DLRA')
+         end if
+      end if
+
+   end function get_chkstep
-end module LightROM_utils
+end module LightROM_Utils
diff --git a/test/TestExpm.f90 b/test/TestExpm.f90
deleted file mode 100644
index b177c3f..0000000
--- a/test/TestExpm.f90
+++ /dev/null
@@ -1,221 +0,0 @@
-module TestExpm
-   use LightKrylov
-   use LightKrylov_expmlib
-   use TestVector
-   use TestMatrices
-   use testdrive , only : new_unittest, unittest_type, error_type, check
-   use stdlib_math, only : all_close
-   implicit none
-
-   private
-
-   public :: collect_expm_testsuite
-
-   contains
-
-   !---------------------------------------------------------
-   !-----                                               -----
-   !-----     TEST SUITE FOR THE MATRIX EXPONENTIAL     -----
-   !-----                                               -----
-   !---------------------------------------------------------
-
-   subroutine collect_expm_testsuite(testsuite)
-      !> Collection of tests.
-      type(unittest_type), allocatable, intent(out) :: testsuite(:)
-
-      testsuite = [&
-            new_unittest("Dense Matrix Exponential", test_dense_matrix_exponential), &
-            new_unittest("Krylov Matrix Exponential", test_krylov_matrix_exponential), &
-            new_unittest("Block Krylov Matrix Exponential", test_block_krylov_matrix_exponential) &
-            ]
-
-      return
-   end subroutine collect_expm_testsuite
-
-   subroutine test_dense_matrix_exponential(error)
-      !> This function tests the scaling and squaring followed by rational Pade approximation
-      ! of the matrix exponential for a matrix for which the exponential propagator is known
-      ! analytically
-
-      !> Error type to be returned.
- type(error_type), allocatable, intent(out) :: error - !> Problem dimension. - integer, parameter :: n = 5 - integer, parameter :: m = 6 - !> Test matrix. - real(kind=wp) :: A(n, n) - real(kind=wp) :: E(n, n) - real(kind=wp) :: Eref(n, n) - integer :: i, j - - ! --> Initialize matrix. - A = 0.0_wp - do i = 1, n-1 - A(i,i+1) = m*1.0_wp - end do - ! --> Reference with analytical exponential - Eref = 0.0_wp - forall (i=1:n) Eref(i, i) = 1.0_wp - do i = 1, n-1 - do j = 1, n-i - Eref(i,i+j) = Eref(i,i+j-1)*m/j - end do - end do - ! --> Compute exponential numerically - E = 0.0_wp - call expm(E, A) - - call check(error, maxval(E-Eref) < rtol) - - return - end subroutine test_dense_matrix_exponential - - subroutine test_krylov_matrix_exponential(error) - !> This function tests the Krylov based approximation of the action of the exponential - ! propagator against the dense computation for a random operator, a random RHS and a - ! typical value of tau. - - !> Error type to be returned. - type(error_type), allocatable, intent(out) :: error - class(rmatrix), allocatable :: A - !> Basis vectors. - class(rvector), allocatable :: Q - class(rvector), allocatable :: Xref - class(rvector), allocatable :: Xkryl - !> Krylov subspace dimension. - integer, parameter :: kdim = test_size - !> Test matrix. - real(kind=wp) :: Amat(kdim, kdim) - real(kind=wp) :: Emat(kdim, kdim) - !> GS factors. - real(kind=wp) :: R(kdim, kdim) - real(kind=wp) :: Id(kdim, kdim) - !> Information flag. - integer :: info - !> Test parameters - integer, parameter :: nkmax = 15 - real(kind=wp), parameter :: tau = 0.1_wp - real(kind=wp), parameter :: tol = 1e-10_wp - !> Misc. - integer :: i,j,k - real(kind=wp) :: Xmat(test_size), Qmat(test_size) - real(kind=wp) :: err - - Amat = 0.0_wp; Emat = 0.0_wp; Xmat = 0.0_wp - allocate(Q); allocate(Xref); allocate(Xkryl) - call Xref%zero() - call Xkryl%zero() - - ! --> Initialize operator. - A = rmatrix() ; call random_number(A%data) - Amat = A%data - ! --> Initialize rhs. - call random_number(Q%data) - Qmat(:) = Q%data - - !> Comparison is dense computation (10th order Pade approximation) - call expm(Emat, tau*Amat) - Xmat = matmul(Emat,Qmat) - - !> Copy reference data into Krylov vector - Xref%data = Xmat(:) - - !> Compute Krylov matrix exponential using the arnoldi method - call kexpm(Xkryl, A, Q, tau, tol, info, verbosity = .true., kdim = nkmax) - call Xkryl%axpby(1.0_wp, Xref, -1.0_wp) - - !> Compute 2-norm of the error - err = Xkryl%norm() - write(*, *) ' true error: ||error||_2 = ', err - - call check(error, err < rtol) - - return - end subroutine test_krylov_matrix_exponential - - subroutine test_block_krylov_matrix_exponential(error) - !> This function tests the Krylov based approximation of the action of the exponential - ! propagator against the dense computation for a random operator, a random RHS and a - ! typical value of tau. - - !> Error type to be returned. - type(error_type), allocatable, intent(out) :: error - class(rmatrix), allocatable :: A - !> Basis vectors. - class(rvector), allocatable :: Q(:) - class(rvector), allocatable :: Xref(:) - class(rvector), allocatable :: Xkryl(:) - class(rvector), allocatable :: Xkryl_block(:) - !> Krylov subspace dimension. - integer, parameter :: kdim = test_size - !> Test matrix. - real(kind=wp) :: Amat(kdim, kdim) - real(kind=wp) :: Emat(kdim, kdim) - !> GS factors. - real(kind=wp) :: R(kdim, kdim) - real(kind=wp) :: Id(kdim, kdim) - !> Information flag. 
- integer :: info - !> Test parameters - integer, parameter :: nkmax = 15 - integer, parameter :: p = 2 - real(kind=wp), parameter :: tau = 0.1_wp - real(kind=wp), parameter :: tol = 1e-10_wp - !> Misc. - integer :: i,j,k - real(kind=wp) :: Xmat(test_size,p), Qmat(test_size,p) - real(kind=wp) :: alpha - real(kind=wp) :: err(p,p) - - Amat = 0.0_wp; Emat = 0.0_wp; Xmat = 0.0_wp - allocate(Xref(1:p)) ; call mat_zero(Xref) - allocate(Xkryl(1:p)) ; call mat_zero(Xkryl) - allocate(Xkryl_block(1:p)) ; call mat_zero(Xkryl_block) - - ! --> Initialize operator. - A = rmatrix() ; call random_number(A%data) - Amat = A%data - ! --> Initialize rhs. - allocate(Q(1:p)) ; - do i = 1,p - call random_number(Q(i)%data) - Qmat(:,i) = Q(i)%data - end do - - !> Comparison is dense computation (10th order Pade approximation) - call expm(Emat, tau*Amat) - Xmat = matmul(Emat,Qmat) - !> Copy reference data into Krylov vector - do i = 1,p - Xref(i)%data = Xmat(:,i) - end do - - !> Compute Krylov matrix exponential using sequential arnoldi method for each input column - write(*,*) 'SEQUENTIAL ARNOLDI' - do i = 1,p - write(*,*) ' column',i - call kexpm(Xkryl(i:i), A, Q(i:i), tau, tol, info, verbosity = .true., kdim = nkmax) - call Xkryl(i)%axpby(1.0_wp, Xref(i), -1.0_wp) - end do - write(*,*) 'BLOCK-ARNOLDI' - !> Compute Krylov matrix exponential using block-arnoldi method - call kexpm(Xkryl_block(1:p), A, Q(1:p), tau, tol, info, verbosity = .true., kdim = nkmax) - do i = 1,p - call Xkryl_block(i)%axpby(1.0_wp, Xref(i), -1.0_wp) - end do - - !> Compute 2-norm of the error - call mat_mult(err,Xkryl(1:p),Xkryl(1:p)) - alpha = sqrt(norm2(err)) - write(*,*) '--------------------------------------------------------------------' - write(*, *) ' true error (seq.): ||error||_2 = ', alpha - call mat_mult(err,Xkryl_block(1:p),Xkryl_block(1:p)) - alpha = sqrt(norm2(err)) - write(*, *) ' true error (block): ||error||_2 = ', alpha - - call check(error, alpha < rtol) - - return - end subroutine test_block_krylov_matrix_exponential - -end module TestExpm \ No newline at end of file diff --git a/test/TestLyapunov.f90 b/test/TestLyapunov.f90 index 09eeab5..484b188 100644 --- a/test/TestLyapunov.f90 +++ b/test/TestLyapunov.f90 @@ -1,13 +1,27 @@ module TestLyapunov + ! standard library + use stdlib_math, only : linspace, all_close + use stdlib_stats_distribution_normal, only: normal => rvs_normal + use stdlib_linalg, only : svdvals + ! testing library + use testdrive , only : new_unittest, unittest_type, error_type, check + ! LightKrylov for Linear Algebra use LightKrylov - use TestVector - use TestMatrices + use LightKrylov, only : dp, wp => dp + use LightKrylov_Logger + use LightKrylov_TestTypes + ! LightROM + use LightROM_Utils + ! Specific types for testing + use TestUtils + ! Tests Use LightROM_LyapunovUtils - use testdrive , only : new_unittest, unittest_type, error_type, check - use stdlib_math, only : all_close + + implicit none - private + private :: this_module + character(len=*), parameter :: this_module = 'LightROM_TestUtils' public :: collect_lyapunov_utils_testsuite @@ -20,56 +34,86 @@ module TestLyapunov !------------------------------------------- subroutine collect_lyapunov_utils_testsuite(testsuite) - !> Collection of tests. 
type(unittest_type), allocatable, intent(out) :: testsuite(:) testsuite = [& - new_unittest("Development tests", playground) & + new_unittest("project onto common basis", test_project_onto_common_basis_rdp) & ] return end subroutine collect_lyapunov_utils_testsuite - subroutine playground(error) - - !> Error type to be returned. + subroutine test_project_onto_common_basis_rdp(error) + ! Error type to be returned. type(error_type), allocatable, intent(out) :: error - class(rmatrix), allocatable :: A - !> Basis vectors. - class(rvector), allocatable :: Q(:) - class(rvector), allocatable :: Xref(:) - class(rvector), allocatable :: Xkryl(:) - class(rvector), allocatable :: Xkrylc(:) - !> Krylov subspace dimension. - integer, parameter :: kdim = test_size - !> Test matrix. - real(kind=wp) :: Amat(kdim, kdim) - real(kind=wp) :: Emat(kdim, kdim) - !> GS factors. - real(kind=wp) :: R(kdim, kdim) - real(kind=wp) :: Id(kdim, kdim) - !> Information flag. + ! Test Vectors. + integer, parameter :: ku = 10 + integer, parameter :: kv = 15 + type(vector_rdp), allocatable :: U(:), V(:) + ! Coefficient matrices. + real(dp), allocatable :: S(:, :), G(:, :) + ! Data matrices. + real(dp), allocatable :: Udata(:, :), Vdata(:, :) + ! Common basis projection results. + real(dp), allocatable :: UTV(:, :), VpTV(:, :) + ! Information flag. integer :: info - !> Misc. - integer :: i,j,k - integer, parameter :: nk = 10 - real(kind=wp) :: Xmat(test_size, nk), Qmat(test_size) - real(kind=wp) :: Xrefmat(test_size) - real(kind=wp) :: alpha - real(kind=wp) :: Xreshape(test_size*kdim,1) - real(kind=wp) :: Xmatr(kdim,test_size) - real(wp) :: pad(1) - real(wp) :: tau, z, c - real(wp) :: difference(nk,2) + ! Miscellaneous. + integer :: kmax + real(dp) :: mu, var + real(wp), allocatable :: wrk(:,:) + real(wp), dimension(:), allocatable :: svals, sdata + real(dp) :: err, sigma_direct, sigma_projected + real(dp), dimension(:,:), allocatable :: Ddata, DLR + character*256 :: msg - pad = 0.0_wp + mu = 0.0_dp + var = 1.0_dp - ! --> Initialize matrix. - A = rmatrix() ; call random_number(A%data) + ! scratch + kmax = max(ku,kv) + allocate(wrk(kmax,kmax)); + + ! Initialize bases and coefficients. + allocate(U(ku), V(kv)) + call init_rand(U); wrk = 0.0_wp; call qr(U, wrk(:ku,:ku), info) + call init_rand(V); wrk = 0.0_wp; call qr(V, wrk(:kv,:kv), info) + allocate(S(ku, ku), G(kv, kv)) + S = normal(mu, var); S = 0.5*(S + transpose(S)) + G = normal(mu, var); G = 0.5*(G + transpose(G)) + + ! Get data. + allocate(Udata(test_size, ku), Vdata(test_size, kv)) + call get_data(Udata, U) + call get_data(Vdata, V) + + ! Compute the first singular value directly. + allocate(Ddata(test_size, test_size)); allocate(sdata(test_size)) + Ddata = matmul(Udata, matmul(S, transpose(Udata))) - matmul(Vdata, matmul(G, transpose(Vdata))) + sdata = svdvals(Ddata) - call check(error, 0.0_wp < rtol) + ! Project onto common basis. + allocate(UTV(ku, kv), VpTV(kv, kv)) + call project_onto_common_basis_rdp(UTV, VpTV, U, V) + call check_info(info, 'project_onto_common_basis_rdp', module=this_module, & + & procedure='test_project_onto_common_basis_rdp') + + ! Compute the first singular value from the projection. + allocate(DLR(ku+kv, ku+kv)); allocate(svals(ku+kv)) + DLR( :ku , :ku ) = S - matmul(UTV, matmul(G, transpose(UTV)) ) + DLR(ku+1:ku+kv , :ku ) = - matmul(VpTV, matmul(G, transpose(UTV)) ) + DLR( :ku , ku+1:ku+kv) = - matmul(UTV, matmul(G, transpose(VpTV))) + DLR(ku+1:ku+kv , ku+1:ku+kv) = - matmul(VpTV, matmul(G, transpose(VpTV))) + svals = svdvals(DLR) + + ! 
Check correctness. + err = abs(sdata(1) - svals(1)) + call get_err_str(msg, "max err: ", err) + call check(error, err < rtol_dp) + call check_test(error, 'test_project_onto_common_basis_rdp', & + & info='Singular value comparison', eq='s_1 = s(LR)_1', context=msg) return - end subroutine playground + end subroutine test_project_onto_common_basis_rdp end module TestLyapunov \ No newline at end of file diff --git a/test/TestMatrices.f90 b/test/TestMatrices.f90 deleted file mode 100644 index abdcc70..0000000 --- a/test/TestMatrices.f90 +++ /dev/null @@ -1,94 +0,0 @@ -module TestMatrices - use LightKrylov - use TestVector - - implicit none - - private - - !--------------------------------------- - !----- GENERAL REAL MATRIX ----- - !--------------------------------------- - type, extends(abstract_linop), public :: rmatrix - real(kind=wp), dimension(test_size, test_size) :: data = 0.0_wp - contains - private - procedure, pass(self), public :: matvec => general_matvec - procedure, pass(self), public :: rmatvec => general_rmatvec - end type rmatrix - - !---------------------------------------- - !----- SYM. POS. DEF MATRIX ----- - !---------------------------------------- - type, extends(abstract_spd_linop), public :: spd_matrix - real(kind=wp), dimension(test_size, test_size) :: data = 0.0_wp - contains - private - procedure, pass(self), public :: matvec => spd_matvec - procedure, pass(self), public :: rmatvec => spd_matvec - end type spd_matrix - -contains - - !------------------------------------------------------------------------------------- - !----- ----- - !----- DEFINITION OF THE TYPE-BOUND PROCEDURES FOR GENERAL REAL MATRICES ----- - !----- ----- - !------------------------------------------------------------------------------------- - - subroutine general_matvec(self, vec_in, vec_out) - class(rmatrix) , intent(in) :: self - class(abstract_vector), intent(in) :: vec_in - class(abstract_vector), intent(out) :: vec_out - - select type(vec_in) - type is(rvector) - select type(vec_out) - type is(rvector) - vec_out%data = matmul(self%data, vec_in%data) - end select - end select - end subroutine general_matvec - - subroutine general_rmatvec(self, vec_in, vec_out) - class(rmatrix), intent(in) :: self - class(abstract_vector), intent(in) :: vec_in - class(abstract_vector), intent(out) :: vec_out - - select type(vec_in) - type is(rvector) - select type(vec_out) - type is(rvector) - vec_out%data = matmul(transpose(self%data), vec_in%data) - end select - end select - end subroutine general_rmatvec - - !----------------------------------------------------------------------------------------------- - !----- ----- - !----- DEFINITION OF THE TYPE-BOUND PROCEDURES FOR SYM. POS. DEF. 
LINEAR OPERATORS ----- - !----- ----- - !----------------------------------------------------------------------------------------------- - - subroutine spd_matvec(self, vec_in, vec_out) - class(spd_matrix) , intent(in) :: self - class(abstract_vector), intent(in) :: vec_in - class(abstract_vector), intent(out) :: vec_out - - select type(vec_in) - type is(rvector) - select type(vec_out) - type is(rvector) - vec_out%data = matmul(self%data, vec_in%data) - end select - end select - return - end subroutine spd_matvec - - !---------------------------------------------------------------------------------- - !----- ----- - !----- DEFINITION OF THE TYPE-BOUND PROCEDURES FOR HERMITIAN MATRICES ----- - !----- ----- - !---------------------------------------------------------------------------------- - -end module TestMatrices diff --git a/test/TestUtils.f90 b/test/TestUtils.f90 new file mode 100644 index 0000000..f2d678b --- /dev/null +++ b/test/TestUtils.f90 @@ -0,0 +1,478 @@ +module TestUtils + use stdlib_io_npy, only: save_npy + use stdlib_linalg, only: eye, diag + use stdlib_stats_distribution_normal, only: normal => rvs_normal + use LightKrylov + use LightKrylov_Constants + use LightKrylov_TestTypes + + implicit none + + private + + character(len=128), parameter, private :: this_module = 'LightKrylov_TestUtils' + + public :: get_data + public :: put_data + public :: init_rand + public :: get_err_str + + interface get_data + module procedure get_data_vec_rsp + module procedure get_data_vec_basis_rsp + module procedure get_data_linop_rsp + module procedure get_data_vec_rdp + module procedure get_data_vec_basis_rdp + module procedure get_data_linop_rdp + module procedure get_data_vec_csp + module procedure get_data_vec_basis_csp + module procedure get_data_linop_csp + module procedure get_data_vec_cdp + module procedure get_data_vec_basis_cdp + module procedure get_data_linop_cdp + end interface + + interface put_data + module procedure put_data_vec_rsp + module procedure put_data_vec_basis_rsp + module procedure put_data_linop_rsp + module procedure put_data_vec_rdp + module procedure put_data_vec_basis_rdp + module procedure put_data_linop_rdp + module procedure put_data_vec_csp + module procedure put_data_vec_basis_csp + module procedure put_data_linop_csp + module procedure put_data_vec_cdp + module procedure put_data_vec_basis_cdp + module procedure put_data_linop_cdp + end interface + + interface init_rand + module procedure init_rand_vec_rsp + module procedure init_rand_basis_rsp + module procedure init_rand_linop_rsp + module procedure init_rand_spd_linop_rsp + module procedure init_rand_vec_rdp + module procedure init_rand_basis_rdp + module procedure init_rand_linop_rdp + module procedure init_rand_spd_linop_rdp + module procedure init_rand_vec_csp + module procedure init_rand_basis_csp + module procedure init_rand_linop_csp + module procedure init_rand_hermitian_linop_csp + module procedure init_rand_vec_cdp + module procedure init_rand_basis_cdp + module procedure init_rand_linop_cdp + module procedure init_rand_hermitian_linop_cdp + end interface + + interface get_err_str + module procedure get_err_str_sp + module procedure get_err_str_dp + end interface + +contains + + !---------------------------------------------------- + !----- EXTRACT DATA FROM ABSTRACT TYPES ----- + !---------------------------------------------------- + + subroutine get_data_vec_rsp(vec_out, vec_in) + real(sp), intent(out) :: vec_out(:) + type(vector_rsp), intent(in) :: vec_in + vec_out = vec_in%data + return + 
end subroutine get_data_vec_rsp + + subroutine get_data_vec_basis_rsp(basis_out, basis_in) + real(sp), intent(out) :: basis_out(:, :) + type(vector_rsp), intent(in) :: basis_in(:) + ! Internal variables. + integer :: k + do k = 1, size(basis_in) + basis_out(:, k) = basis_in(k)%data + enddo + return + end subroutine get_data_vec_basis_rsp + + subroutine get_data_linop_rsp(mat_out, linop_in) + real(sp), intent(out) :: mat_out(:, :) + type(linop_rsp), intent(in) :: linop_in + mat_out = linop_in%data + return + end subroutine get_data_linop_rsp + + subroutine get_data_vec_rdp(vec_out, vec_in) + real(dp), intent(out) :: vec_out(:) + type(vector_rdp), intent(in) :: vec_in + vec_out = vec_in%data + return + end subroutine get_data_vec_rdp + + subroutine get_data_vec_basis_rdp(basis_out, basis_in) + real(dp), intent(out) :: basis_out(:, :) + type(vector_rdp), intent(in) :: basis_in(:) + ! Internal variables. + integer :: k + do k = 1, size(basis_in) + basis_out(:, k) = basis_in(k)%data + enddo + return + end subroutine get_data_vec_basis_rdp + + subroutine get_data_linop_rdp(mat_out, linop_in) + real(dp), intent(out) :: mat_out(:, :) + type(linop_rdp), intent(in) :: linop_in + mat_out = linop_in%data + return + end subroutine get_data_linop_rdp + + subroutine get_data_vec_csp(vec_out, vec_in) + complex(sp), intent(out) :: vec_out(:) + type(vector_csp), intent(in) :: vec_in + vec_out = vec_in%data + return + end subroutine get_data_vec_csp + + subroutine get_data_vec_basis_csp(basis_out, basis_in) + complex(sp), intent(out) :: basis_out(:, :) + type(vector_csp), intent(in) :: basis_in(:) + ! Internal variables. + integer :: k + do k = 1, size(basis_in) + basis_out(:, k) = basis_in(k)%data + enddo + return + end subroutine get_data_vec_basis_csp + + subroutine get_data_linop_csp(mat_out, linop_in) + complex(sp), intent(out) :: mat_out(:, :) + type(linop_csp), intent(in) :: linop_in + mat_out = linop_in%data + return + end subroutine get_data_linop_csp + + subroutine get_data_vec_cdp(vec_out, vec_in) + complex(dp), intent(out) :: vec_out(:) + type(vector_cdp), intent(in) :: vec_in + vec_out = vec_in%data + return + end subroutine get_data_vec_cdp + + subroutine get_data_vec_basis_cdp(basis_out, basis_in) + complex(dp), intent(out) :: basis_out(:, :) + type(vector_cdp), intent(in) :: basis_in(:) + ! Internal variables. + integer :: k + do k = 1, size(basis_in) + basis_out(:, k) = basis_in(k)%data + enddo + return + end subroutine get_data_vec_basis_cdp + + subroutine get_data_linop_cdp(mat_out, linop_in) + complex(dp), intent(out) :: mat_out(:, :) + type(linop_cdp), intent(in) :: linop_in + mat_out = linop_in%data + return + end subroutine get_data_linop_cdp + + + !---------------------------------------------- + !----- PUT DATA TO ABSTRACT TYPES ----- + !---------------------------------------------- + + subroutine put_data_vec_rsp(vec_out, vec_in) + type(vector_rsp), intent(out) :: vec_out + real(sp), intent(in) :: vec_in + vec_out%data = vec_in + return + end subroutine put_data_vec_rsp + + subroutine put_data_vec_basis_rsp(basis_out, basis_in) + type(vector_rsp), intent(out) :: basis_out(:) + real(sp), intent(in) :: basis_in(:, :) + ! Internal variables. + integer :: k + do k = 1, size(basis_out) + basis_out(k)%data = basis_in(:, k) + enddo + return + end subroutine put_data_vec_basis_rsp + + subroutine put_data_linop_rsp(linop_out, mat_in) + type(linop_rsp), intent(out) :: linop_out + real(sp), intent(in) :: mat_in(:, :) + ! Internal variables. 
+ linop_out%data = mat_in + return + end subroutine put_data_linop_rsp + + subroutine put_data_vec_rdp(vec_out, vec_in) + type(vector_rdp), intent(out) :: vec_out + real(dp), intent(in) :: vec_in + vec_out%data = vec_in + return + end subroutine put_data_vec_rdp + + subroutine put_data_vec_basis_rdp(basis_out, basis_in) + type(vector_rdp), intent(out) :: basis_out(:) + real(dp), intent(in) :: basis_in(:, :) + ! Internal variables. + integer :: k + do k = 1, size(basis_out) + basis_out(k)%data = basis_in(:, k) + enddo + return + end subroutine put_data_vec_basis_rdp + + subroutine put_data_linop_rdp(linop_out, mat_in) + type(linop_rdp), intent(out) :: linop_out + real(dp), intent(in) :: mat_in(:, :) + ! Internal variables. + linop_out%data = mat_in + return + end subroutine put_data_linop_rdp + + subroutine put_data_vec_csp(vec_out, vec_in) + type(vector_csp), intent(out) :: vec_out + complex(sp), intent(in) :: vec_in + vec_out%data = vec_in + return + end subroutine put_data_vec_csp + + subroutine put_data_vec_basis_csp(basis_out, basis_in) + type(vector_csp), intent(out) :: basis_out(:) + complex(sp), intent(in) :: basis_in(:, :) + ! Internal variables. + integer :: k + do k = 1, size(basis_out) + basis_out(k)%data = basis_in(:, k) + enddo + return + end subroutine put_data_vec_basis_csp + + subroutine put_data_linop_csp(linop_out, mat_in) + type(linop_csp), intent(out) :: linop_out + complex(sp), intent(in) :: mat_in(:, :) + ! Internal variables. + linop_out%data = mat_in + return + end subroutine put_data_linop_csp + + subroutine put_data_vec_cdp(vec_out, vec_in) + type(vector_cdp), intent(out) :: vec_out + complex(dp), intent(in) :: vec_in + vec_out%data = vec_in + return + end subroutine put_data_vec_cdp + + subroutine put_data_vec_basis_cdp(basis_out, basis_in) + type(vector_cdp), intent(out) :: basis_out(:) + complex(dp), intent(in) :: basis_in(:, :) + ! Internal variables. + integer :: k + do k = 1, size(basis_out) + basis_out(k)%data = basis_in(:, k) + enddo + return + end subroutine put_data_vec_basis_cdp + + subroutine put_data_linop_cdp(linop_out, mat_in) + type(linop_cdp), intent(out) :: linop_out + complex(dp), intent(in) :: mat_in(:, :) + ! Internal variables. 
+ linop_out%data = mat_in + return + end subroutine put_data_linop_cdp + + + !-------------------------------------------------------------- + !----- INITIALIZE ABSTRACT TYPES WITH RANDOM DATA ----- + !-------------------------------------------------------------- + + subroutine init_rand_vec_rsp(x) + type(vector_rsp), intent(inout) :: x + call x%rand() + return + end subroutine init_rand_vec_rsp + + subroutine init_rand_basis_rsp(X) + type(vector_rsp), intent(inout) :: X(:) + integer :: i + do i = 1, size(X) + call X(i)%rand() + enddo + return + end subroutine init_rand_basis_rsp + + subroutine init_rand_linop_rsp(linop) + type(linop_rsp), intent(inout) :: linop + real(sp), allocatable :: mu(:, :), var(:, :) + allocate(mu(test_size, test_size)) ; mu = 0.0_sp + allocate(var(test_size, test_size)) + var = 1.0_sp + linop%data = normal(mu, var) + return + end subroutine init_rand_linop_rsp + + subroutine init_rand_spd_linop_rsp(linop) + type(spd_linop_rsp), intent(inout) :: linop + real(sp), allocatable :: mu(:, :), var(:, :) + real(sp), allocatable :: data(:, :) + allocate(mu(test_size, test_size)) ; mu = zero_rsp + allocate(var(test_size, test_size)) ; var = one_rsp + + data = normal(mu, var) + linop%data = matmul(data, transpose(data))/test_size + 0.01*eye(test_size) + + return + end subroutine init_rand_spd_linop_rsp + + subroutine init_rand_vec_rdp(x) + type(vector_rdp), intent(inout) :: x + call x%rand() + return + end subroutine init_rand_vec_rdp + + subroutine init_rand_basis_rdp(X) + type(vector_rdp), intent(inout) :: X(:) + integer :: i + do i = 1, size(X) + call X(i)%rand() + enddo + return + end subroutine init_rand_basis_rdp + + subroutine init_rand_linop_rdp(linop) + type(linop_rdp), intent(inout) :: linop + real(dp), allocatable :: mu(:, :), var(:, :) + allocate(mu(test_size, test_size)) ; mu = 0.0_dp + allocate(var(test_size, test_size)) + var = 1.0_dp + linop%data = normal(mu, var) + return + end subroutine init_rand_linop_rdp + + subroutine init_rand_spd_linop_rdp(linop) + type(spd_linop_rdp), intent(inout) :: linop + real(dp), allocatable :: mu(:, :), var(:, :) + real(dp), allocatable :: data(:, :) + allocate(mu(test_size, test_size)) ; mu = zero_rdp + allocate(var(test_size, test_size)) ; var = one_rdp + + data = normal(mu, var) + linop%data = matmul(data, transpose(data))/test_size + 0.01*eye(test_size) + + return + end subroutine init_rand_spd_linop_rdp + + subroutine init_rand_vec_csp(x) + type(vector_csp), intent(inout) :: x + call x%rand() + return + end subroutine init_rand_vec_csp + + subroutine init_rand_basis_csp(X) + type(vector_csp), intent(inout) :: X(:) + integer :: i + do i = 1, size(X) + call X(i)%rand() + enddo + return + end subroutine init_rand_basis_csp + + subroutine init_rand_linop_csp(linop) + type(linop_csp), intent(inout) :: linop + complex(sp), allocatable :: mu(:, :), var(:, :) + allocate(mu(test_size, test_size)) ; mu = 0.0_sp + allocate(var(test_size, test_size)) + var = cmplx(1.0_sp, 1.0_sp, kind=sp) + linop%data = normal(mu, var) + return + end subroutine init_rand_linop_csp + + subroutine init_rand_hermitian_linop_csp(linop) + type(hermitian_linop_csp), intent(inout) :: linop + complex(sp), allocatable :: data(:, :) + complex(sp), allocatable :: mu(:, :), var(:, :) + + allocate(mu(test_size, test_size)) ; mu = 0.0_sp + allocate(var(test_size, test_size)) ; var = cmplx(1.0_sp, 1.0_sp, kind=sp) + + data = normal(mu, var) + data = matmul(data, transpose(conjg(data)))/test_size + 0.01*eye(test_size) + linop%data = data + + return + end subroutine 
init_rand_hermitian_linop_csp + + subroutine init_rand_vec_cdp(x) + type(vector_cdp), intent(inout) :: x + call x%rand() + return + end subroutine init_rand_vec_cdp + + subroutine init_rand_basis_cdp(X) + type(vector_cdp), intent(inout) :: X(:) + integer :: i + do i = 1, size(X) + call X(i)%rand() + enddo + return + end subroutine init_rand_basis_cdp + + subroutine init_rand_linop_cdp(linop) + type(linop_cdp), intent(inout) :: linop + complex(dp), allocatable :: mu(:, :), var(:, :) + allocate(mu(test_size, test_size)) ; mu = 0.0_dp + allocate(var(test_size, test_size)) + var = cmplx(1.0_dp, 1.0_dp, kind=dp) + linop%data = normal(mu, var) + return + end subroutine init_rand_linop_cdp + + subroutine init_rand_hermitian_linop_cdp(linop) + type(hermitian_linop_cdp), intent(inout) :: linop + complex(dp), allocatable :: data(:, :) + complex(dp), allocatable :: mu(:, :), var(:, :) + + allocate(mu(test_size, test_size)) ; mu = 0.0_dp + allocate(var(test_size, test_size)) ; var = cmplx(1.0_dp, 1.0_dp, kind=dp) + + data = normal(mu, var) + data = matmul(data, transpose(conjg(data)))/test_size + 0.01*eye(test_size) + linop%data = data + + return + end subroutine init_rand_hermitian_linop_cdp + + + subroutine get_err_str_sp(msg, info, err) + character(len=*), intent(inout) :: msg + character(len=*), intent(in) :: info + real(sp) :: err + + ! internals + character*8 :: value_str + character(len=*), parameter :: indent = repeat(" ", 4) + + write(value_str, '(E8.2)') err + msg = indent // info // value_str // achar(10) + + end subroutine get_err_str_sp + subroutine get_err_str_dp(msg, info, err) + character(len=*), intent(inout) :: msg + character(len=*), intent(in) :: info + real(dp) :: err + + ! internals + character*8 :: value_str + character(len=*), parameter :: indent = repeat(" ", 4) + + write(value_str, '(E8.2)') err + msg = indent // info // value_str // achar(10) + + end subroutine get_err_str_dp + +end module diff --git a/test/TestVector.f90 b/test/TestVector.f90 deleted file mode 100644 index f63a048..0000000 --- a/test/TestVector.f90 +++ /dev/null @@ -1,86 +0,0 @@ -module TestVector - use LightKrylov - use stdlib_optval, only: optval - - implicit none - - private - - public :: test_size - - integer, parameter :: test_size = 100 - - type, extends(abstract_vector), public :: rvector - real(kind=wp), dimension(test_size) :: data = 0.0_wp - contains - private - procedure, pass(self), public :: zero - procedure, pass(self), public :: dot - procedure, pass(self), public :: scal - procedure, pass(self), public :: axpby - procedure, pass(self), public :: rand - end type rvector - -contains - - !----------------------------------------------------------- - !----- ----- - !----- DEFINITION OF THE TYPE-BOUND PROCEDURES ----- - !----- ----- - !----------------------------------------------------------- - - !--> Zero-out a vector. - subroutine zero(self) - class(rvector), intent(inout) :: self - self%data = 0.0_wp - return - end subroutine zero - - double precision function dot(self, vec) result(alpha) - class(rvector), intent(in) :: self - class(abstract_vector), intent(in) :: vec - - select type(vec) - type is(rvector) - alpha = dot_product(self%data, vec%data) - end select - return - end function dot - - ! --> In-place scalar multiplication. - subroutine scal(self, alpha) - class(rvector), intent(inout) :: self - real(kind=wp), intent(in) :: alpha - self%data = self%data * alpha - return - end subroutine scal - - ! 
--> axpby interface - subroutine axpby(self, alpha, vec, beta) - class(rvector), intent(inout) :: self - class(abstract_vector), intent(in) :: vec - real(kind=wp) , intent(in) :: alpha, beta - - select type(vec) - type is(rvector) - self%data = alpha*self%data + beta*vec%data - end select - return - end subroutine axpby - - subroutine rand(self, ifnorm) - class(rvector), intent(inout) :: self - logical, optional, intent(in) :: ifnorm - ! internals - logical :: normalize - real(kind=wp) :: alpha - normalize = optval(ifnorm, .true.) - call random_number(self%data) - if (normalize) then - alpha = self%norm() - call self%scal(1.0/alpha) - endif - return - end subroutine rand - -end module TestVector diff --git a/test/tests.f90 b/test/tests.f90 index c62400c..8144e9d 100644 --- a/test/tests.f90 +++ b/test/tests.f90 @@ -5,10 +5,10 @@ program Tester use testdrive, only : run_testsuite, new_testsuite, testsuite_type !> Only dummy test. Needs to be removed later. use testdrive, only : new_unittest, unittest_type, error_type, check + use LightKrylov + use LightKrylov_Logger !> Abstract implementation of ROM-LTI techniques. use LightROM - use TestVector - use TestMatrices use TestLyapunov implicit none @@ -25,7 +25,7 @@ program Tester !------------------------------- !> Display information about the version of LightKrylov being tested. - call greetings() + call greetings_LightROM() !> Test status. status = 0 @@ -53,24 +53,4 @@ program Tester write(*, *) "All test successfully passed!" endif -contains - - subroutine collect_dummy_testsuite(testsuite) - !> Collection of tests. - type(unittest_type), allocatable, intent(out) :: testsuite(:) - - testsuite = [new_unittest("Dummy test 1", dummy_test_1)] - - return - end subroutine collect_dummy_testsuite - - subroutine dummy_test_1(error) - !> Error-type to be returned. - type(error_type), allocatable, intent(out) :: error - - !> Check if 1 == 1. - call check(error, 1 == 1) - return - end subroutine dummy_test_1 - end program Tester
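
For reference, the identity behind the singular-value check in test_project_onto_common_basis_rdp can be spelled out. Assuming (consistent with how the DLR blocks are assembled, though not stated explicitly in the diff) that project_onto_common_basis_rdp returns UTV = U^T V and VpTV = Vp^T V, where Vp is an orthonormal basis for the part of span(V) orthogonal to span(U), one has V = U UTV + Vp VpTV and therefore

    D = U S U^T - V G V^T
      = [U, Vp] * | S - UTV G UTV^T     - UTV G VpTV^T  | * [U, Vp]^T
                  |   - VpTV G UTV^T    - VpTV G VpTV^T |

Because [U, Vp] has orthonormal columns, the nonzero singular values of the full n x n matrix D coincide with those of the small (ku+kv) x (ku+kv) block matrix, which is exactly the DLR matrix built in the test. This is why comparing sdata(1) against svals(1) is a valid correctness check (eq. s_1 = s(LR)_1 in the test message).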
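
The helpers introduced in TestUtils (init_rand, get_data, put_data, get_err_str) are meant to be combined in the pattern sketched below. This is a usage sketch only, not code from the repository: the subroutine name is hypothetical, and it assumes that LightKrylov_TestTypes exports vector_rdp, linop_rdp (with a matvec implementation), test_size and rtol_dp, as the new tests appear to rely on.

    subroutine example_matvec_consistency(error)
       use testdrive, only: error_type, check
       use LightKrylov_Constants          ! assumed to provide the dp kind
       use LightKrylov_TestTypes          ! assumed to provide vector_rdp, linop_rdp, test_size, rtol_dp
       use TestUtils
       implicit none
       type(error_type), allocatable, intent(out) :: error
       type(linop_rdp)  :: A
       type(vector_rdp) :: x, y
       real(dp), allocatable :: Amat(:, :), xvec(:), yvec(:)
       real(dp) :: err
       character(len=256) :: msg

       ! Draw a random operator and vector through the abstract interface.
       call init_rand(A)
       call init_rand(x)
       call A%matvec(x, y)

       ! Extract the raw data and compare against a plain matmul.
       allocate(Amat(test_size, test_size), xvec(test_size), yvec(test_size))
       call get_data(Amat, A)
       call get_data(xvec, x)
       call get_data(yvec, y)
       err = norm2(yvec - matmul(Amat, xvec))

       ! Report in the same style as the other tests.
       call get_err_str(msg, "max err: ", err)
       call check(error, err < rtol_dp)
    end subroutine example_matvec_consistency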
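
A brief note on why the init_rand_spd_linop_* and init_rand_hermitian_linop_* constructors always produce admissible operators: for any real X, the matrix X X^T is symmetric positive semi-definite, so

    A = (1/test_size) X X^T + 0.01 I   ==>   lambda_min(A) >= 0.01 > 0,

and A is symmetric positive definite regardless of the random draw; the complex branches use X X^H and yield Hermitian positive-definite data in the same way. The 1/test_size scaling only keeps the eigenvalues of order one and does not affect definiteness.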