diff --git a/.buildinfo b/.buildinfo
new file mode 100644
index 0000000..c25060b
--- /dev/null
+++ b/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 5b65c5582b2dc108701ac645b1ed00d1
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 0000000..e69de29
diff --git a/_images/pytomoatt.png b/_images/pytomoatt.png
new file mode 100644
index 0000000..1248b8c
Binary files /dev/null and b/_images/pytomoatt.png differ
diff --git a/_modules/index.html b/_modules/index.html
new file mode 100644
index 0000000..f1c601d
--- /dev/null
+++ b/_modules/index.html
@@ -0,0 +1,347 @@
+import xarray
+import numpy as np
+from scipy.interpolate import interpn
+from pyproj import Geod
+
+
+class Dataset(xarray.Dataset):
+ """Sub class of `xarray.Dataset <https://docs.xarray.dev/en/stable/generated/xarray.Dataset.html>`__
+ """
+ __slots__ = ()
+
+ def __init__(self, data_vars, coords, attrs=None) -> None:
+ super().__init__(data_vars, coords, attrs)
+
+
+ @classmethod
+ def from_xarray(cls, dataset):
+ ds = cls(dataset.data_vars, dataset.coords)
+ return ds
+
+
+
+ def interp_dep(self, depth:float, field:str):
+ """Interpolate map view with given depth
+
+ :param depth: Depth in km
+ :type depth: float
+ :param field: Field name in ATT model data
+ :type field: str
+ :return: xyz data with 3 columns [lon, lat, value]
+ :rtype: numpy.ndarray
+ """
+ if field not in self.data_vars.keys():
+ raise ValueError('No field named {} in the model data'.format(field))
+ idx = np.where(self.coords['dep'].values == depth)[0]
+ if idx.size > 0:
+ offset = 0
+ data = np.zeros([self.coords['lat'].size*self.coords['lon'].size, 3])
+ for i, la in enumerate(self.coords['lat'].values):
+ for j, lo in enumerate(self.coords['lon'].values):
+ data[offset] = [lo, la, self.data_vars[field].values[idx[0], i, j]]
+ offset += 1
+ else:
+ rad = 6371 - depth
+ points = np.zeros([self.coords['lat'].size*self.coords['lon'].size, 4])
+ offset = 0
+ for _, la in enumerate(self.coords['lat'].values):
+ for _, lo in enumerate(self.coords['lon'].values):
+ points[offset] = [rad, la, lo, 0.]
+ offset += 1
+ points[:, 3] = interpn(
+ (self.coords['rad'].values,
+ self.coords['lat'].values,
+ self.coords['lon'].values),
+ self.data_vars[field].values,
+ points[:, 0:3]
+ )
+ data = points[:, [2, 1, 3]]
+ return data
+
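+ # A minimal usage sketch (assuming ``ds`` is a Dataset built via
+ # ``ATTData.read(...).to_xarray()`` and containing a 'vel' field):
+ #
+ # >>> xyz = ds.interp_dep(50.0, 'vel')
+ # >>> # xyz columns: lon, lat, vel at 50 km depth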
+
+
+ def interp_sec(self, start_point, end_point, field:str, val=10.):
+ """Interpolate value along a cross section
+
+ :param start_point: start point with [lon1, lat1]
+ :type start_point: list or tuple
+ :param end_point: end points with [lon2, lat2]
+ :type end_point: list or tuple
+ :param field: Field name in ATT model data
+ :type field: str
+ :param val: interval between successive points in km
+ :type val: float
+ :return: xyz data with 5 columns [lon, lat, dis, dep, value]
+ :rtype: numpy.ndarray
+ """
+ # Initialize a profile
+ g = Geod(ellps='WGS84')
+ az, _, dist = g.inv(start_point[0],start_point[1],end_point[0],end_point[1])
+ sec_range = np.arange(0, dist/1000, val)
+ r = g.fwd_intermediate(start_point[0],start_point[1], az, npts=sec_range.size, del_s=val*1000)
+
+ # create points array
+ points = np.zeros([sec_range.size*self.coords['dep'].size, 5])
+ offset = 0
+ for i, lola in enumerate(zip(r.lons, r.lats)):
+ for _, rad in enumerate(self.coords['rad'].values):
+ points[offset] = [rad, lola[1], lola[0], sec_range[i], 0.]
+ offset += 1
+
+ # Interpolation
+ points[:, 4] = interpn(
+ (self.coords['rad'].values,
+ self.coords['lat'].values,
+ self.coords['lon'].values),
+ self.data_vars[field].values,
+ points[:, 0:3],
+ bounds_error=False
+ )
+ points[:, 0] = 6371 - points[:, 0]
+ data = points[:, [2, 1, 3, 0, 4]]
+ return data
+
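+ # Cross-section sketch (endpoint coordinates are made-up examples):
+ #
+ # >>> sec = ds.interp_sec([100.0, 30.0], [105.0, 35.0], 'vel', val=5.0)
+ # >>> # sec columns: lon, lat, distance (km), depth (km), vel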
+
+
+import h5py
+import numpy as np
+from .utils import init_axis, sind, cosd
+import copy
+
+
+
+class Checker():
+ """Create checkerboard model by adding perturbations on an exist model
+ """
+ def __init__(self, fname:str) -> None:
+ self.model_file = fname
+ with h5py.File(fname, 'r') as f:
+ self.vel = f['vel'][:]
+ self.eta = f['eta'][:]
+ self.xi = f['xi'][:]
+ self.zeta = f['zeta'][:]
+
+
+ def init_axis(self, min_max_dep, min_max_lat, min_max_lon, n_rtp):
+ """Initialize axis
+
+ :param min_max_dep: min and max depth, ``[min_dep, max_dep]``
+ :type min_max_dep: list
+ :param min_max_lat: Min and max latitude, ``[min_lat, max_lat]``
+ :type min_max_lat: list
+ :param min_max_lon: Min and max longitude, ``[min_lon, max_lon]``
+ :type min_max_lon: list
+ :param n_rtp: number of dimensions [ndep, nlat, nlon]
+ :type n_rtp: list
+ """
+ self.dd, self.tt, self.pp, self.dr, self.dt, self.dp, = init_axis(
+ min_max_dep, min_max_lat, min_max_lon, n_rtp
+ )
+
+
+ def _create_taper(self, xleft, xright, type='d'):
+ if type == 'd':
+ x = np.flip(self.dd); dx = self.dr
+ elif type == 't':
+ x = self.tt; dx = self.dt
+ elif type == 'p':
+ x = self.pp; dx = self.dp
+ else:
+ raise ValueError("type must be one of 'd', 't' or 'p'")
+ if xleft < x[0] or xright > x[-1]:
+ raise ValueError('taper limits are out of the axis range')
+ ntaper_left = int((xleft-x[0])/dx)
+ ntaper_right = int((x[-1]-xright)/dx)
+ return ntaper_left, ntaper_right
+
+
+ def checkerboard(self, period_x, period_y, period_z,
+ pert_vel=0.08, pert_ani=0.04,
+ lim_x=None, lim_y=None, lim_z=None):
+ """Create checkerboard
+
+ :param period_x: Number of half-period anomaly lobes along X, e.g., set to 2 for one positive and one negative anomaly
+ :type period_x: float
+ :param period_y: Multiple of period along Y
+ :type period_y: float
+ :param period_z: Multiple of period along Z
+ :type period_z: float
+ :param pert_vel: Perturbation for velocity, defaults to 0.08
+ :type pert_vel: float, optional
+ :param pert_ani: Perturbation for anisotropy, defaults to 0.04
+ :type pert_ani: float, optional
+ :param lim_x: Left and right bound along X, defaults to None
+ :type lim_x: list, optional
+ :param lim_y: Left and right bound along Y, defaults to None
+ :type lim_y: list, optional
+ :param lim_z: Left and right bound along Z, defaults to None
+ :type lim_z: list, optional
+ """
+ if lim_x is not None:
+ ntaper_left, ntaper_right = self._create_taper(*lim_x, type='p')
+ else:
+ ntaper_left = 0
+ ntaper_right = 0
+ x_pert = np.zeros_like(self.pp)
+ x_pert[ntaper_left:self.pp.size-ntaper_right] = \
+ np.sin(period_x*np.pi*np.arange(self.pp.size-(ntaper_left+ntaper_right))/ \
+ (self.pp.size-(ntaper_left+ntaper_right)))
+
+ if lim_y is not None:
+ ntaper_left, ntaper_right = self._create_taper(*lim_y, type='t')
+ else:
+ ntaper_left = 0
+ ntaper_right = 0
+ y_pert = np.zeros_like(self.tt)
+ y_pert[ntaper_left:self.tt.size-ntaper_right] = \
+ np.sin(period_y*np.pi*np.arange(self.tt.size-(ntaper_left+ntaper_right))/ \
+ (self.tt.size-(ntaper_left+ntaper_right)))
+
+ if lim_z is not None:
+ ntaper_left, ntaper_right = self._create_taper(*lim_z, type='d')
+ else:
+ ntaper_left = 0
+ ntaper_right = 0
+ z_pert = np.zeros_like(self.dd)
+ z_pert[ntaper_right:self.dd.size-ntaper_left] = \
+ np.sin(period_z*np.pi*np.arange(self.dd.size-(ntaper_left+ntaper_right))/ \
+ (self.dd.size-(ntaper_left+ntaper_right)))
+
+ zz, yy, xx = np.meshgrid(z_pert, y_pert, x_pert, indexing='ij')
+ self.perturbation = zz*yy*xx
+ self.vel_pert = self.vel * (1+self.perturbation*pert_vel)
+ self.dlnv = self.perturbation*pert_vel
+ self.epsilon = np.abs(self.perturbation)*pert_ani
+ self.phi = np.zeros_like(self.vel)
+ self.phi[np.where(self.perturbation>0)] = 135.
+ self.phi[np.where(self.perturbation<0)] = 45.
+ self.xi = self.epsilon*cosd(2*self.phi)
+ self.eta = self.epsilon*sind(2*self.phi)
+
+
+
+ def copy(self):
+ """Create a deep copy of the Checker object
+
+ :return: A copy of the Checker object
+ :rtype: Checker
+ """
+ return copy.deepcopy(self)
+
+
+
+ def write(self, fname=None):
+ """Write new model to h5 file
+
+ :param fname: Path to output file; defaults to the input file name with ``_pert`` appended
+ :type fname: str, optional
+ """
+ if fname is None:
+ fname = '.'.join(self.model_file.split('.')[:-1])+'_pert.h5'
+ with h5py.File(fname, 'w') as f:
+ f.create_dataset('xi', data=self.xi)
+ f.create_dataset('eta', data=self.eta)
+ f.create_dataset('zeta', data=self.zeta)
+ f.create_dataset('vel', data=self.vel_pert)
+ f.create_dataset('epsilon', data=self.epsilon)
+ f.create_dataset('phi', data=self.phi)
+ f.create_dataset('dlnv', data=self.dlnv)
+
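+
+# A minimal end-to-end sketch, assuming an existing TomoATT model file
+# 'model_init.h5' and a 51x101x101 grid (all values are hypothetical):
+#
+# >>> cbd = Checker('model_init.h5')
+# >>> cbd.init_axis([0, 100], [30, 40], [100, 110], [51, 101, 101])
+# >>> cbd.checkerboard(2, 2, 2, pert_vel=0.08, pert_ani=0.04)
+# >>> cbd.write('model_ckb.h5')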
+
+
+
+import numpy as np
+import h5py
+from .para import ATTPara
+from .attarray import Dataset
+from .utils import asind, acosd
+
+
+
+class ATTData():
+ """Read data from HDF5 or ASCII file
+
+ :param fname: Path to data file
+ :type fname: str
+ :param fname_params: Path to input parameter file
+ :type fname_params: str
+ :param fname_grid: Path to grid file
+ :type fname_grid: str
+ """
+ def __init__(self, fname:str,
+ fname_params:str,
+ fname_grid='OUTPUT_FILES/out_data_grid.h5'):
+ self.fname = fname
+ self.fname_grid = fname_grid
+ self.format = 'hdf5'
+ self.fgrid = None
+ self.fdata = None
+ self.grid_glob_r = None
+ self.grid_glob_t = None
+ self.grid_glob_p = None
+ self.fields = []
+ self.input_params = ATTPara(fname_params).input_params
+ self.ndiv_r, self.ndiv_t, self.ndiv_p = self.input_params['parallel']['ndiv_rtp']
+ self.nr_glob, self.nt_glob, self.np_glob = self.input_params['domain']['n_rtp']
+
+ def _add_field(self, name):
+ setattr(self, name, None)
+ self.fields.append(name)
+
+ def _read_h5(self):
+ """read data file with HDF5 format
+ """
+ self.fgrid = h5py.File(self.fname_grid, 'r')
+ self.fdata = h5py.File(self.fname, 'r')
+
+
+ @classmethod
+ def read(cls, fname:str, fname_params:str,
+ fname_grid='OUTPUT_FILES/out_data_grid.h5',
+ group_name='model', dataset_name=None,
+ format='hdf5'):
+ """Read data from HDF5 or ASCII file
+
+ :param fname: Path to data file
+ :type fname: str
+ :param fname_params: Path to input parameter file
+ :type fname_params: str
+ :param fname_grid: Path to grid file
+ :type fname_grid: str
+ :param group_name: Name of the group in the HDF5 file
+ :type group_name: str
+ :param dataset_name: Name of the dataset in the HDF5 file
+ :type dataset_name: str
+ :param format: Format of the data file, defaults to 'hdf5'
+ :type format: str, optional
+ :return: An instance of ATTData
+ :rtype: ATTData
+ """
+ attdata = cls(fname, fname_params, fname_grid)
+ attdata.format = format
+ # attdata.group_name = group_name
+ # open grid data file
+ if attdata.format == 'hdf5':
+ attdata._read_h5()
+ else:
+ attdata.fdata = np.loadtxt(fname)
+ attdata.fgrid = np.loadtxt(fname_grid)
+ if isinstance(dataset_name, str) and attdata.format == 'hdf5':
+ attdata._add_field(dataset_name)
+ attdata.__dict__[dataset_name], attdata.grid_glob_r, \
+ attdata.grid_glob_t, attdata.grid_glob_p = \
+ attdata._data_retrieval(group_name=group_name, dataset_name=dataset_name)
+ elif isinstance(dataset_name, str) and attdata.format != 'hdf5':
+ attdata._add_field('data')
+ attdata.data, attdata.grid_glob_r, attdata.grid_glob_t, attdata.grid_glob_p = \
+ attdata._data_retrieval()
+ elif isinstance(dataset_name, (list, tuple)) and attdata.format == 'hdf5':
+ for key in dataset_name:
+ if key not in attdata.fdata[group_name].keys():
+ raise ValueError('No dataset named {}. Available datasets: {}'.format(key, ', '.join(attdata.fdata[group_name].keys())))
+ attdata._add_field(key)
+ attdata.__dict__[key], attdata.grid_glob_r, \
+ attdata.grid_glob_t, attdata.grid_glob_p = \
+ attdata._data_retrieval(group_name=group_name, dataset_name=key)
+ elif dataset_name is None and attdata.format == 'hdf5':
+ for key in attdata.fdata[group_name].keys():
+ attdata._add_field(key)
+ attdata.__dict__[key], attdata.grid_glob_r, \
+ attdata.grid_glob_t, attdata.grid_glob_p = \
+ attdata._data_retrieval(group_name=group_name, dataset_name=key)
+ else:
+ raise ValueError('dataset_name should be a str, a list/tuple of str, or None')
+ return attdata
+
+
+ def _read_data_hdf5(self, offset, n_points_total_sub, group_name, dataset_name):
+ data_sub = self.fdata[group_name][dataset_name][offset:offset+n_points_total_sub]
+ grid_sub_p = self.fgrid["/Mesh/node_coords_p"][offset:offset+n_points_total_sub]
+ grid_sub_t = self.fgrid["/Mesh/node_coords_t"][offset:offset+n_points_total_sub]
+ grid_sub_r = self.fgrid["/Mesh/node_coords_r"][offset:offset+n_points_total_sub]
+ return data_sub, grid_sub_p, grid_sub_t, grid_sub_r
+
+ def _read_data_ascii(self, offset, n_points_total_sub):
+ data_sub = self.fdata[offset:offset+n_points_total_sub]
+ grid_sub_p = self.fgrid[offset:offset+n_points_total_sub,0]
+ grid_sub_t = self.fgrid[offset:offset+n_points_total_sub,1]
+ grid_sub_r = self.fgrid[offset:offset+n_points_total_sub,2]
+ return data_sub, grid_sub_p, grid_sub_t, grid_sub_r
+
+ def _data_retrieval(self, group_name=None, dataset_name=None):
+ # prepare a 3D array to store the data
+ data_glob = np.zeros(self.input_params['domain']['n_rtp'], dtype=np.float64)
+ grid_glob_r = np.zeros(self.input_params['domain']['n_rtp'], dtype=np.float64)
+ grid_glob_t = np.zeros(self.input_params['domain']['n_rtp'], dtype=np.float64)
+ grid_glob_p = np.zeros(self.input_params['domain']['n_rtp'], dtype=np.float64)
+
+ # load data subdomain by subdomain
+
+ # offset
+ offset = 0
+
+ for ir_sub in range(self.ndiv_r):
+ for it_sub in range(self.ndiv_t):
+ for ip_sub in range(self.ndiv_p):
+
+ # number of data point for this sub domain
+ nr_sub = self.nr_glob//self.ndiv_r
+ nt_sub = self.nt_glob//self.ndiv_t
+ np_sub = self.np_glob//self.ndiv_p
+
+ # offset for each direction
+ offset_r = ir_sub*nr_sub
+ offset_t = it_sub*nt_sub
+ offset_p = ip_sub*np_sub
+
+ # add modulus to the last subdomains
+ if ir_sub == self.ndiv_r-1:
+ nr_sub += self.nr_glob%self.ndiv_r
+ if it_sub == self.ndiv_t-1:
+ nt_sub += self.nt_glob%self.ndiv_t
+ if ip_sub == self.ndiv_p-1:
+ np_sub += self.np_glob%self.ndiv_p
+
+ # add overlap layer if this subdomain is not the last one for each direction
+ if ir_sub != self.ndiv_r-1:
+ nr_sub += 1
+ if it_sub != self.ndiv_t-1:
+ nt_sub += 1
+ if ip_sub != self.ndiv_p-1:
+ np_sub += 1
+
+ # number of data point for this sub domain
+ n_points_total_sub = nr_sub*nt_sub*np_sub
+
+ # load data
+ if self.format == 'hdf5':
+ data_sub, grid_sub_p, grid_sub_t, grid_sub_r = self._read_data_hdf5(
+ offset, n_points_total_sub, group_name, dataset_name)
+ else:
+ data_sub, grid_sub_p, grid_sub_t, grid_sub_r = self._read_data_ascii(
+ offset, n_points_total_sub)
+
+ # reshape data
+ data_sub = data_sub.reshape(nr_sub, nt_sub, np_sub)
+ grid_sub_p = grid_sub_p.reshape(nr_sub, nt_sub, np_sub)
+ grid_sub_t = grid_sub_t.reshape(nr_sub, nt_sub, np_sub)
+ grid_sub_r = grid_sub_r.reshape(nr_sub, nt_sub, np_sub)
+
+ # put those data in global 3d array
+ data_glob[offset_r:offset_r+nr_sub, offset_t:offset_t+nt_sub, offset_p:offset_p+np_sub] = data_sub
+ grid_glob_p[offset_r:offset_r+nr_sub, offset_t:offset_t+nt_sub, offset_p:offset_p+np_sub] = grid_sub_p
+ grid_glob_t[offset_r:offset_r+nr_sub, offset_t:offset_t+nt_sub, offset_p:offset_p+np_sub] = grid_sub_t
+ grid_glob_r[offset_r:offset_r+nr_sub, offset_t:offset_t+nt_sub, offset_p:offset_p+np_sub] = grid_sub_r
+
+ # update offset
+ offset += n_points_total_sub
+ return data_glob, grid_glob_r, grid_glob_t, grid_glob_p
+
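+ # Worked example of the decomposition above (hypothetical sizes): with
+ # nr_glob=10 and ndiv_r=3, the base block size is 10//3 = 3; the last
+ # subdomain absorbs the remainder (3 + 10%3 = 4) and each earlier
+ # subdomain reads one extra overlap layer (3 + 1 = 4), so blocks of
+ # 4, 4 and 4 points are read back at offsets 0, 3 and 6 along r.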
+
+ def to_xarray(self):
+ """Convert to attarray.Dataset
+
+ :return: A multi-dimensional dataset inheriting from xarray.Dataset
+ :rtype: attarray.Dataset
+ """
+ depths = 6371. - self.grid_glob_r[:, 0, 0]
+ radius = self.grid_glob_r[:, 0, 0]
+ latitudes = self.grid_glob_t[0, :, 0]
+ longitudes = self.grid_glob_p[0, 0, :]
+ data_dict = {}
+ for dataset_name in self.fields:
+ data_dict[dataset_name] = (["r", "t", "p"], self.__dict__[dataset_name])
+ dataset = Dataset(
+ data_dict,
+ coords={
+ 'dep': (['r'], depths),
+ 'rad': (['r'], radius),
+ 'lat': (['t'], latitudes),
+ 'lon': (['p'], longitudes),
+ }
+ )
+ return dataset
+
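+ # Chaining sketch (file names are hypothetical):
+ #
+ # >>> attdata = ATTData.read('out_data_sim_0.h5', 'input_params.yml',
+ # ... 'out_data_grid.h5', dataset_name='vel')
+ # >>> ds = attdata.to_xarray()
+ # >>> xyz = ds.interp_dep(30.0, 'vel') # map view at 30 km depth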
+
+
+
+if __name__ == '__main__':
+ attdata = ATTData.read('examples/out_data_sim_0.h5',
+ 'examples/input_params.yml',
+ 'examples/out_data_grid.h5',
+ dataset_name='T_res_src_0_inv_0000',
+ format='hdf5')
+
+
+import numpy as np
+from scipy.ndimage import gaussian_filter
+import h5py
+from .para import ATTPara
+from .io.crustmodel import CrustModel
+from .io.asciimodel import ASCIIModel
+from .attarray import Dataset
+from .utils import init_axis, acosd, atand
+
+
+
+class ATTModel():
+ """Create initial model from external models
+ """
+ def __init__(self, para_fname='input_params.yml') -> None:
+ """
+ :param para_fname: Path to parameter file, defaults to 'input_params.yml'
+ :type para_fname: str, optional
+ """
+ self.para_fname = para_fname
+ self.d_rtp = np.zeros(3)
+ self.read_param()
+ self.eta = np.zeros(self.n_rtp)
+ self.xi = np.zeros(self.n_rtp)
+ self.zeta = np.zeros(self.n_rtp)
+ self.vel = np.zeros(self.n_rtp)
+
+
+ def read_param(self):
+ """Read ``n_rtp``, ``min_max_dep``, ``min_max_lat`` and ``min_max_lon`` from ``para_fname``
+ """
+ para = ATTPara(self.para_fname)
+ self.n_rtp = para.input_params['domain']['n_rtp']
+ self.min_max_dep = para.input_params['domain']['min_max_dep']
+ self.min_max_lat = para.input_params['domain']['min_max_lat']
+ self.min_max_lon = para.input_params['domain']['min_max_lon']
+ self.depths, self.latitudes, self.longitudes, \
+ self.d_rtp[0], self.d_rtp[1], self.d_rtp[2] = init_axis(
+ self.min_max_dep, self.min_max_lat, self.min_max_lon, self.n_rtp
+ )
+ self.radius = 6371. - self.depths
+
+
+
+ @classmethod
+ def read(cls, model_fname: str, para_fname='input_params.yml'):
+ """Read an exists model
+
+ :param model_fname: Path to the exists model
+ :type model_fname: str
+ :param para_fname: Path to parameter file, defaults to 'input_params.yml'
+ :type para_fname: str, optional
+ """
+ mod = cls(para_fname)
+ f = h5py.File(model_fname, 'r')
+ mod.vel = f['vel'][:]
+ mod.xi = f['xi'][:]
+ mod.eta = f['eta'][:]
+ # mod.zeta = f['zeta'][:]
+ mod._check_axis()
+ if not ((mod.xi==0).all() and (mod.eta==0).all()):
+ mod.to_ani()
+ f.close()
+ return mod
+
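+ # Usage sketch (hypothetical file names):
+ #
+ # >>> mod = ATTModel.read('model_init.h5', 'input_params.yml')
+ # >>> mod.vel.shape # equals tuple(n_rtp) from the parameter file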
+
+ def _check_axis(self):
+ if self.vel.shape != tuple(self.n_rtp):
+ raise ValueError('conflicting size of data and n_rtp in parameters')
+
+
+ def to_ani(self):
+ """Convert to anisotropic strength (epsilon) and azimuth (phi)
+ """
+ self.epsilon = np.sqrt(self.eta**2+self.xi**2)
+ self.phi = np.zeros_like(self.epsilon)
+ idx = np.where(self.xi <= 0)
+ self.phi[idx] = 90 + 0.5*atand(self.eta[idx]/self.xi[idx])
+ idx = np.where((self.xi > 0) & (self.eta <= 0))
+ self.phi[idx] = 180 + 0.5*atand(self.eta[idx]/self.xi[idx])
+ idx = np.where((self.xi > 0) & (self.eta > 0))
+ self.phi[idx] = 0.5*atand(self.eta[idx]/self.xi[idx])
+
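+ # Note: since xi = epsilon*cos(2*phi) and eta = epsilon*sin(2*phi), the
+ # three branches above are equivalent to phi = 0.5*atan2(eta, xi) in
+ # degrees, folded into the interval [0, 180).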
+
+
+ def to_xarray(self):
+ """Convert to xarray
+ """
+ data_dict = {}
+ data_dict['vel'] = (["r", "t", "p"], self.vel)
+ data_dict['xi'] = (["r", "t", "p"], self.xi)
+ data_dict['eta'] = (["r", "t", "p"], self.eta)
+ # data_dict['zeta'] = (["r", "t", "p"], self.zeta)
+ if hasattr(self, 'epsilon') and hasattr(self, 'phi'):
+ data_dict['epsilon'] = (["r", "t", "p"], self.epsilon)
+ data_dict['phi'] = (["r", "t", "p"], self.phi)
+ if hasattr(self, 'dlnv'):
+ data_dict['dlnv'] = (["r", "t", "p"], self.dlnv)
+ dataset = Dataset(
+ data_dict,
+ coords={
+ 'dep': (['r'], self.depths),
+ 'rad': (['r'], self.radius),
+ 'lat': (['t'], self.latitudes),
+ 'lon': (['p'], self.longitudes),
+ }
+ )
+ return dataset
+
+
+
+ def grid_data_crust1(self, type='vp'):
+ """Grid data from CRUST1.0 model
+
+ :param type: Specify velocity type of ``vp`` or ``vs``, defaults to 'vp'
+ :type type: str, optional
+ """
+ cm = CrustModel()
+ self.vel = cm.griddata(
+ self.min_max_dep,
+ self.min_max_lat,
+ self.min_max_lon,
+ self.n_rtp, type=type
+ )
+
+
+
+ def grid_data_ascii(self, model_fname:str, **kwargs):
+ """Grid data from custom model file in ASCII format
+
+ :param model_fname: Path to model file
+ :type model_fname: str
+ :param usecols: Columns order by longitude, latitude, depth and velocity, defaults to [0, 1, 2, 3]
+ :type usecols: list or tuple
+ """
+ am = ASCIIModel(model_fname)
+ am.read_ascii(**kwargs)
+ self.vel = am.griddata(
+ self.min_max_dep,
+ self.min_max_lat,
+ self.min_max_lon,
+ self.n_rtp,
+ )
+
+
+
+ def smooth(self, sigma=5.0):
+ """Gaussian smooth the 3D velocity model
+
+ :param sigma: Standard deviation of the Gaussian kernel in km, defaults to 5.0
+ :type sigma: scalar or sequence of scalars, optional
+ """
+ if isinstance(sigma, (int, float)):
+ sigma_all = np.ones(3)*sigma/self.d_rtp/2/np.pi
+ elif len(sigma) == 3:
+ sigma_all = np.array(sigma)/self.d_rtp/2/np.pi
+ else:
+ raise ValueError('sigma must be a scalar or a sequence of 3 scalars')
+ # convert the horizontal components of sigma from km to degrees
+ sigma_all[1:] /= 111.19
+ self.vel = gaussian_filter(self.vel, sigma_all)
+
+
+
+ def calc_dv_avg(self):
+ """calculate anomalies relative to average velocity at each depth
+ """
+ self.dlnv = np.zeros_like(self.vel)
+ for i, _ in enumerate(self.depths):
+ avg = np.mean(self.vel[i, :, :])
+ self.dlnv[i, :, :] = 100 * (self.vel[i, :, :] - avg)/avg
+
+
+
+ def calc_dv(self, ref_mod_fname: str):
+ """calculate anomalies relative to another model
+
+ :param ref_mod_fname: Path to reference model
+ :type ref_mod_fname: str
+ """
+ with h5py.File(ref_mod_fname) as f:
+ ref_vel = f['vel'][:]
+ if self.vel.shape != ref_vel.shape:
+ raise ValueError('reference model should be in same size as input model')
+ self.dlnv = 100*(self.vel - ref_vel)/ref_vel
+
+
+
+ def write(self, out_fname=None):
+ """Write to h5 file with TomoATT format.
+
+ :param out_fname: Path to the output model file, defaults to 'model_{nr}_{nt}_{np}.h5'
+ :type out_fname: str, optional
+ """
+ if out_fname is None:
+ out_fname = 'model_{:d}_{:d}_{:d}.h5'.format(*self.n_rtp)
+ with h5py.File(out_fname, 'w') as f:
+ f.create_dataset('eta', data=self.eta)
+ f.create_dataset('xi', data=self.xi)
+ f.create_dataset('zeta', data=self.zeta)
+ f.create_dataset('vel', data=self.vel)
+
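+
+# End-to-end sketch for building an initial model (hypothetical names):
+#
+# >>> mod = ATTModel('input_params.yml')
+# >>> mod.grid_data_crust1(type='vp') # sample CRUST1.0 onto the grid
+# >>> mod.smooth(sigma=10.0) # Gaussian smoothing, sigma in km
+# >>> mod.write('model_init.h5')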
+
+
+import yaml
+from .utils import init_axis
+
+
+
+class ATTPara:
+ """Class for read and write parameter file with ``yaml`` format
+ """
+ def __init__(self, fname: str) -> None:
+ """
+ :param fname: Path to parameter file
+ :type fname: str
+ """
+ self.fname = fname
+ with open(fname, encoding='utf-8') as f:
+ file_data = f.read()
+ self.input_params = yaml.load(file_data, Loader=yaml.Loader)
+
+
+ def init_axis(self):
+ dep, lat, lon, dd, dt, dp = init_axis(
+ self.input_params['domain']['min_max_dep'],
+ self.input_params['domain']['min_max_lat'],
+ self.input_params['domain']['min_max_lon'],
+ self.input_params['domain']['n_rtp'],
+ )
+ return dep, lat, lon, dd, dt, dp
+
+
+
+ def write(self, fname=None):
+ """Write the parameters to a yaml file
+
+ :param fname: Path to output file; if None, overwrite the input file, defaults to None
+ :type fname: str, optional
+ """
+ if fname is None:
+ fname = self.fname
+ with open(fname, 'w', encoding='utf-8') as f:
+ yaml.dump(self.input_params, f, Dumper=yaml.Dumper)
+
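+
+# Usage sketch (hypothetical file names; keys follow the TomoATT input format):
+#
+# >>> para = ATTPara('input_params.yml')
+# >>> para.input_params['domain']['n_rtp'] = [51, 101, 101]
+# >>> para.write('input_params_new.yml')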
+
+
+import numpy as np
+import tqdm
+import pandas as pd
+from .distaz import DistAZ
+from .setuplog import SetupLog
+from .utils import WGS84_to_cartesian, define_rec_cols
+from sklearn.metrics.pairwise import haversine_distances
+import copy
+
+pd.options.mode.chained_assignment = None # default='warn'
+
+
+
+class SrcRec:
+ """I/O for source <--> receiver file
+
+ :param fname: Path to src_rec file
+ :type fname: str
+ :param src_only: Whether to read only source information, defaults to False
+ :type src_only: bool, optional
+ """
+
+ def __init__(self, fname: str, src_only=False) -> None:
+ """ """
+ self.src_only = src_only
+ self.src_points = None
+ self.rec_points = None
+ self.sources = None
+ self.receivers = None
+ self.fnames = [fname]
+ self.log = SetupLog()
+
+ def __repr__(self):
+ return f"PyTomoATT SrcRec Object: \n\
+ fnames={self.fnames}, \n\
+ src_only={self.src_only}, \n\
+ number of sources={self.src_points.shape[0]}, \n\
+ number of receivers={self.rec_points.shape[0]}"
+
+ @property
+ def src_points(self):
+ """Return a DataFrame of all sources
+
+ :return: All sources
+ :rtype: pandas.DataFrame
+
+ Sources contain 8 columns:
+
+ ================ ===================================================
+ Column Description
+ ================ ===================================================
+ ``origin_time`` Origin time of the source
+ ``evla`` Latitude of the source
+ ``evlo`` Longitude of the source
+ ``evdp`` Focal depth
+ ``mag`` Magnitude of the source
+ ``num_rec`` Number of receivers that recorded the source
+ ``event_id`` ID of the source
+ ``weight`` Weight of the source applied on objective function
+ ================ ===================================================
+ """
+ return self._src_points
+
+ @src_points.setter
+ def src_points(self, value):
+ if value is None or isinstance(value, pd.DataFrame):
+ self._src_points = value
+ else:
+ raise TypeError("src_points should be in DataFrame")
+
+ @property
+ def rec_points(self):
+ """Return a DataFrame of all receivers
+
+ :return: All receivers
+ :rtype: pandas.DataFrame
+
+ Receivers contain 9 ~ 11 columns:
+
+ Common fields
+ -----------------
+
+ ================ =====================================================
+ Column Description
+ ================ =====================================================
+ ``src_index`` Index of source recorded by the receiver
+ ``rec_index`` Index of receivers that recorded the same source
+ ``staname`` Name of the receiver
+ ``stla`` Latitude of the receiver
+ ``stlo`` Longitude of the receiver
+ ``stel`` Elevation of the receiver
+ ``phase`` Phase name
+ ``tt`` Travel time of the source receiver pair
+ ``weight`` Weight of the receiver applied on objective function
+ ================ =====================================================
+
+ Optional fields
+ ----------------
+
+ ================ ===========================================================================
+ Column Description
+ ================ ===========================================================================
+ ``netname`` Name of the network (when ``name_net_and_sta=True`` in ``SrcRec.read``)
+ ``dist_deg`` Epicentral distance in deg (when ``dist_in_data=True`` in ``SrcRec.read``)
+ ================ ===========================================================================
+
+ """
+ return self._rec_points
+
+ @rec_points.setter
+ def rec_points(self, value):
+ if value is None or isinstance(value, pd.DataFrame):
+ self._rec_points = value
+ else:
+ raise TypeError("rec_points should be in DataFrame")
+
+
+ @classmethod
+ def read(cls, fname: str, dist_in_data=False, name_net_and_sta=False, **kwargs):
+ """Read source <--> receiver file to pandas.DataFrame
+
+ :param fname: Path to src_rec file
+ :type fname: str
+ :param dist_in_data: Whether distance is included in the src_rec file
+ :type dist_in_data: bool
+ :param name_net_and_sta: Whether to include network and station name in the src_rec file
+ :type name_net_and_sta: bool
+ :return: class of SrcRec
+ :rtype: SrcRec
+ """
+ sr = cls(fname=fname, **kwargs)
+ alldf = pd.read_table(
+ fname, sep="\s+|\t", engine="python", header=None, comment="#"
+ )
+
+ last_col_src = 12
+ # this is a source line if the last column is not NaN
+ sr.src_points = alldf[pd.notna(alldf[last_col_src])]
+ # add weight column if not included
+ if sr.src_points.shape[1] == last_col_src + 1:
+ # add another column for weight
+ sr.src_points.loc[:, last_col_src + 1] = 1.0
+
+ # src id dataframe
+ sr.src_points.index = sr.src_points.iloc[:, 0]
+ sr.src_points.index.name = "src_index"
+
+ # event datetime dataframe
+ datedf = sr.src_points.loc[:, 1:6]
+ type_dict = {
+ 1: int,
+ 2: int,
+ 3: int,
+ 4: int,
+ 5: int,
+ 6: float,
+ }
+ try:
+ datedf = datedf.astype(type_dict)
+ except (ValueError, TypeError):
+ sr.log.SrcReclog.error("please check the date format in the src_rec file")
+ return sr.src_points
+ dateseris = (
+ datedf.astype(str)
+ .apply(lambda x: ".".join(x), axis=1)
+ .apply(pd.to_datetime, format="%Y.%m.%d.%H.%M.%S.%f")
+ )
+ dateseris.name = "origin_time"
+ # event data dataframe
+ src_data = sr.src_points.loc[:, 7:]
+ src_data.columns = [
+ "evla",
+ "evlo",
+ "evdp",
+ "mag",
+ "num_rec",
+ "event_id",
+ "weight",
+ ]
+ type_dict = {
+ "evla": float,
+ "evlo": float,
+ "evdp": float,
+ "mag": float,
+ "num_rec": int,
+ "event_id": str,
+ "weight": float,
+ }
+ try:
+ src_data = src_data.astype(type_dict)
+ except (ValueError, TypeError):
+ sr.log.SrcReclog.error(
+ "please check the event data format in the src_rec file"
+ )
+ return sr.src_points
+
+ # concat all the 3 dataframes
+ sr.src_points = pd.concat([dateseris, src_data], axis=1)
+
+ # read receiver data if not src_only
+ if not sr.src_only:
+ # number of columns is 8 if distance is not included
+ cols, last_col = define_rec_cols(dist_in_data, name_net_and_sta)
+
+ # extract the rows if the last_col is not NaN and the 12th column is NaN
+ sr.rec_points = alldf[
+ (alldf[last_col].notna()) & (alldf[last_col_src].isna())
+ ].reset_index(drop=True)
+
+ # add weight column if not included
+ if sr.rec_points.loc[:, last_col + 1].isna().all():
+ sr.rec_points.loc[:, last_col + 1] = 1.0
+
+ # warning if weigh value is greater than 10
+ if (sr.rec_points.loc[:, last_col + 1] > 10).any():
+ sr.log.SrcReclog.warning(
+ """
+at least one weight value is greater than 10.
+Probably your src_rec file includes distance data.
+In this case, please set dist_in_data=True and read again."""
+ )
+
+ # extract only the first part of columns (cut unnecessary columns)
+ sr.rec_points = sr.rec_points.loc[:, : last_col + 1]
+
+ # define column names
+ sr.rec_points.columns = cols
+
+ # change type of rec_index to int
+ sr.rec_points["rec_index"] = sr.rec_points["rec_index"].astype(int)
+
+ if name_net_and_sta:
+ # concatenate network and station name with "_"
+ sr.rec_points["staname"] = (
+ sr.rec_points["netname"] + "_" + sr.rec_points["staname"]
+ )
+ # drop network name column
+ sr.rec_points.drop("netname", axis=1, inplace=True)
+ # define src and rec list
+ sr.update_unique_src_rec()
+ return sr
+
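+ # Usage sketch (hypothetical file name):
+ #
+ # >>> sr = SrcRec.read('src_rec_file')
+ # >>> sr.src_points.head() # sources indexed by src_index
+ # >>> sr.rec_points.head() # receivers keyed by src_index/rec_index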
+
+
+ def write(self, fname="src_rec_file"):
+ """Write sources and receivers to ASCII file for TomoATT
+
+ :param fname: Path to the src_rec file, defaults to 'src_rec_file'
+ :type fname: str, optional
+ """
+ with open(fname, "w") as f:
+ for idx, src in tqdm.tqdm(
+ self.src_points.iterrows(), total=len(self.src_points)
+ ):
+ time_lst = (
+ src["origin_time"].strftime("%Y_%m_%d_%H_%M_%S.%f").split("_")
+ )
+ f.write(
+ "{:d} {} {} {} {} {} {} {:.4f} {:.4f} {:.4f} {:.4f} {} {} {:.4f}\n".format(
+ idx,
+ *time_lst,
+ src["evla"],
+ src["evlo"],
+ src["evdp"],
+ src["mag"],
+ src["num_rec"],
+ src["event_id"],
+ src["weight"],
+ )
+ )
+ if self.src_only:
+ continue
+ rec_data = self.rec_points[self.rec_points["src_index"] == idx]
+ for _, rec in rec_data.iterrows():
+ f.write(
+ " {:d} {:d} {} {:6.4f} {:6.4f} {:6.4f} {} {:6.4f} {:6.4f}\n".format(
+ idx,
+ rec["rec_index"],
+ rec["staname"],
+ rec["stla"],
+ rec["stlo"],
+ rec["stel"],
+ rec["phase"],
+ rec["tt"],
+ rec["weight"],
+ )
+ )
+
+
+
+ def copy(self):
+ """Return a copy of SrcRec object
+
+ :return: Copy of SrcRec object
+ :rtype: SrcRec
+ """
+ return copy.deepcopy(self)
+
+
+ def update_unique_src_rec(self):
+ self.sources = self.src_points[
+ ["event_id", "evla", "evlo", "evdp", "weight"]
+ ]
+ self.receivers = self.rec_points[
+ ["staname", "stla", "stlo", "stel", "weight"]
+ ].drop_duplicates()
+
+
+ def reset_index(self):
+ """Reset index of source and receivers."""
+ # reset src_index to be 0, 1, 2, ... for both src_points and rec_points
+ self.rec_points["src_index"] = self.rec_points["src_index"].map(
+ dict(zip(self.src_points.index, np.arange(len(self.src_points))))
+ )
+ self.src_points.index = np.arange(len(self.src_points))
+ self.src_points.index.name = "src_index"
+
+ # reset rec_index to be 0, 1, 2, ... for rec_points
+ self.rec_points["rec_index"] = self.rec_points.groupby("src_index").cumcount()
+
+ # sr.rec_points['rec_index'] = sr.rec_points['rec_index'].astype(int)
+
+
+ def append(self, sr):
+ """
+ Append another SrcRec object to the current one
+
+ :param sr: Another SrcRec object
+ :type sr: SrcRec
+ """
+ if not isinstance(sr, SrcRec):
+ raise TypeError("Input must be a SrcRec object")
+
+ if self.src_only != sr.src_only:
+ raise ValueError("Cannot append src_only and non-src_only SrcRec objects")
+
+ self.reset_index()
+ sr.reset_index()
+
+ # number of sources to be added
+ n_src_offset = self.src_points.shape[0]
+
+ # add column for source file tag if not included
+ if "fname" not in self.src_points.columns:
+ self.src_points["fname"] = self.fnames[0]
+ self.rec_points["fname"] = self.fnames[0]
+ if "fname" not in sr.src_points.columns:
+ sr.src_points["fname"] = sr.fnames[0]
+ sr.rec_points["fname"] = sr.fnames[0]
+
+ # append src_points
+ self.src_points = pd.concat([self.src_points, sr.src_points], ignore_index=True)
+ self.src_points.index.name = "src_index"
+ # self.src_points.index += 1 # start from 1
+
+ if not self.src_only:
+ # update src_index in rec_points
+ sr.rec_points["src_index"] += n_src_offset
+ # append rec_points
+ self.rec_points = pd.concat(
+ [self.rec_points, sr.rec_points], ignore_index=True
+ )
+
+ # store fnames
+ self.fnames.extend(sr.fnames)
+
+
+
+ def remove_rec_by_new_src(self):
+ """
+ Remove rec_points whose sources have been removed from src_points
+ """
+ self.rec_points = self.rec_points[
+ self.rec_points["src_index"].isin(self.src_points.index)
+ ]
+
+
+
+ def remove_src_by_new_rec(self):
+ """Remove src_points whose receivers have been removed from rec_points"""
+ self.src_points = self.src_points[
+ self.src_points.index.isin(self.rec_points["src_index"])
+ ]
+
+
+
+ def update_num_rec(self):
+ """
+ Update ``num_rec`` in src_points from the current rec_points
+ """
+ self.src_points["num_rec"] = self.rec_points.groupby("src_index").size()
+
+
+
+ def update(self):
+ """
+ Update ``SrcRec.src_points`` and ``SrcRec.rec_points`` with procedures:
+
+ 1. remove receivers by new sources
+ 2. remove sources by new receivers
+ 3. update num_rec
+ 4. reset index
+ 5. update unique sources and receivers
+ """
+ self.remove_rec_by_new_src()
+ self.remove_src_by_new_rec()
+ self.update_num_rec()
+ self.reset_index()
+ self.update_unique_src_rec()
+ # sort by src_index
+ self.src_points.sort_values(by=["src_index"], inplace=True)
+ self.rec_points.sort_values(by=["src_index", "rec_index"], inplace=True)
+
+
+ def erase_src_with_no_rec(self):
+ """
+ erase src_points with no rec_points
+ """
+ self.log.SrcReclog.info("src_points before removing: {}".format(self.src_points.shape))
+ self.src_points = self.src_points[self.src_points["num_rec"] > 0]
+ self.log.SrcReclog.info("src_points after removing: {}".format(self.src_points.shape))
+
+
+ def erase_duplicate_events(
+ self, thre_deg: float, thre_dep: float, thre_time_in_min: float
+ ):
+ """
+ check and count how many events are duplicated,
+ under given threshold of distance, depth, and time.
+
+ :param thre_deg: threshold of distance in degree
+ :type thre_deg: float
+ :param thre_dep: threshold of distance in degree
+ :type thre_dep: float
+ :param thre_time_in_min: hreshold of time in minutes
+ :type thre_time_in_min: float
+ """
+
+ # sort event data
+ self.src_points.sort_values(by=["origin_time", "evlo", "evla"], inplace=True)
+
+ num_duplicated = 99999
+ iter_count = 0
+
+ while num_duplicated > 0:
+ # difference with row +1
+ self.src_points["diff_evlo+1"] = self.src_points["evlo"].diff().abs()
+ self.src_points["diff_evla+1"] = self.src_points["evla"].diff().abs()
+ self.src_points["diff_evdp+1"] = self.src_points["evdp"].diff().abs()
+ self.src_points["diff_time+1"] = self.src_points["origin_time"].diff().abs()
+ self.src_points["diff_nrec+1"] = self.src_points["num_rec"].diff()
+ # difference with row -1
+ self.src_points["diff_evlo-1"] = (
+ self.src_points["evlo"].diff(periods=-1).abs()
+ )
+ self.src_points["diff_evla-1"] = (
+ self.src_points["evla"].diff(periods=-1).abs()
+ )
+ self.src_points["diff_evdp-1"] = (
+ self.src_points["evdp"].diff(periods=-1).abs()
+ )
+ self.src_points["diff_time-1"] = (
+ self.src_points["origin_time"].diff(periods=-1).abs()
+ )
+ self.src_points["diff_nrec-1"] = self.src_points["num_rec"].diff(periods=-1)
+
+ self.src_points["duplicated+1"] = self.src_points.apply(
+ lambda x: 1
+ if x["diff_evlo+1"] < thre_deg
+ and x["diff_evla+1"] < thre_deg
+ and x["diff_evdp+1"] < thre_dep
+ and x["diff_time+1"] < pd.Timedelta(minutes=thre_time_in_min)
+ else 0,
+ axis=1,
+ )
+ self.src_points["duplicated-1"] = self.src_points.apply(
+ lambda x: 1
+ if x["diff_evlo-1"] < thre_deg
+ and x["diff_evla-1"] < thre_deg
+ and x["diff_evdp-1"] < thre_dep
+ and x["diff_time-1"] < pd.Timedelta(minutes=thre_time_in_min)
+ else 0,
+ axis=1,
+ )
+
+ # drop rows (duplicated == 1 and diff_nrec <= 0)
+ self.src_points = self.src_points[
+ ~(
+ (self.src_points["duplicated+1"] == 1)
+ & (self.src_points["diff_nrec+1"] < 0)
+ )
+ ]
+ # drop the upper row where (duplicated == 1 and diff_nrec <= 0)
+ self.src_points = self.src_points[
+ ~(
+ (self.src_points["duplicated-1"] == 1)
+ & (self.src_points["diff_nrec-1"] <= 0)
+ )
+ ]
+
+ # print iterate count and number of rows, number of duplicated rows
+ num_duplicated = self.src_points[
+ (self.src_points["duplicated+1"] == 1)
+ | (self.src_points["duplicated-1"] == 1)
+ ].shape[0]
+ self.log.SrcReclog.info(
+ "iteration: {}; num_duplicated: {}".format(iter_count, num_duplicated)
+ )
+
+ iter_count += 1
+
+ # erase all columns starting with diff_*
+ self.src_points.drop(
+ self.src_points.columns[self.src_points.columns.str.startswith("diff_")],
+ axis=1,
+ inplace=True,
+ )
+ # erase all columns starting with duplicated
+ self.src_points.drop(
+ self.src_points.columns[
+ self.src_points.columns.str.startswith("duplicated")
+ ],
+ axis=1,
+ inplace=True,
+ )
+ self.update()
+
+
+
+ def select_phase(self, phase_list):
+ """
+ Select phases of interest and remove others
+
+ :param phase_list: List of phases for travel times used for inversion
+ :type phase_list: list of str
+ """
+ if isinstance(phase_list, str):
+ phase_list = [phase_list]
+ elif not isinstance(phase_list, (list, tuple)):
+ raise TypeError("phase_list should be a str or a list of str")
+ self.log.SrcReclog.info(
+ "rec_points before selecting: {}".format(self.rec_points.shape)
+ )
+ self.rec_points = self.rec_points[self.rec_points["phase"].isin(phase_list)]
+ self.update()
+ self.log.SrcReclog.info(
+ "rec_points after selecting: {}".format(self.rec_points.shape)
+ )
+
+
+
+ def select_by_datetime(self, time_range):
+ """
+ Select sources and stations within a time range
+
+ :param time_range: Time range defined as [start_time, end_time]
+ :type time_range: iterable
+ """
+ # select source within this time range.
+ self.log.SrcReclog.info(
+ "src_points before selecting: {}".format(self.src_points.shape)
+ )
+ self.log.SrcReclog.info(
+ "rec_points before selecting: {}".format(self.rec_points.shape)
+ )
+ self.src_points = self.src_points[
+ (self.src_points["origin_time"] >= time_range[0])
+ & (self.src_points["origin_time"] <= time_range[1])
+ ]
+ self.update()
+ self.log.SrcReclog.info(
+ "src_points after selecting: {}".format(self.src_points.shape)
+ )
+ self.log.SrcReclog.info(
+ "rec_points after selecting: {}".format(self.rec_points.shape)
+ )
+
+
+
+ def remove_specified_recs(self, rec_list):
+ """Remove specified receivers
+
+ :param rec_list: List of receivers to be removed
+ :type rec_list: list
+ """
+ self.log.SrcReclog.info(
+ "rec_points before removing: {}".format(self.rec_points.shape)
+ )
+ self.rec_points = self.rec_points[~self.rec_points["staname"].isin(rec_list)]
+ self.update()
+ self.log.SrcReclog.info(
+ "rec_points after removing: {}".format(self.rec_points.shape)
+ )
+
+
+
+ def select_box_region(self, region):
+ """
+ Select sources and stations in a box region
+
+ :param region: Box region defined as [lon1, lon2, lat1, lat2]
+ :type region: iterable
+ """
+ # select source within this region.
+ self.log.SrcReclog.info(
+ "src_points before selecting: {}".format(self.src_points.shape)
+ )
+ self.log.SrcReclog.info(
+ "rec_points before selecting: {}".format(self.rec_points.shape)
+ )
+ self.src_points = self.src_points[
+ (self.src_points["evlo"] >= region[0])
+ & (self.src_points["evlo"] <= region[1])
+ & (self.src_points["evla"] >= region[2])
+ & (self.src_points["evla"] <= region[3])
+ ]
+
+ # Remove receivers whose events have been removed
+ self.remove_rec_by_new_src()
+
+ # Remove rest receivers out of region.
+ self.rec_points = self.rec_points[
+ (self.rec_points["stlo"] >= region[0])
+ & (self.rec_points["stlo"] <= region[1])
+ & (self.rec_points["stla"] >= region[2])
+ & (self.rec_points["stla"] <= region[3])
+ ]
+
+ # Remove empty sources
+ self.update()
+ self.log.SrcReclog.info(
+ "src_points after selecting: {}".format(self.src_points.shape)
+ )
+ self.log.SrcReclog.info(
+ "rec_points after selecting: {}".format(self.rec_points.shape)
+ )
+
+
+
+ def select_depth(self, dep_min_max):
+ """Select sources in a range of depth
+
+ :param dep_min_max: limit of depth, ``[dep_min, dep_max]``
+ :type dep_min_max: sequence
+ """
+ self.log.SrcReclog.info('src_points before selecting: {}'.format(self.src_points.shape))
+ self.log.SrcReclog.info(
+ "rec_points before selecting: {}".format(self.rec_points.shape)
+ )
+ self.src_points = self.src_points[
+ (self.src_points['evdp'] >= dep_min_max[0]) &
+ (self.src_points['evdp'] <= dep_min_max[1])
+ ]
+ self.update()
+ self.log.SrcReclog.info('src_points after selecting: {}'.format(self.src_points.shape))
+ self.log.SrcReclog.info(
+ "rec_points after selecting: {}".format(self.rec_points.shape)
+ )
+
+
+
+ def calc_distance(self):
+ """Calculate epicentral distance for each source-receiver pair"""
+ self.rec_points["dist"] = 0.0
+ rec_group = self.rec_points.groupby("src_index")
+ for idx, rec in rec_group:
+ dist = DistAZ(
+ self.src_points.loc[idx]["evla"],
+ self.src_points.loc[idx]["evlo"],
+ rec["stla"].values,
+ rec["stlo"].values,
+ ).delta
+ self.rec_points.loc[rec.index, "dist"] = dist
+
+
+
+ def select_distance(self, dist_min_max, recalc_dist=False):
+ """Select stations in a range of distance
+
+ :param dist_min_max: limit of distance in deg, ``[dist_min, dist_max]``
+ :type dist_min_max: list or tuple
+ :param recalc_dist: whether to recalculate epicentral distances even if they exist, defaults to False
+ :type recalc_dist: bool, optional
+ """
+ self.log.SrcReclog.info(
+ "rec_points before selecting: {}".format(self.rec_points.shape)
+ )
+ if ("dist" not in self.rec_points) or recalc_dist:
+ self.log.SrcReclog.info("Calculating epicentral distance...")
+ self.calc_distance()
+ mask = (self.rec_points["dist"] < dist_min_max[0]) | (
+ self.rec_points["dist"] > dist_min_max[1]
+ )
+ drop_idx = self.rec_points[mask].index
+ self.rec_points = self.rec_points.drop(index=drop_idx)
+ self.update()
+ self.log.SrcReclog.info(
+ "rec_points after selecting: {}".format(self.rec_points.shape)
+ )
+
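+ # Chained selection sketch (all bounds are made-up examples):
+ #
+ # >>> sr.select_box_region([100, 110, 30, 40]) # lon1, lon2, lat1, lat2
+ # >>> sr.select_depth([0, 60]) # km
+ # >>> sr.select_distance([0.5, 8.0]) # deg
+ # >>> sr.write('src_rec_file_selected')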
+
+ def select_by_num_rec(self, num_rec: int):
+ """Select sources with a number of receivers greater than or equal to ``num_rec``
+
+ :param num_rec: threshold of minimum receiver number
+ :type num_rec: int
+ """
+ self.update_num_rec()
+ self.log.SrcReclog.info(
+ "src_points before selecting: {}".format(self.src_points.shape)
+ )
+ self.log.SrcReclog.info(
+ "rec_points before selecting: {}".format(self.rec_points.shape)
+ )
+ self.src_points = self.src_points[(self.src_points["num_rec"] >= num_rec)]
+ # self.remove_rec_by_new_src()
+ self.update()
+ self.log.SrcReclog.info(
+ "src_points after selecting: {}".format(self.src_points.shape)
+ )
+ self.log.SrcReclog.info(
+ "rec_points after selecting: {}".format(self.rec_points.shape)
+ )
+
+
+ def select_one_event_in_each_subgrid(self, d_deg: float, d_km: float):
+ """select one event in each subgrid
+
+ :param d_deg: grid size along lat and lon in degree
+ :type d_deg: float
+ :param d_km: grid size along depth axis in km
+ :type d_km: float
+ """
+
+ self.log.SrcReclog.info(
+ "src_points before selecting: {}".format(self.src_points.shape)
+ )
+ self.log.SrcReclog.info("processing... (this may take a few minutes)")
+
+ # store index of src_points as 'src_index'
+ self.src_points["src_index"] = self.src_points.index
+
+ # add 'lat_group' and 'lon_group' to src_points by binning with d_deg
+ self.src_points["lat_group"] = self.src_points["evla"].apply(
+ lambda x: int(x / d_deg)
+ )
+ self.src_points["lon_group"] = self.src_points["evlo"].apply(
+ lambda x: int(x / d_deg)
+ )
+
+ # add 'dep_group' to src_points by binning with d_km
+ self.src_points["dep_group"] = self.src_points["evdp"].apply(
+ lambda x: int(x / d_km)
+ )
+
+ # sort src_points by 'lat_group' and 'lon_group' and 'dep_group'
+ self.src_points = self.src_points.sort_values(
+ by=["lat_group", "lon_group", "dep_group"]
+ )
+
+ # find all events in the same lat_group, lon_group and dep_group
+ # and keep only the one with the largest num_rec
+ self.src_points = self.src_points.groupby(
+ ["lat_group", "lon_group", "dep_group"]
+ ).apply(lambda x: x.sort_values(by="num_rec", ascending=False).iloc[0])
+
+ # drop 'lat_group' and 'lon_group' and 'dep_group'
+ self.src_points = self.src_points.drop(
+ columns=["lat_group", "lon_group", "dep_group"]
+ )
+
+ # restore index from 'src_index'
+ self.src_points = self.src_points.set_index("src_index")
+
+ # sort src_points by index
+ self.src_points = self.src_points.sort_index()
+
+ self.log.SrcReclog.info(
+ "src_points after selecting: {}".format(self.src_points.shape)
+ )
+
+ # remove rec_points by new src_points
+ # self.remove_rec_by_new_src()
+ self.update()
+
+
+
+ def count_events_per_station(self):
+ """
+ count events per station
+ """
+ # count the number of staname
+ self.rec_points["num_events"] = (
+ self.rec_points.groupby("staname").cumcount() + 1
+ )
+ # reflect the total number of events for each station
+ self.rec_points["num_events"] = self.rec_points.groupby("staname")[
+ "num_events"
+ ].transform("max")
+
+
+ def _calc_weights(self, lat, lon, scale):
+ # haversine_distances expects [lat, lon] pairs in radians
+ points = pd.concat([lat, lon], axis=1)
+ points_rad = points * (np.pi / 180)
+ dist = haversine_distances(points_rad) * 6371.0 / 111.19
+ dist_ref = scale * np.mean(dist)
+ om = np.exp(-((dist / dist_ref) ** 2)) * points.shape[0]
+ return 1 / np.mean(om, axis=0)
+
+
+ def geo_weighting(self, scale=0.5, rec_weight=False):
+ """Calculate geographical weights for sources
+
+ :param scale: Scale of reference distance parameter.
+ See equation 22 in Ruan et al., (2019). The reference distance is given by ``scale* dis_average``, defaults to 0.5
+ :type scale: float, optional
+ :param rec_weight: Whether to calculate weights for receivers, defaults to False
+ :type rec_weight: bool, optional
+ """
+
+ self.src_points["weight"] = self._calc_weights(
+ self.src_points["evla"], self.src_points["evlo"], scale
+ )
+ if rec_weight:
+ weights = self._calc_weights(
+ self.receivers['stla'],
+ self.receivers['stlo'],
+ scale
+ )
+ # apply weights to rec_points
+ for staname, weight in zip(self.receivers['staname'], weights):
+ self.rec_points.loc[self.rec_points['staname'] == staname, 'weight'] = weight
+
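+ # Usage sketch: weights are written into the 'weight' columns in place
+ # (the scale value below is just an example):
+ #
+ # >>> sr.geo_weighting(scale=0.5, rec_weight=True)
+ # >>> sr.write('src_rec_file_weighted')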
+ #
+ # This function is commented out temporarily because it contains a verified bug and has not been fixed.
+ #
+ # def merge_adjacent_stations(self, d_deg:float, d_km:float):
+ # """
+ # merge adjacent stations as one station
+ # d_deg : float
+ # grid size in degree
+ # d_km : float
+ # grid size in km
+ # """
+
+ # # count the number of events per station
+ # self.count_events_per_station()
+
+ # # number of unique stations before merging
+ # print('number of unique stations before merging: ', self.rec_points['staname'].nunique())
+
+ # # create 'lat_group', 'lon_group' and 'dep_group' columns from 'stla', 'stlo' and 'stel'
+ # def create_groups(row, column, d):
+ # return int(row[column]/d)
+
+ # self.rec_points['lat_group'] = self.rec_points.apply(lambda x: create_groups(x, 'stla', d_deg), axis=1)
+ # self.rec_points['lon_group'] = self.rec_points.apply(lambda x: create_groups(x, 'stlo', d_deg), axis=1)
+ # self.rec_points['dep_group'] = self.rec_points.apply(lambda x: create_groups(x, 'stel', d_km*1000), axis=1)
+
+ # # sort src_points by 'lat_group' and 'lon_group' and 'dep_group'
+ # self.rec_points = self.rec_points.sort_values(by=['lat_group', 'lon_group', 'dep_group', 'num_events'], ascending=[True, True, True, False])
+
+ # # find all events in the same lat_group and lon_group and dep_group
+ # # and copy the 'staname' 'stlo' 'stla' 'stel' to all rows within the same group from the row where 'count' is the largest
+ # self.rec_points['staname'] = self.rec_points.groupby(['lat_group', 'lon_group', 'dep_group'])['staname'].transform(lambda x: x.iloc[0])
+ # self.rec_points['stlo'] = self.rec_points.groupby(['lat_group', 'lon_group', 'dep_group'])['stlo'].transform(lambda x: x.iloc[0])
+ # self.rec_points['stla'] = self.rec_points.groupby(['lat_group', 'lon_group', 'dep_group'])['stla'].transform(lambda x: x.iloc[0])
+ # self.rec_points['stel'] = self.rec_points.groupby(['lat_group', 'lon_group', 'dep_group'])['stel'].transform(lambda x: x.iloc[0])
+
+ # # drop 'lat_group' and 'lon_group' and 'dep_group'
+ # self.rec_points = self.rec_points.drop(columns=['lat_group', 'lon_group', 'dep_group'])
+
+ # # sort
+ # self.rec_points = self.rec_points.sort_values(by=['src_index','rec_index'])
+
+ # # update the num_events
+ # self.count_events_per_station()
+
+ # # number of unique stations after merging
+ # print('number of unique stations after merging: ', self.rec_points['staname'].nunique())
+
+ #
+ # This function is commented out temporarily because it contains a verified bug and has not been fixed.
+ #
+ # def merge_duplicated_station(self):
+ # """
+ # merge duplicated stations as one station
+ # duplicated stations are defined as stations with the same staname
+ # """
+
+ # # number of unique stations before merging
+ # print('number of unique stations before merging: ', self.rec_points['staname'].nunique())
+
+ # # sort rec_points by 'src_index' then 'staname'
+ # self.rec_points = self.rec_points.sort_values(by=['src_index', 'staname'])
+
+ # # find all duplicated stations in each src_index and drop except the first one
+ # self.rec_points = self.rec_points.drop_duplicates(subset=['src_index', 'staname'], keep='first')
+
+ # # sort rec_points by 'src_index' then 'rec_index'
+ # self.rec_points = self.rec_points.sort_values(by=['src_index', 'rec_index'])
+
+ # # update the num_events
+ # self.count_events_per_station()
+
+ # # number of unique stations after merging
+ # print('number of unique stations after merging: ', self.rec_points['staname'].nunique())
+
+
+ def add_noise(self, range_in_sec=0.1, mean_in_sec=0.0, shape="gaussian"):
+ """Add random noise to travel times
+
+ :param range_in_sec: Maximum amplitude (uniform) or standard deviation (gaussian) of the noise in sec, defaults to 0.1
+ :type range_in_sec: float, optional
+ :param mean_in_sec: Mean of the noise in sec, defaults to 0.0
+ :type mean_in_sec: float, optional
+ :param shape: Shape of the noise probability distribution, ``gaussian`` or ``uniform``, defaults to 'gaussian'
+ :type shape: str, optional
+ """
+ if shape == "uniform":
+ noise = (
+ np.random.uniform(
+ low=-range_in_sec, high=range_in_sec, size=self.rec_points.shape[0]
+ )
+ + mean_in_sec
+ )
+ elif shape == "gaussian":
+ noise = np.random.normal(
+ loc=mean_in_sec, scale=range_in_sec, size=self.rec_points.shape[0]
+ )
+ else:
+ raise ValueError("shape should be 'gaussian' or 'uniform'")
+ self.rec_points["tt"] += noise
+
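+ # Sketch for building a noisy synthetic data set (values are examples):
+ #
+ # >>> sr.add_noise(range_in_sec=0.1, shape='gaussian')
+ # >>> sr.write('src_rec_file_noised')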
+
+
+ def write_receivers(self, fname: str):
+ """
+ Write receivers to a txt file.
+
+ :param fname: Path to output txt file of receivers
+ """
+ self.receivers.to_csv(fname, sep=" ", header=False, index=False)
+
+
+
+ def write_sources(self, fname: str):
+ """
+ Write sources to a txt file.
+
+ :param fname: Path to output txt file of sources
+ """
+ self.sources.to_csv(fname, sep=" ", header=False, index=False)
+
+
+
+ @classmethod
+ def from_seispy(cls, rf_path: str):
+ """Read and convert source and station information from
+ receiver function data calculated by Seispy
+
+ :param rf_path: Path to receiver functions calculated by Seispy
+ :type rf_path: str
+ :return: New instance of class SrcRec
+ :rtype: SrcRec
+ """
+ from .io.seispy import Seispy
+
+ sr = cls("")
+ # Initialize an instance of Seispy
+ seispyio = Seispy(rf_path)
+
+ # Load station info from SAC header
+ seispyio._load_sta_info()
+
+ # Read finallist.dat
+ seispyio.get_rf_info()
+
+ # Convert to SrcRec format
+ sr.src_points, sr.rec_points = seispyio.to_src_rec_points()
+
+ # update number of receivers
+ sr.update_num_rec()
+
+ return sr
+
+
+ # implemented in vis.py
+
+ def plot(self, weight=False, fname=None):
+ """Plot source and receivers for preview
+
+ :param weight: Draw colors of weights, defaults to False
+ :type weight: bool, optional
+ :param fname: Path to output file, defaults to None
+ :type fname: str, optional
+ :return: matplotlib figure
+ :rtype: matplotlib.figure.Figure
+ """
+ from .vis import plot_srcrec
+
+ return plot_srcrec(self, weight=weight, fname=fname)
+
+
+
+
+if __name__ == "__main__":
+ sr = SrcRec.read("src_rec_file_checker_data_test1.dat_noised_evweighted")
+ sr.write()
+ print(sr.rec_points)
+
0.625rem // $code-spacing-vertical\n right: 0.5rem\n\n // Make it look better\n color: var(--color-background-item)\n background-color: var(--color-code-background)\n border: none\n\n // Change to cursor to make it obvious that you can click on it\n cursor: pointer\n\n // Transition smoothly, for aesthetics\n transition: color 300ms, opacity 300ms\n\n &:hover\n color: var(--color-brand-content)\n background-color: var(--color-code-background)\n\n &::after\n display: none\n color: var(--color-code-foreground)\n background-color: transparent\n\n &.success\n transition: color 0ms\n color: #22863a\n &::after\n display: block\n\n svg\n padding: 0\n","body\n // Colors\n --sd-color-primary: var(--color-brand-primary)\n --sd-color-primary-highlight: var(--color-brand-content)\n --sd-color-primary-text: var(--color-background-primary)\n\n // Shadows\n --sd-color-shadow: rgba(0, 0, 0, 0.05)\n\n // Cards\n --sd-color-card-border: var(--color-card-border)\n --sd-color-card-border-hover: var(--color-brand-content)\n --sd-color-card-background: var(--color-card-background)\n --sd-color-card-text: var(--color-foreground-primary)\n --sd-color-card-header: var(--color-card-marginals-background)\n --sd-color-card-footer: var(--color-card-marginals-background)\n\n // Tabs\n --sd-color-tabs-label-active: var(--color-brand-content)\n --sd-color-tabs-label-hover: var(--color-foreground-muted)\n --sd-color-tabs-label-inactive: var(--color-foreground-muted)\n --sd-color-tabs-underline-active: var(--color-brand-content)\n --sd-color-tabs-underline-hover: var(--color-foreground-border)\n --sd-color-tabs-underline-inactive: var(--color-background-border)\n --sd-color-tabs-overline: var(--color-background-border)\n --sd-color-tabs-underline: var(--color-background-border)\n\n// Tabs\n.sd-tab-content\n box-shadow: 0 -2px var(--sd-color-tabs-overline), 0 1px var(--sd-color-tabs-underline)\n\n// Shadows\n.sd-card // Have a shadow by default\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n.sd-shadow-sm\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-md\n box-shadow: 0 0.3rem 0.75rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-lg\n box-shadow: 0 0.6rem 1.5rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Cards\n.sd-card-hover:hover // Don't change scale on hover\n transform: none\n\n.sd-cards-carousel // Have a bit of gap in the carousel by default\n gap: 0.25rem\n padding: 0.25rem\n","// This file contains styles to tweak sphinx-inline-tabs to work well with Furo.\n\nbody\n --tabs--label-text: var(--color-foreground-muted)\n --tabs--label-text--hover: var(--color-foreground-muted)\n --tabs--label-text--active: var(--color-brand-content)\n --tabs--label-text--active--hover: var(--color-brand-content)\n --tabs--label-background: transparent\n --tabs--label-background--hover: transparent\n --tabs--label-background--active: transparent\n --tabs--label-background--active--hover: transparent\n --tabs--padding-x: 0.25em\n --tabs--margin-x: 1em\n --tabs--border: var(--color-background-border)\n --tabs--label-border: transparent\n --tabs--label-border--hover: var(--color-foreground-muted)\n --tabs--label-border--active: var(--color-brand-content)\n --tabs--label-border--active--hover: var(--color-brand-content)\n","// This file contains styles to tweak sphinx-panels to work well with Furo.\n\n// sphinx-panels includes Bootstrap 4, which uses .container which 
can conflict\n// with docutils' `.. container::` directive.\n[role=\"main\"] .container\n max-width: initial\n padding-left: initial\n padding-right: initial\n\n// Make the panels look nicer!\n.shadow.docutils\n border: none\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Make panel colors respond to dark mode\n.sphinx-bs .card\n background-color: var(--color-background-secondary)\n color: var(--color-foreground)\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/_static/styles/furo.css b/_static/styles/furo.css new file mode 100644 index 0000000..e3d4e57 --- /dev/null +++ b/_static/styles/furo.css @@ -0,0 +1,2 @@ +/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}@media print{.content-icon-container,.headerlink,.mobile-header,.related-pages{display:none!important}.highlight{border:.1pt solid var(--color-foreground-border)}a,blockquote,dl,ol,pre,table,ul{page-break-inside:avoid}caption,figure,h1,h2,h3,h4,h5,h6,img{page-break-after:avoid;page-break-inside:avoid}dl,ol,ul{page-break-before:avoid}}.visually-hidden{height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;width:1px!important;clip:rect(0,0,0,0)!important;background:var(--color-background-primary);border:0!important;color:var(--color-foreground-primary);white-space:nowrap!important}:-moz-focusring{outline:auto}body{--font-stack:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji;--font-stack--monospace:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida 
Console,monospace;--font-stack--headings:var(--font-stack);--font-size--normal:100%;--font-size--small:87.5%;--font-size--small--2:81.25%;--font-size--small--3:75%;--font-size--small--4:62.5%;--sidebar-caption-font-size:var(--font-size--small--2);--sidebar-item-font-size:var(--font-size--small);--sidebar-search-input-font-size:var(--font-size--small);--toc-font-size:var(--font-size--small--3);--toc-font-size--mobile:var(--font-size--normal);--toc-title-font-size:var(--font-size--small--4);--admonition-font-size:0.8125rem;--admonition-title-font-size:0.8125rem;--code-font-size:var(--font-size--small--2);--api-font-size:var(--font-size--small);--header-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*4);--header-padding:0.5rem;--sidebar-tree-space-above:1.5rem;--sidebar-caption-space-above:1rem;--sidebar-item-line-height:1rem;--sidebar-item-spacing-vertical:0.5rem;--sidebar-item-spacing-horizontal:1rem;--sidebar-item-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*2);--sidebar-expander-width:var(--sidebar-item-height);--sidebar-search-space-above:0.5rem;--sidebar-search-input-spacing-vertical:0.5rem;--sidebar-search-input-spacing-horizontal:0.5rem;--sidebar-search-input-height:1rem;--sidebar-search-icon-size:var(--sidebar-search-input-height);--toc-title-padding:0.25rem 0;--toc-spacing-vertical:1.5rem;--toc-spacing-horizontal:1.5rem;--toc-item-spacing-vertical:0.4rem;--toc-item-spacing-horizontal:1rem;--icon-search:url('data:image/svg+xml;charset=utf-8,