synfeal | synfeal-main/synfeal_collection/src/save_dataset.py |
import rospy
import os
from visualization_msgs.msg import *
from cv_bridge import CvBridge
from tf.listener import TransformListener
from utils import write_intrinsic, write_img, write_transformation
from utils_ros import read_pcd, write_pcd
from sensor_msgs.msg import PointCloud2, Image, PointField, CameraInfo
from colorama import Fore
from datetime import datetime
import yaml
import sensor_msgs.point_cloud2 as pc2
import numpy as np
class SaveDataset():
def __init__(self, output, mode, dbf = None, uvl = None, model3d_config = None, fast=False):
        path = os.environ.get("SYNFEAL_DATASET")
self.output_folder = f'{path}/datasets/localbot/{output}'
if not os.path.exists(self.output_folder):
print(f'Creating folder {self.output_folder}')
os.makedirs(self.output_folder) # Create the new folder
else:
print(f'{Fore.RED} {self.output_folder} already exists... Aborting SaveDataset initialization! {Fore.RESET}')
exit(0)
        name_model3d_config = model3d_config
dt_now = datetime.now() # current date and time
config = {'user' : os.environ["USER"],
'date' : dt_now.strftime("%d/%m/%Y, %H:%M:%S"),
'mode' : mode,
'is_valid' : False,
'npoints' : None,
'scaled' : False,
'distance_between_frames' : dbf,
'raw' : output,
'variable_lights' : uvl,
'model3d_config' : name_model3d_config,
'fast' : fast}
self.fast = fast
self.frame_idx = 0
self.world_link = 'world'
self.depth_frame = 'kinect_depth_optical_frame'
self.rgb_frame = 'kinect_rgb_optical_frame'
self.listener = TransformListener()
self.bridge = CvBridge()
        # get transformation from depth_frame to rgb_frame
now = rospy.Time()
print(f'Waiting for transformation from {self.depth_frame} to {self.rgb_frame}')
self.listener.waitForTransform(self.depth_frame, self.rgb_frame , now, rospy.Duration(5)) # admissible waiting time
print('... received!')
self.transform_depth_rgb = self.listener.lookupTransform(self.depth_frame, self.rgb_frame, now)
self.matrix_depth_rgb = self.listener.fromTranslationRotation(self.transform_depth_rgb[0], self.transform_depth_rgb[1])
# get intrinsic matrices from both cameras
rgb_camera_info = rospy.wait_for_message('/kinect/rgb/camera_info', CameraInfo)
depth_camera_info = rospy.wait_for_message('/kinect/depth/camera_info', CameraInfo)
# rgb information
rgb_intrinsic = rgb_camera_info.K
rgb_width = rgb_camera_info.width
rgb_height = rgb_camera_info.height
# depth information
depth_width = depth_camera_info.width
depth_height = depth_camera_info.height
depth_intrinsic = depth_camera_info.K
# save intrinsic to txt file
write_intrinsic(f'{self.output_folder}/rgb_intrinsic.txt', rgb_intrinsic)
write_intrinsic(f'{self.output_folder}/depth_intrinsic.txt', depth_intrinsic)
rgb_dict = {'intrinsic' : f'{self.output_folder}/rgb_intrinsic.txt',
'width' : rgb_width,
'height' : rgb_height}
depth_dict = {'intrinsic' : f'{self.output_folder}/depth_intrinsic.txt',
'width' : depth_width,
'height' : depth_height}
config['rgb'] = rgb_dict
config['depth'] = depth_dict
with open(f'{self.output_folder}/config.yaml', 'w') as file:
yaml.dump(config, file)
with open(f'{self.output_folder}/model3d_config.yaml', 'w') as file:
yaml.dump(model3d_config, file)
print('SaveDataset initialized properly')
def saveFrame(self):
transformation = self.getTransformation()
image = self.getImage()
filename = f'frame-{self.frame_idx:05d}'
write_transformation(f'{self.output_folder}/{filename}.pose.txt', transformation)
write_img(f'{self.output_folder}/{filename}.rgb.png', image)
if not self.fast:
pc_msg = self.getPointCloud()
write_pcd(f'{self.output_folder}/{filename}.pcd', pc_msg)
print(f'frame-{self.frame_idx:05d} saved successfully')
self.step()
def getTransformation(self):
now = rospy.Time()
print(f'Waiting for transformation from {self.world_link} to {self.rgb_frame}')
self.listener.waitForTransform(self.world_link, self.rgb_frame , now, rospy.Duration(5))
print('... received!')
(trans,rot) = self.listener.lookupTransform(self.world_link, self.rgb_frame, now)
return self.listener.fromTranslationRotation(trans, rot)
def getImage(self):
rgb_msg = rospy.wait_for_message('/kinect/rgb/image_raw', Image)
return self.bridge.imgmsg_to_cv2(rgb_msg, "bgr8") # convert to opencv image
def getPointCloud(self):
pc_msg = rospy.wait_for_message('/kinect/depth/points', PointCloud2)
pc2_points = pc2.read_points(pc_msg)
gen_selected_points = list(pc2_points)
lst_points = []
for point in gen_selected_points:
lst_points.append([point[0], point[1], point[2], 1])
np_points = np.array(lst_points)
# convert to rgb_frame
np_points = np.dot(self.matrix_depth_rgb, np_points.T).T
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('intensity', 12, PointField.FLOAT32, 1)]
pc_msg.header.frame_id = self.rgb_frame
return pc2.create_cloud(pc_msg.header, fields, np_points)
def step(self):
        self.frame_idx += 1
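# Illustrative usage sketch (added for this write-up, not part of the original file).
# The node name, dataset name, mode string, and capture rate are assumptions; the
# constructor itself blocks until the kinect TF and camera_info messages arrive.
if __name__ == '__main__':
    rospy.init_node('save_dataset_demo')
    saver = SaveDataset(output='demo_dataset', mode='manual', fast=True)
    rate = rospy.Rate(1)  # one frame per second (assumed)
    while not rospy.is_shutdown():
        saver.saveFrame()  # writes frame-XXXXX.pose.txt and .rgb.png (pcd skipped: fast=True)
        rate.sleep()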
synfeal | synfeal-main/synfeal_collection/src/pypcd_no_ros.py |
"""
The MIT License (MIT)
Copyright (c) 2015 Daniel Maturana, Carnegie Mellon University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Read and write PCL .pcd files in python.
dimatura@cmu.edu, 2013
"""
import re
import struct
import copy
import numpy as np
#from sensor_msgs.msg import PointField
#from sensor_msgs.msg import PointCloud2
DUMMY_FIELD_PREFIX = '__'
# sizes (in bytes) of PointField types
# pftype_sizes = {PointField.INT8: 1, PointField.UINT8: 1, PointField.INT16: 2, PointField.UINT16: 2,
# PointField.INT32: 4, PointField.UINT32: 4, PointField.FLOAT32: 4, PointField.FLOAT64: 8}
# # mappings between PointField types and numpy types
# type_mappings = [(PointField.INT8, np.dtype('int8')),
# (PointField.UINT8, np.dtype('uint8')),
# (PointField.INT16, np.dtype('int16')),
# (PointField.UINT16, np.dtype('uint16')),
# (PointField.INT32, np.dtype('int32')),
# (PointField.UINT32, np.dtype('uint32')),
# (PointField.FLOAT32, np.dtype('float32')),
# (PointField.FLOAT64, np.dtype('float64'))]
# pftype_to_nptype = dict(type_mappings)
# nptype_to_pftype = dict((nptype, pftype) for pftype, nptype in type_mappings)
# pc2_pcd_type_mappings = [(PointField.INT8, ('I', 1)),
# (PointField.UINT8, ('U', 1)),
# (PointField.INT16, ('I', 2)),
# (PointField.UINT16, ('U', 2)),
# (PointField.INT32, ('I', 4)),
# (PointField.UINT32, ('U', 4)),
# (PointField.FLOAT32, ('F', 4)),
# (PointField.FLOAT64, ('F', 8))]
# pc2_type_to_pcd_type = dict(pc2_pcd_type_mappings)
# pcd_type_to_pc2_type = dict((q, p) for (p, q) in pc2_pcd_type_mappings)
numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),
(np.dtype('float64'), ('F', 8)),
(np.dtype('uint8'), ('U', 1)),
(np.dtype('uint16'), ('U', 2)),
(np.dtype('uint32'), ('U', 4)),
(np.dtype('uint64'), ('U', 8)),
(np.dtype('int16'), ('I', 2)),
(np.dtype('int32'), ('I', 4)),
(np.dtype('int64'), ('I', 8))]
numpy_type_to_pcd_type = dict(numpy_pcd_type_mappings)
pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)
def split_rgb_field(cloud_arr):
'''Takes an array with a named 'rgb' float32 field, and returns an array in which
    this has been split into three uint8 fields: 'r', 'g', and 'b'.
(pcl stores rgb in packed 32 bit floats)
'''
rgb_arr = cloud_arr['rgb'].copy()
rgb_arr.dtype = np.uint32
r = np.asarray((rgb_arr >> 16) & 255, dtype=np.uint8)
g = np.asarray((rgb_arr >> 8) & 255, dtype=np.uint8)
b = np.asarray(rgb_arr & 255, dtype=np.uint8)
# create a new array, without rgb, but with r, g, and b fields
new_dtype = []
for field_name in cloud_arr.dtype.names:
field_type, field_offset = cloud_arr.dtype.fields[field_name]
if not field_name == 'rgb':
new_dtype.append((field_name, field_type))
new_dtype.append(('r', np.uint8))
new_dtype.append(('g', np.uint8))
new_dtype.append(('b', np.uint8))
new_cloud_arr = np.zeros(cloud_arr.shape, new_dtype)
# fill in the new array
for field_name in new_cloud_arr.dtype.names:
if field_name == 'r':
new_cloud_arr[field_name] = r
elif field_name == 'g':
new_cloud_arr[field_name] = g
elif field_name == 'b':
new_cloud_arr[field_name] = b
else:
new_cloud_arr[field_name] = cloud_arr[field_name]
return new_cloud_arr
def merge_rgb_fields(cloud_arr):
'''Takes an array with named np.uint8 fields 'r', 'g', and 'b', and returns an array in
which they have been merged into a single np.float32 'rgb' field. The first byte of this
    field is the 'r' uint8, the second the 'g' uint8, and the third the 'b' uint8.
This is the way that pcl likes to handle RGB colors for some reason.
'''
r = np.asarray(cloud_arr['r'], dtype=np.uint32)
g = np.asarray(cloud_arr['g'], dtype=np.uint32)
b = np.asarray(cloud_arr['b'], dtype=np.uint32)
rgb_arr = np.array((r << 16) | (g << 8) | (b << 0), dtype=np.uint32)
# not sure if there is a better way to do this. i'm changing the type of the array
# from uint32 to float32, but i don't want any conversion to take place -jdb
rgb_arr.dtype = np.float32
# create a new array, without r, g, and b, but with rgb float32 field
new_dtype = []
for field_name in cloud_arr.dtype.names:
field_type, field_offset = cloud_arr.dtype.fields[field_name]
if field_name not in ('r', 'g', 'b'):
new_dtype.append((field_name, field_type))
new_dtype.append(('rgb', np.float32))
new_cloud_arr = np.zeros(cloud_arr.shape, new_dtype)
# fill in the new array
for field_name in new_cloud_arr.dtype.names:
if field_name == 'rgb':
new_cloud_arr[field_name] = rgb_arr
else:
new_cloud_arr[field_name] = cloud_arr[field_name]
return new_cloud_arr
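# Minimal round-trip check (illustrative, not in the original module): packing r/g/b
# into a float32 'rgb' field and splitting it back is lossless, because both
# directions only reinterpret the same 32 bits.
def _demo_rgb_roundtrip():
    cloud = np.zeros(4, dtype=[('x', np.float32), ('r', np.uint8),
                               ('g', np.uint8), ('b', np.uint8)])
    cloud['r'], cloud['g'], cloud['b'] = 255, 128, 7
    merged = merge_rgb_fields(cloud)    # fields: x, rgb (packed float32)
    restored = split_rgb_field(merged)  # fields: x, r, g, b again
    assert (restored['r'] == 255).all() and (restored['b'] == 7).all()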
def arr_to_fields(cloud_arr):
    '''Convert a numpy record datatype into a list of PointFields.
    NOTE: in this ROS-free variant the sensor_msgs imports are commented out, so this
    helper (and the pointcloud2_*/array_to_pointcloud2 helpers below) will raise
    NameError unless the ROS message types are made available.
    '''
fields = []
for field_name in cloud_arr.dtype.names:
np_field_type, field_offset = cloud_arr.dtype.fields[field_name]
pf = PointField()
pf.name = field_name
pf.datatype = nptype_to_pftype[np_field_type]
pf.offset = field_offset
pf.count = 1 # is this ever more than one?
fields.append(pf)
return fields
def pointcloud2_to_dtype(cloud_msg):
'''Convert a list of PointFields to a numpy record datatype.
'''
offset = 0
np_dtype_list = []
for f in cloud_msg.fields:
while offset < f.offset:
# might be extra padding between fields
np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
offset += 1
np_dtype_list.append((f.name, pftype_to_nptype[f.datatype]))
offset += pftype_sizes[f.datatype]
# might be extra padding between points
while offset < cloud_msg.point_step:
np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
offset += 1
return np_dtype_list
def pointcloud2_to_array(cloud_msg, split_rgb=False, remove_padding=True):
''' Converts a rospy PointCloud2 message to a numpy recordarray
Reshapes the returned array to have shape (height, width), even if the height is 1.
    The reason for using np.frombuffer rather than struct.unpack is speed... especially
    for large point clouds, this will be <much> faster.
    '''
    # construct a numpy record type equivalent to the point type of this cloud
    dtype_list = pointcloud2_to_dtype(cloud_msg)
    # parse the cloud into an array (np.frombuffer replaces the deprecated np.fromstring)
    cloud_arr = np.frombuffer(cloud_msg.data, dtype_list)
# remove the dummy fields that were added
if remove_padding:
cloud_arr = cloud_arr[
[fname for fname, _type in dtype_list if not (fname[:len(DUMMY_FIELD_PREFIX)] == DUMMY_FIELD_PREFIX)]]
if split_rgb:
cloud_arr = split_rgb_field(cloud_arr)
return np.reshape(cloud_arr, (cloud_msg.height, cloud_msg.width))
def array_to_pointcloud2(cloud_arr, stamp=None, frame_id=None, merge_rgb=False):
'''Converts a numpy record array to a sensor_msgs.msg.PointCloud2.
'''
if merge_rgb:
cloud_arr = merge_rgb_fields(cloud_arr)
# make it 2d (even if height will be 1)
cloud_arr = np.atleast_2d(cloud_arr)
cloud_msg = PointCloud2()
if stamp is not None:
cloud_msg.header.stamp = stamp
if frame_id is not None:
cloud_msg.header.frame_id = frame_id
cloud_msg.height = cloud_arr.shape[0]
cloud_msg.width = cloud_arr.shape[1]
cloud_msg.fields = arr_to_fields(cloud_arr)
cloud_msg.is_bigendian = False # assumption
cloud_msg.point_step = cloud_arr.dtype.itemsize
cloud_msg.row_step = cloud_msg.point_step * cloud_arr.shape[1]
cloud_msg.is_dense = all([np.isfinite(cloud_arr[fname]).all() for fname in cloud_arr.dtype.names])
    cloud_msg.data = cloud_arr.tobytes()  # tostring() is a deprecated alias of tobytes()
return cloud_msg
def parse_header(lines):
metadata = {}
for ln in lines:
if ln.startswith('#') or len(ln) < 2:
continue
        match = re.match(r'(\w+)\s+([\w\s\.]+)', ln)
if not match:
print("\033[93m" + "warning: can't understand line: %s" % ln + "\033[1m")
continue
key, value = match.group(1).lower(), match.group(2)
if key == 'version':
metadata[key] = value
elif key in ('fields', 'type'):
metadata[key] = value.split()
elif key in ('size', 'count'):
metadata[key] = list(map(int, value.split()))
elif key in ('width', 'height', 'points'):
metadata[key] = int(value)
elif key == 'viewpoint':
metadata[key] = list(map(float, value.split()))
elif key == 'data':
metadata[key] = value.strip().lower()
# TODO apparently count is not required?
# add some reasonable defaults
if 'count' not in metadata:
metadata['count'] = [1] * len(metadata['fields'])
if 'viewpoint' not in metadata:
metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
if 'version' not in metadata:
metadata['version'] = '.7'
return metadata
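# Worked example (illustrative): parse_header consumes the header lines of a .pcd
# file; COUNT and VIEWPOINT may be omitted, in which case the defaults above apply.
def _demo_parse_header():
    lines = ['# .PCD v0.7 - Point Cloud Data file format',
             'VERSION 0.7',
             'FIELDS x y z',
             'SIZE 4 4 4',
             'TYPE F F F',
             'WIDTH 2',
             'HEIGHT 1',
             'POINTS 2',
             'DATA binary']
    md = parse_header(lines)
    assert md['count'] == [1, 1, 1]  # default: one count per field
    assert md['viewpoint'] == [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
    return md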
def write_header(metadata, rename_padding=False):
""" given metadata as dictionary return a string header.
"""
template = """\
VERSION {version}
FIELDS {fields}
SIZE {size}
TYPE {type}
COUNT {count}
WIDTH {width}
HEIGHT {height}
VIEWPOINT {viewpoint}
POINTS {points}
DATA {data}
"""
str_metadata = metadata.copy()
if not rename_padding:
str_metadata['fields'] = ' '.join(metadata['fields'])
else:
new_fields = []
for f in metadata['fields']:
if f == '_':
new_fields.append('padding')
else:
new_fields.append(f)
str_metadata['fields'] = ' '.join(new_fields)
str_metadata['size'] = ' '.join(map(str, metadata['size']))
str_metadata['type'] = ' '.join(metadata['type'])
str_metadata['count'] = ' '.join(map(str, metadata['count']))
str_metadata['width'] = str(metadata['width'])
str_metadata['height'] = str(metadata['height'])
str_metadata['viewpoint'] = ' '.join(map(str, metadata['viewpoint']))
str_metadata['points'] = str(metadata['points'])
tmpl = template.format(**str_metadata)
return tmpl
def _metadata_is_consistent(metadata):
""" sanity check for metadata. just some basic checks.
"""
checks = []
required = ('version', 'fields', 'size', 'width', 'height', 'points',
'viewpoint', 'data')
for f in required:
if f not in metadata:
print('%s required' % f)
checks.append((lambda m: all([k in m for k in required]),
'missing field'))
checks.append((lambda m: len(m['type']) == len(m['count']) ==
len(m['fields']),
'length of type, count and fields must be equal'))
checks.append((lambda m: m['height'] > 0,
'height must be greater than 0'))
checks.append((lambda m: m['width'] > 0,
'width must be greater than 0'))
checks.append((lambda m: m['points'] > 0,
'points must be greater than 0'))
checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',
'binary_compressed'),
'unknown data type:'
'should be ascii/binary/binary_compressed'))
ok = True
for check, msg in checks:
if not check(metadata):
print('error:', msg)
ok = False
return ok
def _build_dtype(metadata):
""" build numpy structured array dtype from pcl metadata.
note that fields with count > 1 are 'flattened' by creating multiple
single-count fields.
TODO: allow 'proper' multi-count fields.
"""
fieldnames = []
typenames = []
for f, c, t, s in zip(metadata['fields'],
metadata['count'],
metadata['type'],
metadata['size']):
np_type = pcd_type_to_numpy_type[(t, s)]
if c == 1:
fieldnames.append(f)
typenames.append(np_type)
else:
fieldnames.extend(['%s_%04d' % (f, i) for i in range(c)])
typenames.extend([np_type] * c)
dtype = np.dtype(list(zip(fieldnames, typenames)))
return dtype
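# Quick illustration (added here): three single-count float32 fields map to a
# 12-byte-per-point structured dtype.
def _demo_build_dtype():
    md = {'fields': ['x', 'y', 'z'], 'count': [1, 1, 1],
          'type': ['F', 'F', 'F'], 'size': [4, 4, 4]}
    dtype = _build_dtype(md)
    assert dtype.names == ('x', 'y', 'z') and dtype.itemsize == 12
    return dtype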
def parse_binary_pc_data(f, dtype, metadata):
rowstep = metadata['points'] * dtype.itemsize
# for some reason pcl adds empty space at the end of files
buf = f.read(rowstep)
    return np.frombuffer(buf, dtype=dtype)  # frombuffer replaces the deprecated np.fromstring
def point_cloud_from_fileobj(f):
""" parse pointcloud coming from file object f
"""
header = []
while True:
ln = f.readline().strip()
if not isinstance(ln, str):
ln = ln.decode('utf-8')
header.append(ln)
if ln.startswith('DATA'):
metadata = parse_header(header)
dtype = _build_dtype(metadata)
break
pc_data = parse_binary_pc_data(f, dtype, metadata)
return PointCloud(metadata, pc_data)
def point_cloud_from_path(fname):
""" load point cloud in binary format
"""
with open(fname, 'rb') as f:
pc = point_cloud_from_fileobj(f)
return pc
def point_cloud_to_fileobj(pc, fileobj, data_compression=None):
""" write pointcloud as .pcd to fileobj.
if data_compression is not None it overrides pc.data.
"""
metadata = pc.get_metadata()
if data_compression is not None:
data_compression = data_compression.lower()
assert (data_compression in ('ascii', 'binary', 'binary_compressed'))
metadata['data'] = data_compression
header = write_header(metadata).encode('utf-8')
fileobj.write(header)
    fileobj.write(pc.pc_data.tobytes())  # tostring() is a deprecated alias of tobytes()
class PointCloud(object):
def __init__(self, metadata, pc_data):
self.metadata_keys = metadata.keys()
self.__dict__.update(metadata)
self.pc_data = pc_data
self.check_sanity()
def get_metadata(self):
""" returns copy of metadata """
metadata = {}
for k in self.metadata_keys:
metadata[k] = copy.copy(getattr(self, k))
return metadata
def check_sanity(self):
# pdb.set_trace()
md = self.get_metadata()
assert (_metadata_is_consistent(md))
assert (len(self.pc_data) == self.points)
assert (self.width * self.height == self.points)
assert (len(self.fields) == len(self.count))
assert (len(self.fields) == len(self.type))
def save_pcd(self, fname, compression=None, **kwargs):
if 'data_compression' in kwargs:
print('\033[93m' + 'data_compression keyword is deprecated for'
' compression' + '\033[1m')
compression = kwargs['data_compression']
with open(fname, 'wb') as f:
point_cloud_to_fileobj(self, f, compression)
def save_pcd_to_fileobj(self, fileobj, compression=None, **kwargs):
if 'data_compression' in kwargs:
print('\033[93m' + 'data_compression keyword is deprecated for'
' compression' + '\033[1m')
compression = kwargs['data_compression']
point_cloud_to_fileobj(self, fileobj, compression)
def copy(self):
new_pc_data = np.copy(self.pc_data)
new_metadata = self.get_metadata()
return PointCloud(new_metadata, new_pc_data)
def to_msg(self):
# TODO is there some metadata we want to attach?
return array_to_pointcloud2(self.pc_data)
@staticmethod
def from_path(fname):
return point_cloud_from_path(fname)
@staticmethod
def from_msg(msg, squeeze=True):
""" from pointcloud2 msg
squeeze: fix when clouds get 1 as first dim
"""
md = {'version': .7,
'fields': [],
'size': [],
'count': [],
'width': 0,
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': 0,
'type': [],
'data': 'binary_compressed'}
for field in msg.fields:
md['fields'].append(field.name)
t, s = pc2_type_to_pcd_type[field.datatype]
md['type'].append(t)
md['size'].append(s)
# TODO handle multicount correctly
if field.count > 1:
print('\033[93m' + 'fields with count > 1 are not well tested' + '\033[1m')
md['count'].append(field.count)
pc_data = np.squeeze(pointcloud2_to_array(msg))
md['width'] = len(pc_data)
md['points'] = len(pc_data)
pc = PointCloud(md, pc_data)
return pc
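# Example usage of this ROS-free variant (illustrative; the file names are
# assumptions). Note that point_cloud_from_fileobj only handles binary DATA here.
if __name__ == '__main__':
    pc = PointCloud.from_path('cloud.pcd')  # hypothetical input file
    print(pc.width, pc.height, pc.points, pc.fields)
    pc.save_pcd('cloud_copy.pcd', compression='binary')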
synfeal | synfeal-main/synfeal_collection/src/pypcd.py |
"""
The MIT License (MIT)
Copyright (c) 2015 Daniel Maturana, Carnegie Mellon University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Read and write PCL .pcd files in python.
dimatura@cmu.edu, 2013
"""
import re
import struct
import copy
import numpy as np
from sensor_msgs.msg import PointField
from sensor_msgs.msg import PointCloud2
DUMMY_FIELD_PREFIX = '__'
# sizes (in bytes) of PointField types
pftype_sizes = {PointField.INT8: 1, PointField.UINT8: 1, PointField.INT16: 2, PointField.UINT16: 2,
PointField.INT32: 4, PointField.UINT32: 4, PointField.FLOAT32: 4, PointField.FLOAT64: 8}
# mappings between PointField types and numpy types
type_mappings = [(PointField.INT8, np.dtype('int8')),
(PointField.UINT8, np.dtype('uint8')),
(PointField.INT16, np.dtype('int16')),
(PointField.UINT16, np.dtype('uint16')),
(PointField.INT32, np.dtype('int32')),
(PointField.UINT32, np.dtype('uint32')),
(PointField.FLOAT32, np.dtype('float32')),
(PointField.FLOAT64, np.dtype('float64'))]
pftype_to_nptype = dict(type_mappings)
nptype_to_pftype = dict((nptype, pftype) for pftype, nptype in type_mappings)
pc2_pcd_type_mappings = [(PointField.INT8, ('I', 1)),
(PointField.UINT8, ('U', 1)),
(PointField.INT16, ('I', 2)),
(PointField.UINT16, ('U', 2)),
(PointField.INT32, ('I', 4)),
(PointField.UINT32, ('U', 4)),
(PointField.FLOAT32, ('F', 4)),
(PointField.FLOAT64, ('F', 8))]
pc2_type_to_pcd_type = dict(pc2_pcd_type_mappings)
pcd_type_to_pc2_type = dict((q, p) for (p, q) in pc2_pcd_type_mappings)
numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),
(np.dtype('float64'), ('F', 8)),
(np.dtype('uint8'), ('U', 1)),
(np.dtype('uint16'), ('U', 2)),
(np.dtype('uint32'), ('U', 4)),
(np.dtype('uint64'), ('U', 8)),
(np.dtype('int16'), ('I', 2)),
(np.dtype('int32'), ('I', 4)),
(np.dtype('int64'), ('I', 8))]
numpy_type_to_pcd_type = dict(numpy_pcd_type_mappings)
pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)
def split_rgb_field(cloud_arr):
'''Takes an array with a named 'rgb' float32 field, and returns an array in which
    this has been split into three uint8 fields: 'r', 'g', and 'b'.
(pcl stores rgb in packed 32 bit floats)
'''
rgb_arr = cloud_arr['rgb'].copy()
rgb_arr.dtype = np.uint32
r = np.asarray((rgb_arr >> 16) & 255, dtype=np.uint8)
g = np.asarray((rgb_arr >> 8) & 255, dtype=np.uint8)
b = np.asarray(rgb_arr & 255, dtype=np.uint8)
# create a new array, without rgb, but with r, g, and b fields
new_dtype = []
for field_name in cloud_arr.dtype.names:
field_type, field_offset = cloud_arr.dtype.fields[field_name]
if not field_name == 'rgb':
new_dtype.append((field_name, field_type))
new_dtype.append(('r', np.uint8))
new_dtype.append(('g', np.uint8))
new_dtype.append(('b', np.uint8))
new_cloud_arr = np.zeros(cloud_arr.shape, new_dtype)
# fill in the new array
for field_name in new_cloud_arr.dtype.names:
if field_name == 'r':
new_cloud_arr[field_name] = r
elif field_name == 'g':
new_cloud_arr[field_name] = g
elif field_name == 'b':
new_cloud_arr[field_name] = b
else:
new_cloud_arr[field_name] = cloud_arr[field_name]
return new_cloud_arr
def merge_rgb_fields(cloud_arr):
'''Takes an array with named np.uint8 fields 'r', 'g', and 'b', and returns an array in
which they have been merged into a single np.float32 'rgb' field. The first byte of this
    field is the 'r' uint8, the second the 'g' uint8, and the third the 'b' uint8.
This is the way that pcl likes to handle RGB colors for some reason.
'''
r = np.asarray(cloud_arr['r'], dtype=np.uint32)
g = np.asarray(cloud_arr['g'], dtype=np.uint32)
b = np.asarray(cloud_arr['b'], dtype=np.uint32)
rgb_arr = np.array((r << 16) | (g << 8) | (b << 0), dtype=np.uint32)
# not sure if there is a better way to do this. i'm changing the type of the array
# from uint32 to float32, but i don't want any conversion to take place -jdb
rgb_arr.dtype = np.float32
# create a new array, without r, g, and b, but with rgb float32 field
new_dtype = []
for field_name in cloud_arr.dtype.names:
field_type, field_offset = cloud_arr.dtype.fields[field_name]
if field_name not in ('r', 'g', 'b'):
new_dtype.append((field_name, field_type))
new_dtype.append(('rgb', np.float32))
new_cloud_arr = np.zeros(cloud_arr.shape, new_dtype)
# fill in the new array
for field_name in new_cloud_arr.dtype.names:
if field_name == 'rgb':
new_cloud_arr[field_name] = rgb_arr
else:
new_cloud_arr[field_name] = cloud_arr[field_name]
return new_cloud_arr
def arr_to_fields(cloud_arr):
'''Convert a numpy record datatype into a list of PointFields.
'''
fields = []
for field_name in cloud_arr.dtype.names:
np_field_type, field_offset = cloud_arr.dtype.fields[field_name]
pf = PointField()
pf.name = field_name
pf.datatype = nptype_to_pftype[np_field_type]
pf.offset = field_offset
pf.count = 1 # is this ever more than one?
fields.append(pf)
return fields
def pointcloud2_to_dtype(cloud_msg):
'''Convert a list of PointFields to a numpy record datatype.
'''
offset = 0
np_dtype_list = []
for f in cloud_msg.fields:
while offset < f.offset:
# might be extra padding between fields
np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
offset += 1
np_dtype_list.append((f.name, pftype_to_nptype[f.datatype]))
offset += pftype_sizes[f.datatype]
# might be extra padding between points
while offset < cloud_msg.point_step:
np_dtype_list.append(('%s%d' % (DUMMY_FIELD_PREFIX, offset), np.uint8))
offset += 1
return np_dtype_list
def pointcloud2_to_array(cloud_msg, split_rgb=False, remove_padding=True):
''' Converts a rospy PointCloud2 message to a numpy recordarray
Reshapes the returned array to have shape (height, width), even if the height is 1.
    The reason for using np.frombuffer rather than struct.unpack is speed... especially
    for large point clouds, this will be <much> faster.
    '''
    # construct a numpy record type equivalent to the point type of this cloud
    dtype_list = pointcloud2_to_dtype(cloud_msg)
    # parse the cloud into an array (np.frombuffer replaces the deprecated np.fromstring)
    cloud_arr = np.frombuffer(cloud_msg.data, dtype_list)
# remove the dummy fields that were added
if remove_padding:
cloud_arr = cloud_arr[
[fname for fname, _type in dtype_list if not (fname[:len(DUMMY_FIELD_PREFIX)] == DUMMY_FIELD_PREFIX)]]
if split_rgb:
cloud_arr = split_rgb_field(cloud_arr)
return np.reshape(cloud_arr, (cloud_msg.height, cloud_msg.width))
def array_to_pointcloud2(cloud_arr, stamp=None, frame_id=None, merge_rgb=False):
'''Converts a numpy record array to a sensor_msgs.msg.PointCloud2.
'''
if merge_rgb:
cloud_arr = merge_rgb_fields(cloud_arr)
# make it 2d (even if height will be 1)
cloud_arr = np.atleast_2d(cloud_arr)
cloud_msg = PointCloud2()
if stamp is not None:
cloud_msg.header.stamp = stamp
if frame_id is not None:
cloud_msg.header.frame_id = frame_id
cloud_msg.height = cloud_arr.shape[0]
cloud_msg.width = cloud_arr.shape[1]
cloud_msg.fields = arr_to_fields(cloud_arr)
cloud_msg.is_bigendian = False # assumption
cloud_msg.point_step = cloud_arr.dtype.itemsize
cloud_msg.row_step = cloud_msg.point_step * cloud_arr.shape[1]
cloud_msg.is_dense = all([np.isfinite(cloud_arr[fname]).all() for fname in cloud_arr.dtype.names])
    cloud_msg.data = cloud_arr.tobytes()  # tostring() is a deprecated alias of tobytes()
return cloud_msg
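# Round-trip sketch (illustrative, not part of the original file): a structured
# numpy array becomes a PointCloud2 message and back. Requires a ROS environment
# for the sensor_msgs types imported above.
def _demo_array_roundtrip():
    arr = np.zeros((1, 3), dtype=[('x', np.float32), ('y', np.float32),
                                  ('z', np.float32)])
    msg = array_to_pointcloud2(arr, frame_id='map')  # frame_id is an assumption
    back = pointcloud2_to_array(msg)
    assert back.shape == (1, 3) and back.dtype.names == ('x', 'y', 'z')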
def parse_header(lines):
metadata = {}
for ln in lines:
if ln.startswith('#') or len(ln) < 2:
continue
        match = re.match(r'(\w+)\s+([\w\s\.]+)', ln)
if not match:
print("\033[93m" + "warning: can't understand line: %s" % ln + "\033[1m")
continue
key, value = match.group(1).lower(), match.group(2)
if key == 'version':
metadata[key] = value
elif key in ('fields', 'type'):
metadata[key] = value.split()
elif key in ('size', 'count'):
metadata[key] = list(map(int, value.split()))
elif key in ('width', 'height', 'points'):
metadata[key] = int(value)
elif key == 'viewpoint':
metadata[key] = list(map(float, value.split()))
elif key == 'data':
metadata[key] = value.strip().lower()
# TODO apparently count is not required?
# add some reasonable defaults
if 'count' not in metadata:
metadata['count'] = [1] * len(metadata['fields'])
if 'viewpoint' not in metadata:
metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
if 'version' not in metadata:
metadata['version'] = '.7'
return metadata
def write_header(metadata, rename_padding=False):
""" given metadata as dictionary return a string header.
"""
template = """\
VERSION {version}
FIELDS {fields}
SIZE {size}
TYPE {type}
COUNT {count}
WIDTH {width}
HEIGHT {height}
VIEWPOINT {viewpoint}
POINTS {points}
DATA {data}
"""
str_metadata = metadata.copy()
if not rename_padding:
str_metadata['fields'] = ' '.join(metadata['fields'])
else:
new_fields = []
for f in metadata['fields']:
if f == '_':
new_fields.append('padding')
else:
new_fields.append(f)
str_metadata['fields'] = ' '.join(new_fields)
str_metadata['size'] = ' '.join(map(str, metadata['size']))
str_metadata['type'] = ' '.join(metadata['type'])
str_metadata['count'] = ' '.join(map(str, metadata['count']))
str_metadata['width'] = str(metadata['width'])
str_metadata['height'] = str(metadata['height'])
str_metadata['viewpoint'] = ' '.join(map(str, metadata['viewpoint']))
str_metadata['points'] = str(metadata['points'])
tmpl = template.format(**str_metadata)
return tmpl
def _metadata_is_consistent(metadata):
""" sanity check for metadata. just some basic checks.
"""
checks = []
required = ('version', 'fields', 'size', 'width', 'height', 'points',
'viewpoint', 'data')
for f in required:
if f not in metadata:
print('%s required' % f)
checks.append((lambda m: all([k in m for k in required]),
'missing field'))
checks.append((lambda m: len(m['type']) == len(m['count']) ==
len(m['fields']),
'length of type, count and fields must be equal'))
checks.append((lambda m: m['height'] > 0,
'height must be greater than 0'))
checks.append((lambda m: m['width'] > 0,
'width must be greater than 0'))
checks.append((lambda m: m['points'] > 0,
'points must be greater than 0'))
checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',
'binary_compressed'),
'unknown data type:'
'should be ascii/binary/binary_compressed'))
ok = True
for check, msg in checks:
if not check(metadata):
print('error:', msg)
ok = False
return ok
def _build_dtype(metadata):
""" build numpy structured array dtype from pcl metadata.
note that fields with count > 1 are 'flattened' by creating multiple
single-count fields.
TODO: allow 'proper' multi-count fields.
"""
fieldnames = []
typenames = []
for f, c, t, s in zip(metadata['fields'],
metadata['count'],
metadata['type'],
metadata['size']):
np_type = pcd_type_to_numpy_type[(t, s)]
if c == 1:
fieldnames.append(f)
typenames.append(np_type)
else:
fieldnames.extend(['%s_%04d' % (f, i) for i in range(c)])
typenames.extend([np_type] * c)
dtype = np.dtype(list(zip(fieldnames, typenames)))
return dtype
def parse_binary_pc_data(f, dtype, metadata):
rowstep = metadata['points'] * dtype.itemsize
# for some reason pcl adds empty space at the end of files
buf = f.read(rowstep)
    return np.frombuffer(buf, dtype=dtype)  # frombuffer replaces the deprecated np.fromstring
def point_cloud_from_fileobj(f):
""" parse pointcloud coming from file object f
"""
header = []
while True:
ln = f.readline().strip()
if not isinstance(ln, str):
ln = ln.decode('utf-8')
header.append(ln)
if ln.startswith('DATA'):
metadata = parse_header(header)
dtype = _build_dtype(metadata)
break
pc_data = parse_binary_pc_data(f, dtype, metadata)
return PointCloud(metadata, pc_data)
def point_cloud_from_path(fname):
""" load point cloud in binary format
"""
with open(fname, 'rb') as f:
pc = point_cloud_from_fileobj(f)
return pc
def point_cloud_to_fileobj(pc, fileobj, data_compression=None):
""" write pointcloud as .pcd to fileobj.
if data_compression is not None it overrides pc.data.
"""
metadata = pc.get_metadata()
if data_compression is not None:
data_compression = data_compression.lower()
assert (data_compression in ('ascii', 'binary', 'binary_compressed'))
metadata['data'] = data_compression
header = write_header(metadata).encode('utf-8')
fileobj.write(header)
    fileobj.write(pc.pc_data.tobytes())  # tostring() is a deprecated alias of tobytes()
class PointCloud(object):
def __init__(self, metadata, pc_data):
self.metadata_keys = metadata.keys()
self.__dict__.update(metadata)
self.pc_data = pc_data
self.check_sanity()
def get_metadata(self):
""" returns copy of metadata """
metadata = {}
for k in self.metadata_keys:
metadata[k] = copy.copy(getattr(self, k))
return metadata
def check_sanity(self):
# pdb.set_trace()
md = self.get_metadata()
assert (_metadata_is_consistent(md))
assert (len(self.pc_data) == self.points)
assert (self.width * self.height == self.points)
assert (len(self.fields) == len(self.count))
assert (len(self.fields) == len(self.type))
def save_pcd(self, fname, compression=None, **kwargs):
if 'data_compression' in kwargs:
print('\033[93m' + 'data_compression keyword is deprecated for'
' compression' + '\033[1m')
compression = kwargs['data_compression']
with open(fname, 'wb') as f:
point_cloud_to_fileobj(self, f, compression)
def save_pcd_to_fileobj(self, fileobj, compression=None, **kwargs):
if 'data_compression' in kwargs:
print('\033[93m' + 'data_compression keyword is deprecated for'
' compression' + '\033[1m')
compression = kwargs['data_compression']
point_cloud_to_fileobj(self, fileobj, compression)
def copy(self):
new_pc_data = np.copy(self.pc_data)
new_metadata = self.get_metadata()
return PointCloud(new_metadata, new_pc_data)
def to_msg(self):
# TODO is there some metadata we want to attach?
return array_to_pointcloud2(self.pc_data)
@staticmethod
def from_path(fname):
return point_cloud_from_path(fname)
@staticmethod
def from_msg(msg, squeeze=True):
""" from pointcloud2 msg
squeeze: fix when clouds get 1 as first dim
"""
md = {'version': .7,
'fields': [],
'size': [],
'count': [],
'width': 0,
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': 0,
'type': [],
'data': 'binary_compressed'}
for field in msg.fields:
md['fields'].append(field.name)
t, s = pc2_type_to_pcd_type[field.datatype]
md['type'].append(t)
md['size'].append(s)
# TODO handle multicount correctly
if field.count > 1:
print('\033[93m' + 'fields with count > 1 are not well tested' + '\033[1m')
md['count'].append(field.count)
pc_data = np.squeeze(pointcloud2_to_array(msg))
md['width'] = len(pc_data)
md['points'] = len(pc_data)
pc = PointCloud(md, pc_data)
        return pc
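# Subscriber sketch (illustrative; not part of the original file). The topic name
# matches the one used in save_dataset.py. Note this trimmed module writes pc_data
# uncompressed, so 'binary' is the safe compression choice here.
if __name__ == '__main__':
    import rospy

    def callback(msg):
        pc = PointCloud.from_msg(msg)
        pc.save_pcd('frame.pcd', compression='binary')

    rospy.init_node('pypcd_demo')
    rospy.Subscriber('/kinect/depth/points', PointCloud2, callback)
    rospy.spin()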
synfeal | synfeal-main/models/pointnet.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
# Regularization for the learned feature transform: adds a loss term that penalizes
# deviation of trans @ trans^T from the identity, pushing the predicted transform
# towards an orthogonal matrix and helping to avoid overfitting (as in the PointNet paper).
def feature_transform_regularizer(trans):
d = trans.size()[1]
batchsize = trans.size()[0]
I = torch.eye(d)[None, :, :]
if trans.is_cuda:
I = I.cuda()
loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2,1)) - I, dim=(1,2)))
return loss
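# Sanity check (added for illustration): for orthogonal transforms the penalty is
# exactly zero, since A @ A^T = I; any deviation from orthogonality increases it.
def _demo_regularizer_zero_for_identity():
    trans = torch.eye(3).unsqueeze(0).repeat(4, 1, 1)  # batch of four 3x3 identities
    assert feature_transform_regularizer(trans).item() == 0.0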
class STN3d(nn.Module): # spatial transformer network 3d, paper: https://arxiv.org/pdf/1506.02025v3.pdf
def __init__(self):
super(STN3d, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1) # conv1d because we are sliding the filter over 1 dimensional.
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = torch.nn.Conv1d(k, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k*k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1,self.k*self.k).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetfeat(nn.Module):
def __init__(self, feature_transform = False):
super(PointNetfeat, self).__init__()
#self.stn = STN3d()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
def forward(self, x):
n_pts = x.size()[2] # input is (batch_size, number_of_features, number_of_points)
#trans = self.stn(x)
#x = x.transpose(2, 1) # this swaps number of feature with number of points --> (batch_size, number_of_points, number_of_features)
#x = torch.bmm(x, trans) # batch matrix-matrix product --> x.shape = (32, 2500, 3), trans.shape = (32, 3, 3) --> output = (32, 2500, 3)
#x = x.transpose(2, 1) # now x.shape = (32, 3, 2500)
x = F.relu(self.bn1(self.conv1(x))) # x.shape = (32, 64, 2500)
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2,1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2,1)
else:
trans_feat = None
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x)) #x.shape (32, 1024, 2500)
x = torch.max(x, 2, keepdim=True)[0] # MAX POOLING
x = x.view(-1, 1024) # flattening
        trans = 0  # input-transform STN is disabled above; placeholder keeps the return signature
return x, trans, trans_feat
class PointNet(nn.Module):
def __init__(self, feature_transform=False):
super(PointNet, self).__init__()
self.feature_transform = feature_transform
self.feat = PointNetfeat(feature_transform=feature_transform)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3_trans = nn.Linear(256, 3)
self.fc3_rot = nn.Linear(256, 4)
self.dropout = nn.Dropout(p=0.3)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.relu = nn.ReLU()
def forward(self, x):
x, trans, trans_feat = self.feat(x) # the output x is the global feature (1024x1)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.dropout(self.fc2(x))))
x_trans = self.fc3_trans(x) # Joint Learning!
x_rot = self.fc3_rot(x) # Joint Learning!
x_pose = torch.cat((x_trans, x_rot), dim=1)
return x_pose, trans, trans_feat # softmax removed!
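# Smoke test (illustrative): a batch of 8 clouds with 2500 points each yields an
# 8x7 pose tensor (3 translation + 4 quaternion components).
if __name__ == '__main__':
    model = PointNet()
    model.eval()  # BatchNorm/Dropout in inference mode for a quick shape check
    pose, _, _ = model(torch.rand(8, 3, 2500))
    print(pose.shape)  # torch.Size([8, 7])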
synfeal | synfeal-main/models/pointnet_classification.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
class STN3d(nn.Module):
def __init__(self):
super(STN3d, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = torch.nn.Conv1d(k, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k*k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1,self.k*self.k).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetfeat(nn.Module):
def __init__(self, global_feat = True, feature_transform = False):
super(PointNetfeat, self).__init__()
self.stn = STN3d()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.global_feat = global_feat
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
def forward(self, x):
n_pts = x.size()[2]
trans = self.stn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans)
x = x.transpose(2, 1)
x = F.relu(self.bn1(self.conv1(x)))
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2,1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2,1)
else:
trans_feat = None
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
if self.global_feat:
return x, trans, trans_feat
else:
x = x.view(-1, 1024, 1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetCls(nn.Module):
def __init__(self, k=2, feature_transform=False):
super(PointNetCls, self).__init__()
self.feature_transform = feature_transform
self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k)
self.dropout = nn.Dropout(p=0.3)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.relu = nn.ReLU()
def forward(self, x):
x, trans, trans_feat = self.feat(x) # the output x is the global feature (1024x1)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.dropout(self.fc2(x))))
x = self.fc3(x)
return F.log_softmax(x, dim=1), trans, trans_feat # this must change
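# Smoke test (illustrative): with k=5 classes the classifier returns per-class
# log-probabilities plus the input and feature transform matrices.
if __name__ == '__main__':
    model = PointNetCls(k=5, feature_transform=True)
    model.eval()
    logp, trans, trans_feat = model(torch.rand(2, 3, 1024))
    print(logp.shape, trans.shape, trans_feat.shape)  # (2, 5), (2, 3, 3), (2, 64, 64)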
synfeal | synfeal-main/models/poselstm.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from torchvision import transforms, models
class PoseLSTM(nn.Module):
def __init__(self, hidden_size = 128, pretrained = True, aux_logits=True):
super(PoseLSTM, self).__init__()
self.hidden_size = hidden_size
self.aux_logits = aux_logits
if pretrained:
base_model = models.inception_v3(weights='Inception_V3_Weights.DEFAULT')
else:
base_model = models.inception_v3()
base_model.aux_logits = True
self.Conv2d_1a_3x3 = base_model.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = base_model.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = base_model.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = base_model.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = base_model.Conv2d_4a_3x3
self.Mixed_5b = base_model.Mixed_5b
self.Mixed_5c = base_model.Mixed_5c
self.Mixed_5d = base_model.Mixed_5d
self.Mixed_6a = base_model.Mixed_6a
self.Mixed_6b = base_model.Mixed_6b
self.Mixed_6c = base_model.Mixed_6c
self.Mixed_6d = base_model.Mixed_6d
self.Mixed_6e = base_model.Mixed_6e
self.Mixed_7a = base_model.Mixed_7a
self.Mixed_7b = base_model.Mixed_7b
self.Mixed_7c = base_model.Mixed_7c
if aux_logits:
self.aux1 = InceptionAux(288, stride=7, hidden_size = self.hidden_size)
self.aux2 = InceptionAux(768, stride=3, hidden_size = self.hidden_size)
self.lstm_regression = LstmRegression(dropout_rate=0.5, hidden_size=self.hidden_size)
def forward(self, x, verbose=False): # this is where we pass the input into the module
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x) # mixed is the inception module!!
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 35 x 35 x 288
if self.aux_logits and self.training:
pose_aux1 = self.aux1(x)
x = self.Mixed_6a(x)
# 17 x 17 x 768
x = self.Mixed_6b(x)
# 17 x 17 x 768
x = self.Mixed_6c(x)
# 17 x 17 x 768
x = self.Mixed_6d(x)
# 17 x 17 x 768
x = self.Mixed_6e(x)
# 17 x 17 x 768
if self.aux_logits and self.training:
pose_aux2 = self.aux2(x)
x = self.Mixed_7a(x)
# 8 x 8 x 1280
x = self.Mixed_7b(x)
# 8 x 8 x 2048
x = self.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
# 1 x 1 x 2048
x = x.view(x.size(0), -1)
# 2048
pose = self.lstm_regression(x)
if self.aux_logits and self.training:
return pose_aux1, pose_aux2, pose
else:
return pose
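# Training-time sketch (illustrative, not taken from this repository): in train mode
# with aux_logits the forward returns three poses. Following the GoogLeNet-style
# auxiliary-head recipe they can be combined with a weighted sum; the 0.3 default
# below is an assumption.
def _demo_aux_loss(model, batch, target, criterion, aux_weight=0.3):
    model.train()
    pose_aux1, pose_aux2, pose = model(batch)
    return (criterion(pose, target)
            + aux_weight * criterion(pose_aux1, target)
            + aux_weight * criterion(pose_aux2, target))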
class InceptionAux(nn.Module):
def __init__(self, in_channels, stride, hidden_size):
super(InceptionAux, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=(1,1))
self.fc = nn.Linear(3200, 2048)
self.relu = nn.ReLU()
self.pool = nn.AvgPool2d(kernel_size=5, stride=stride)
self.lstm_regression = LstmRegression(dropout_rate=0.7, hidden_size=hidden_size)
def forward(self, x):
x = self.pool(x)
x = self.relu(self.conv(x))
x = x.reshape(x.shape[0], -1)
x = self.relu(self.fc(x))
pose = self.lstm_regression(x)
return pose
class LstmRegression(nn.Module):
def __init__(self, dropout_rate, hidden_size):
super(LstmRegression, self).__init__()
#TODO: try hidden_size = 32
self.hidden_size = hidden_size
self.lstm_lr = nn.LSTM(input_size=64, hidden_size = hidden_size, bidirectional = True, batch_first = True)
self.lstm_ud = nn.LSTM(input_size=32, hidden_size = hidden_size, bidirectional = True, batch_first = True)
self.pos = nn.Linear(hidden_size*4, 3, bias=True)
self.ori = nn.Linear(hidden_size*4, 4, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self,x):
# x is of shape (N,1,2048)
x = x.view(x.size(0),32, 64)
_, (hidden_state_lr, _) = self.lstm_lr(x.permute(0,1,2)) # to run row by row
_, (hidden_state_ud, _) = self.lstm_ud(x.permute(0,2,1)) # to run col by col
# hidden_state_lr.shape = [2, batch_size, hidden_size]
lstm_vector = torch.cat((hidden_state_lr[0,:,:],
hidden_state_lr[1,:,:],
hidden_state_ud[0,:,:],
hidden_state_ud[1,:,:]), 1)
lstm_vector = self.dropout(lstm_vector)
pos = self.pos(lstm_vector)
ori = self.ori(lstm_vector)
pose = torch.cat((pos, ori), dim=1)
return pose
# if __name__ == "__main__":
#     model = PoseLSTM()
#     print(model(torch.rand(10,3,299,299))[0].shape)
synfeal | synfeal-main/models/posenet.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from torchvision import transforms, models
#https://pytorch.org/hub/pytorch_vision_inception_v3/
class PoseNetGoogleNet(nn.Module):
def __init__(self, pretrained,dropout_rate=0.0, aux_logits=True):
super(PoseNetGoogleNet, self).__init__()
self.dropout_rate = dropout_rate
self.aux_logits = aux_logits
if pretrained:
base_model = models.inception_v3(weights='Inception_V3_Weights.DEFAULT')
else:
base_model = models.inception_v3()
base_model.aux_logits = True
self.Conv2d_1a_3x3 = base_model.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = base_model.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = base_model.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = base_model.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = base_model.Conv2d_4a_3x3
self.Mixed_5b = base_model.Mixed_5b
self.Mixed_5c = base_model.Mixed_5c
self.Mixed_5d = base_model.Mixed_5d
self.Mixed_6a = base_model.Mixed_6a
self.Mixed_6b = base_model.Mixed_6b
self.Mixed_6c = base_model.Mixed_6c
self.Mixed_6d = base_model.Mixed_6d
self.Mixed_6e = base_model.Mixed_6e
self.Mixed_7a = base_model.Mixed_7a
self.Mixed_7b = base_model.Mixed_7b
self.Mixed_7c = base_model.Mixed_7c
if aux_logits:
self.aux1 = InceptionAux1(288, dropout_rate)
self.aux2 = InceptionAux2(768, dropout_rate)
# Out 2
self.pos = nn.Linear(2048, 3, bias=True)
self.ori = nn.Linear(2048, 4, bias=True)
def forward(self, x, verbose=False): # this is where we pass the input into the module
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x) # mixed is the inception module!!
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 35 x 35 x 288
if self.aux_logits and self.training:
pose_aux1 = self.aux1(x)
x = self.Mixed_6a(x)
# 17 x 17 x 768
x = self.Mixed_6b(x)
# 17 x 17 x 768
x = self.Mixed_6c(x)
# 17 x 17 x 768
x = self.Mixed_6d(x)
# 17 x 17 x 768
x = self.Mixed_6e(x)
# 17 x 17 x 768
if self.aux_logits and self.training:
pose_aux2 = self.aux2(x)
x = self.Mixed_7a(x)
# 8 x 8 x 1280
x = self.Mixed_7b(x)
# 8 x 8 x 2048
x = self.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
x = F.dropout(x, p=self.dropout_rate, training=self.training)
# 1 x 1 x 2048
x = x.view(x.size(0), -1)
# 2048
pos = self.pos(x)
ori = self.ori(x)
pose = torch.cat((pos, ori), dim=1)
if self.aux_logits and self.training:
return pose_aux1, pose_aux2, pose
else:
return pose
class InceptionAux1(nn.Module):
def __init__(self, in_channels, dropout_rate):
super(InceptionAux1, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=(1,1))
self.fc = nn.Linear(3200, 2048)
self.pos_aux1 = nn.Linear(in_features=2048, out_features=3)
self.ori_aux1 = nn.Linear(in_features=2048, out_features=4)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate)
self.pool = nn.AvgPool2d(kernel_size=5, stride=7)
def forward(self, x):
x = self.pool(x)
x = self.relu(self.conv(x))
x = x.reshape(x.shape[0], -1)
x = self.relu(self.fc(x))
x = self.dropout(x)
pos = self.pos_aux1(x)
ori = self.ori_aux1(x)
pose = torch.cat((pos, ori), dim=1)
return pose
class InceptionAux2(nn.Module):
def __init__(self, in_channels, dropout_rate):
super(InceptionAux2, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=(1,1))
self.fc = nn.Linear(3200, 2048)
self.pos_aux2 = nn.Linear(in_features=2048, out_features=3)
self.ori_aux2 = nn.Linear(in_features=2048, out_features=4)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate)
self.pool = nn.AvgPool2d(kernel_size=5, stride=3)
def forward(self, x):
x = self.pool(x)
x = self.relu(self.conv(x))
x = x.reshape(x.shape[0], -1)
x = self.relu(self.fc(x))
x = self.dropout(x)
pos = self.pos_aux2(x)
ori = self.ori_aux2(x)
pose = torch.cat((pos, ori), dim=1)
return pose
class PoseNetResNet(nn.Module): #https://github.com/youngguncho/PoseNet-Pytorch/blob/master/model.py
def __init__(self, pretrained, dropout_rate=0.0, aux_logits=False):
super(PoseNetResNet, self).__init__()
        base_model = models.resnet34(weights='ResNet34_Weights.DEFAULT' if pretrained else None)
feat_in = base_model.fc.in_features
self.aux_logits = aux_logits
self.dropout_rate = dropout_rate
self.base_model = nn.Sequential(*list(base_model.children())[:-1])
self.fc_last = nn.Linear(feat_in, 2048, bias=True)
self.fc_position = nn.Linear(2048, 3, bias=True)
self.fc_rotation = nn.Linear(2048, 4, bias=True)
init_modules = [self.fc_last, self.fc_position, self.fc_rotation]
        # init modules according to kaiming normal
# https://pytorch.org/docs/stable/nn.init.html
for module in init_modules:
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
def forward(self, x):
x = self.base_model(x)
x = x.view(x.size(0), -1)
x_fully = self.fc_last(x)
x = F.relu(x_fully)
if self.dropout_rate > 0:
x = F.dropout(x, p=self.dropout_rate, training=self.training)
position = self.fc_position(x)
rotation = self.fc_rotation(x)
x_pose = torch.cat((position, rotation), dim=1)
return x_pose
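# Smoke test (illustrative): PoseNetResNet maps a batch of 224x224 RGB images to
# 7-D poses (3 translation + 4 quaternion components).
if __name__ == '__main__':
    model = PoseNetResNet(pretrained=False, dropout_rate=0.5)
    model.eval()
    print(model(torch.rand(2, 3, 224, 224)).shape)  # torch.Size([2, 7])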
synfeal | synfeal-main/models/depthnet.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import numpy as np
import torch.nn.functional as F
class CNNDepth(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepth, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.conv1(x))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.conv2(x))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.conv3(x))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.conv4(x))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.conv5(x))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.fc1(x))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.fc2(x))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.fc3(x))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
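# Note (added): the 25088 used in fc1 above follows from an assumed 1x224x224
# depth input: each stride-2 conv with kernel 5 and padding 2 halves the spatial
# size, 224 -> 112 -> 56 -> 28 -> 14 -> 7, so the flattened vector is 512*7*7 = 25088.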
class CNNDepthLow(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthLow, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2)
self.fc1 = nn.Linear(18432, 4096)
self.fc2 = nn.Linear(4096, 1024)
#self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(1024, 3)
self.fc_out_rotation = nn.Linear(1024, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.conv1(x))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.conv2(x))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.conv3(x))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.conv4(x))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.conv5(x))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.fc1(x))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.fc2(x))
if verbose: print('fc2 shape ' + str(x.shape))
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
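# Note (added): with kernel 3, stride 2 and no padding, an assumed 1x224x224 input
# shrinks as 224 -> 111 -> 55 -> 27 -> 13 -> 6 over the five convs above, which is
# where the fc1 input size of 512*6*6 = 18432 comes from.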
class CNNDepthDropout(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthDropout, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
self.dropout1 = nn.Dropout(p=0.5)
self.dropout2 = nn.Dropout(p=0.3)
self.dropout3 = nn.Dropout(p=0.2)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
        x = F.relu(self.dropout3(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout1(self.fc1(x)))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.dropout2(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatch(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatch, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.4)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
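# Usage note (added): fc1-fc3 feed BatchNorm1d layers, so training this model
# requires batch sizes greater than 1; switch to model.eval() for single-image
# inference so dropout is off and the BN running statistics are used instead.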
class CNNDepthBatchK3(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchK3, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.4)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
    def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLeaky(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLeaky, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
        self.lrelu = nn.LeakyReLU(0.1)
        self.dropout = nn.Dropout(p=0.4)  # added: forward() calls self.dropout; p=0.4 assumed, matching CNNDepthBatch
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = self.lrelu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = self.lrelu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = self.lrelu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = self.lrelu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = self.lrelu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = self.lrelu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = self.lrelu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = self.lrelu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLow(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLow, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(32)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
#self.bn8 = nn.BatchNorm1d(512)
#self.lrelu = nn.LeakyReLU(0.2)
self.dropout = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
#self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(1024, 3)
self.fc_out_rotation = nn.Linear(1024, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
    def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = self.lrelu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
# x = self.lrelu(self.bn8(self.fc3(x)))
# if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLowL2RegLeaky(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLowL2RegLeaky, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=3, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=3, padding=1)
#self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=3, padding=1)
#self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=3, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
#self.bn4 = nn.BatchNorm2d(256)
#self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.lrelu = nn.LeakyReLU(0.2)
#self.dropout = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(20736, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = self.lrelu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = self.lrelu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = self.lrelu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
# x = self.lrelu(self.bn4(self.conv4(x)))
# if verbose: print('layer4 shape ' + str(x.shape))
# x = self.lrelu(self.bn5(self.conv5(x)))
# if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = self.lrelu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = self.lrelu(self.bn6(self.fc1(x)))
if verbose: print('fc1 shape ' + str(x.shape))
x = self.lrelu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = self.lrelu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLowL2Reg2(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLowL2Reg2, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=1)
#self.conv6 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=5, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
#self.bn6 = nn.BatchNorm2d(1024)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
#self.lrelu = nn.LeakyReLU(0.2)
#self.dropout = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(18432, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
# x = self.lrelu(self.bn6(self.conv6(x)))
# if verbose: print('layer6 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = self.lrelu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.bn6(self.fc1(x)))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchDropout8(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropout8, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.8)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchDropoutVar(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropoutVar, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout1 = nn.Dropout(p=0.5)
self.dropout2 = nn.Dropout(p=0.3)
self.dropout3 = nn.Dropout(p=0.2)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout1(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.dropout2(self.bn7(self.fc2(x))))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.bn8(self.fc3(x))))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchDropout8Cont(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropout8Cont, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout1 = nn.Dropout(p=0.8)
self.dropout2 = nn.Dropout(p=0.5)
self.dropout3 = nn.Dropout(p=0.3)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
#self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(1024, 3)
self.fc_out_rotation = nn.Linear(1024, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout1(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.dropout2(self.bn7(self.fc2(x))))
if verbose: print('fc2 shape ' + str(x.shape))
#x = F.relu(self.dropout3(self.bn8(self.fc3(x))))
#if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchDropout8Kernel7(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropout8Kernel7, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=7, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=7, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.8)
self.fc1 = nn.Linear(18432, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the ReLUs as modules, we can treat them as functions, accessed via torch.nn.functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
        return x_pose
| 45,692 | 44.784569 | 152 | py |
synfeal | synfeal-main/models/hourglass.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
from torchvision import transforms, models
# paper: https://arxiv.org/abs/1703.07971
class HourglassBatch(nn.Module):
def __init__(self, pretrained, sum_mode=False, dropout_rate=0.5, aux_logits=False):
super(HourglassBatch, self).__init__()
self.sum_mode = sum_mode
self.dropout_rate = dropout_rate
self.aux_logits = aux_logits
if pretrained:
            base_model = models.resnet34(weights=models.ResNet34_Weights.DEFAULT)  # torchvision >= 0.13 weights API
else:
base_model = models.resnet34()
# encoding blocks!
self.init_block = nn.Sequential(*list(base_model.children())[:4])
self.res_block1 = base_model.layer1
self.res_block2 = base_model.layer2
self.res_block3 = base_model.layer3
self.res_block4 = base_model.layer4
# decoding blocks
if sum_mode:
self.deconv_block1 = nn.ConvTranspose2d(512, 256, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block2 = nn.ConvTranspose2d(256, 128, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block3 = nn.ConvTranspose2d(128, 64, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.conv_block = nn.Conv2d(64, 32, kernel_size=(
3, 3), stride=(1, 1), padding=(1, 1), bias=False)
else:
# concatenation with the encoder feature vectors
self.deconv_block1 = nn.ConvTranspose2d(512, 256, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block2 = nn.ConvTranspose2d(512, 128, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block3 = nn.ConvTranspose2d(256, 64, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.conv_block = nn.Conv2d(128, 32, kernel_size=(
3, 3), stride=(1, 1), padding=(1, 1), bias=False)
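        # Note (added): in concatenation mode each decoder stage consumes the previous
        # deconv output stacked with the matching encoder features, hence the doubled
        # in_channels (deconv2: 256+256=512, deconv3: 128+128=256, conv: 64+64=128).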
self.bn1 = nn.BatchNorm2d(256)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(64)
self.bn4 = nn.BatchNorm2d(32)
self.bn5 = nn.BatchNorm1d(1024)
# Regressor
self.fc_dim_reduce = nn.Linear(56 * 56 * 32, 1024)
self.fc_trans = nn.Linear(1024, 3)
self.fc_rot = nn.Linear(1024, 4)
# Initialize Weights
init_modules = [self.deconv_block1, self.deconv_block2, self.deconv_block3, self.conv_block,
self.fc_dim_reduce, self.fc_trans, self.fc_rot]
for module in init_modules:
            if isinstance(module, (nn.ConvTranspose2d, nn.Linear, nn.Conv2d)):  # Conv2d included so conv_block is initialized too
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
def forward(self, x):
# conv
x = self.init_block(x)
x_res1 = self.res_block1(x)
x_res2 = self.res_block2(x_res1)
x_res3 = self.res_block3(x_res2)
x_res4 = self.res_block4(x_res3)
# Deconv
x_deconv1 = self.bn1(F.relu(self.deconv_block1(x_res4)))
if self.sum_mode:
x_deconv1 = x_res3 + x_deconv1
else:
x_deconv1 = torch.cat((x_res3, x_deconv1), dim=1)
x_deconv2 = self.bn2(F.relu(self.deconv_block2(x_deconv1)))
if self.sum_mode:
x_deconv2 = x_res2 + x_deconv2
else:
x_deconv2 = torch.cat((x_res2, x_deconv2), dim=1)
x_deconv3 = self.bn3(F.relu(self.deconv_block3(x_deconv2)))
if self.sum_mode:
x_deconv3 = x_res1 + x_deconv3
else:
x_deconv3 = torch.cat((x_res1, x_deconv3), dim=1)
x_conv = self.bn4(F.relu(self.conv_block(x_deconv3)))
x_linear = x_conv.view(x_conv.size(0), -1)
x_linear = self.bn5(F.relu(self.fc_dim_reduce(x_linear)))
x_linear = F.dropout(x_linear, p=self.dropout_rate,
training=self.training)
position = self.fc_trans(x_linear)
rotation = self.fc_rot(x_linear)
x_pose = torch.cat((position, rotation), dim=1)
return x_pose
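# --- Example usage (added sketch, not part of the original file) ---
# Assumes a 224x224 RGB input so the encoder/decoder maps line up with the
# 56*56*32 regressor input; training would need batch size > 1 for BatchNorm1d.
if __name__ == '__main__':
    net = HourglassBatch(pretrained=False)
    net.eval()
    with torch.no_grad():
        pose = net(torch.randn(2, 3, 224, 224))
    print(pose.shape)  # expected: torch.Size([2, 7])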
| 4,639 | 36.723577 | 120 | py |
synfeal | synfeal-main/process_dataset/src/validate_dataset.py | from utils import projectToCamera
import numpy as np
import os
import shutil
from dataset import Dataset
from sensor_msgs.msg import PointField
import sensor_msgs.point_cloud2 as pc2
from utils_ros import read_pcd, write_pcd
import random
from os.path import exists
import yaml
from colorama import Fore
import math
import copy
import cv2
class ValidateDataset():
def __init__(self):
self.files = ['.pcd', '.rgb.png', '.pose.txt']
def resetConfig(self, config={}):
self.config = config
def duplicateDataset(self, dataset, suffix):
# copy folder and create dataset object, return object
path_dataset = dataset.path_seq
path_validated_dataset = f'{dataset.path_seq}{suffix}'
shutil.copytree(path_dataset, path_validated_dataset)
return Dataset(path_seq=f'{dataset.seq}{suffix}')
def numberOfPoints(self, dataset, frame = None):
dct = {}
        if frame is None: # calculate number of points for all pointclouds
for index in range(len(dataset)):
n_points = read_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd').points
dct[index] = n_points
else:
n_points = read_pcd(f'{dataset.path_seq}/frame-{frame:05d}.pcd').points
dct[frame] = n_points
return dct
def numberOfNans(self, dataset, frame = None):
dct = {}
        if frame is None:
for index in range(len(dataset)):
pc_raw = read_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd')
pts = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z']]).T # stays NX3
dct[index] = np.sum(np.isnan(pts).any(axis=1))
else:
pc_raw = read_pcd(f'{dataset.path_seq}/frame-{frame:05d}.pcd')
pts = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z']]).T # stays NX3
dct[frame] = np.sum(np.isnan(pts).any(axis=1))
return dct
def removeNansPointCloud(self, dataset, frame = None):
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('intensity', 12, PointField.FLOAT32, 1)]
        if frame is None:
for index in range(len(dataset)):
pc_raw = read_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd')
pts = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z'], pc_raw.pc_data['intensity']]).T # stays NX4
pts = pts[~np.isnan(pts).any(axis=1)]
# del
os.remove(f'{dataset.path_seq}/frame-{index:05d}.pcd')
# save new point clouds
                pc_msg = pc2.create_cloud(None, fields, pts)
write_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd', pc_msg)
else:
pc_raw = read_pcd(f'{dataset.path_seq}/frame-{frame:05d}.pcd')
pts = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z'], pc_raw.pc_data['intensity']]).T # stays NX4
pts = pts[~np.isnan(pts).any(axis=1)]
# del
os.remove(f'{dataset.path_seq}/frame-{frame:05d}.pcd')
# save new point clouds
            pc_msg = pc2.create_cloud(None, fields, pts)
write_pcd(f'{dataset.path_seq}/frame-{frame:05d}.pcd', pc_msg)
def downsamplePointCloud(self, dataset, npoints):
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('intensity', 12, PointField.FLOAT32, 1)]
for index in range(len(dataset)):
pc_raw = read_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd')
pts = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z'], pc_raw.pc_data['intensity']]).T # stays NX4
initial_npoints = pc_raw.points
step = initial_npoints // npoints
idxs = list(range(0,initial_npoints, step))
for i in range(len(idxs) - npoints):
idxs.pop(random.randrange(len(idxs)))
pts = pts[idxs,:]
# del
os.remove(f'{dataset.path_seq}/frame-{index:05d}.pcd')
# save new point clouds
            pc_msg = pc2.create_cloud(None, fields, pts)
write_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd', pc_msg)
config = dataset.getConfig()
config['npoints'] = npoints
with open(f'{dataset.path_seq}/config.yaml', 'w') as file:
yaml.dump(config, file)
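    # Worked example (added): for a 10000-point cloud and npoints=1024, step is
    # 10000 // 1024 = 9, range(0, 10000, 9) yields 1112 candidate indices, and
    # 88 of them are popped at random so exactly 1024 evenly spread points remain.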
def scalePointCloud(self, dataset):
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('intensity', 12, PointField.FLOAT32, 1)]
for index in range(len(dataset)):
pc_raw = read_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd')
pts = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z'], pc_raw.pc_data['intensity']]).T # stays NX4
pts = pts - np.expand_dims(np.mean(pts, axis=0), 0) # center
dist = np.max(np.sqrt(np.sum(pts ** 2, axis=1)), 0)
pts = pts / dist
os.remove(f'{dataset.path_seq}/frame-{index:05d}.pcd')
# save new point clouds
pc_msg =pc2.create_cloud(None, fields, pts)
write_pcd(f'{dataset.path_seq}/frame-{index:05d}.pcd', pc_msg)
config = dataset.getConfig()
config['scaled'] = True
with open(f'{dataset.path_seq}/config.yaml', 'w') as file:
yaml.dump(config, file)
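    # Note (added): pts here is Nx4 (x, y, z, intensity), so the centering and the
    # unit-sphere scaling p' = (p - mean) / max||p - mean|| are applied to the
    # intensity column as well; keep that in mind if intensity must stay raw.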
def invalidFrames(self, dataset):
# return a list with invalid frames
idxs = []
        config = dataset.getConfig()
        if config['fast']:
            files = ['.rgb.png', '.pose.txt']
        else:
            files = copy.deepcopy(self.files)
            files.append('.depth.png')
for index in range(len(dataset)):
for file in files:
if not exists(f'{dataset.path_seq}/frame-{index:05d}{file}'):
idxs.append(index)
break
return idxs
def removeFrames(self, dataset, idxs):
for idx in idxs:
for file in os.listdir(f'{dataset.path_seq}'):
if file.startswith(f'frame-{idx:05d}'):
os.remove(f'{dataset.path_seq}/{file}')
def reorganizeDataset(self, dataset):
# here I assume the invalidFrames and removeFrames were called before.
# last_pose_idx is the idx of the last frame. We cannot use len(dataset) because the dataset might be missing some frames!
last_pose_idx = int(sorted([f for f in os.listdir(dataset.path_seq) if f.endswith('pose.txt')])[-1][6:11])
for idx in range(last_pose_idx+1):
print(idx)
if not exists(f'{dataset.path_seq}/frame-{idx:05d}.pose.txt'):
                # idx does not exist, so we rename the closest following frame.
print(f'{idx} is missing!!!')
new_idx = None
for idx2 in range(idx+1, last_pose_idx+1):
print(f'trying {idx2}')
if exists(f'{dataset.path_seq}/frame-{idx2:05d}.pose.txt'):
new_idx = idx2
break
                if new_idx is not None:
print(f'renaming idx {new_idx} to idx {idx}')
for file in self.files:
os.rename(f'{dataset.path_seq}/frame-{new_idx:05d}{file}', f'{dataset.path_seq}/frame-{idx:05d}{file}')
else:
print(f'No candidate to replace {idx}')
def validateDataset(self, dataset):
# update config, check if all point clouds have the same size, if any has nans
# check for invalid frames
idxs = self.invalidFrames(dataset)
if idxs != []:
print(f'{Fore.RED} There are invalid frames in the dataset! {Fore.RESET}')
return False
# # check for missing data
# dct = self.numberOfNans(dataset)
# n_nans = 0
# for count in dct.values():
# n_nans += count
# if n_nans != 0:
# print(f'{Fore.RED} There are Nans in the dataset! {Fore.RESET}')
# return False
# #check for point clouds of different size
# dct = self.numberOfPoints(dataset)
# number_of_points = list(dct.values())
# result = all(element == number_of_points[0] for element in number_of_points)
# if not result:
# print(f'{Fore.RED} Not all pointclouds have the same number of points! {Fore.RESET}')
# return False
config = dataset.getConfig()
config['is_valid'] = True
with open(f'{dataset.path_seq}/config.yaml', 'w') as file:
yaml.dump(config, file)
return True
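    # Example pipeline (added sketch; all names come from this class/file):
    # validator = ValidateDataset()
    # raw = Dataset(path_seq='seq0')
    # valid = validator.duplicateDataset(raw, '_validated')   # work on a copy
    # validator.removeNansPointCloud(valid)                   # drop NaN points
    # validator.downsamplePointCloud(valid, npoints=1024)     # equal-size clouds
    # validator.removeFrames(valid, validator.invalidFrames(valid))
    # validator.reorganizeDataset(valid)                      # contiguous indices
    # assert validator.validateDataset(valid)                 # sets config['is_valid']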
def mergeDatasets(self, dataset1, dataset2, dataset3_name):
# both datasets must be valids
# they should share the same number of points
# if not (dataset1.getConfig()['is_valid'] and dataset2.getConfig()['is_valid']):
# print(f'{Fore.RED} The datasets are not valid! Validate before merge. {Fore.RESET}')
# return False
# if not (dataset1.getConfig()['npoints'] == dataset2.getConfig()['npoints']):
# print(f'{Fore.RED} The datasets dont have the same number of points! {Fore.RESET}')
# return False
# if not (dataset1.getConfig()['scaled'] == dataset2.getConfig()['scaled']):
# print(f'{Fore.RED} Property scaled is different! {Fore.RESET}')
# return False
config = dataset1.getConfig()
if config['fast']:
files = ['.rgb.png','.pose.txt']
else:
files = self.files
size_dataset1 = len(dataset1)
shutil.copytree(dataset1.path_seq, f'{dataset1.root}/{dataset3_name}')
shutil.copytree(dataset2.path_seq, f'{dataset2.path_seq}_tmp')
dataset3 = Dataset(path_seq=f'{dataset3_name}')
dataset2_tmp = Dataset(path_seq=f'{dataset2.seq}_tmp')
for idx in range(len(dataset2_tmp)):
for file in files:
os.rename(f'{dataset2_tmp.path_seq}/frame-{idx:05d}{file}', f'{dataset3.path_seq}/frame-{idx+size_dataset1:05d}{file}')
shutil.rmtree(dataset2_tmp.path_seq)
def createDepthImages(self, dataset, rescale):
# loop through all point clouds
config = dataset.getConfig()
intrinsic = np.loadtxt(f'{dataset.path_seq}/depth_intrinsic.txt', delimiter=',')
width = config['depth']['width']
height = config['depth']['height']
for idx in range(len(dataset)):
pc_raw = read_pcd(f'{dataset.path_seq}/frame-{idx:05d}.pcd')
pts = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z'], pc_raw.pc_data['intensity']]) # stays 4xN
pixels, valid_pixels, dist = projectToCamera(intrinsic, [0, 0, 0, 0, 0], width, height, pts)
range_sparse = np.zeros((height, width), dtype=np.float32)
mask = 255 * np.ones((range_sparse.shape[0], range_sparse.shape[1]), dtype=np.uint8)
for idx_point in range(0, pts.shape[1]):
if valid_pixels[idx_point]:
x0 = math.floor(pixels[0, idx_point])
y0 = math.floor(pixels[1, idx_point])
mask[y0, x0] = 0
range_sparse[y0, x0] = dist[idx_point]
range_sparse = cv2.resize(range_sparse, (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_NEAREST)
mask = cv2.resize(mask, (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_NEAREST)
# Computing the dense depth map
print('Computing inpaint ...')
range_dense = cv2.inpaint(range_sparse, mask, 3, cv2.INPAINT_NS)
print('Inpaint done')
range_dense = cv2.resize(range_dense, (0, 0), fx=1 / rescale, fy=1 / rescale, interpolation=cv2.INTER_NEAREST)
tmp = copy.deepcopy(range_dense)
tmp = tmp * 1000.0 # to milimeters
tmp = tmp.astype(np.uint16)
cv2.imwrite(f'{dataset.path_seq}/frame-{idx:05d}.depth.png', tmp)
print(f'Saved depth image {dataset.path_seq}/frame-{idx:05d}.depth.png')
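    # Note (added): the loop above renders each cloud as a sparse range image by
    # projecting points through the depth intrinsics; 'mask' stays 255 wherever no
    # point landed, cv2.inpaint with cv2.INPAINT_NS fills those holes at the
    # rescaled resolution, and the dense map is saved as uint16 millimeters.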
def createStatistics(self, dataset):
# loop through all point clouds
config = dataset.getConfig()
config['statistics'] = {'B' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'G' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'R' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'D' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))}}
for idx in range(len(dataset)):
print(f'creating stats of frame {idx}')
# Load RGB image
cv_image = cv2.imread(f'{dataset.path_seq}/frame-{idx:05d}.rgb.png', cv2.IMREAD_UNCHANGED)
#cv2.imshow('fig', cv_image)
#cv2.waitKey(0)
#print(cv_image.shape)
blue_image = cv_image[:,:,0]/255
green_image = cv_image[:,:,1]/255
red_image = cv_image[:,:,2]/255
# cv2.imshow('fig', green_image)
# cv2.waitKey(0)
## B channel
config['statistics']['B']['max'][idx] = np.max(blue_image)
config['statistics']['B']['min'][idx] = np.min(blue_image)
config['statistics']['B']['mean'][idx] = np.mean(blue_image)
config['statistics']['B']['std'][idx] = np.std(blue_image)
## G channel
config['statistics']['G']['max'][idx] = np.max(green_image)
config['statistics']['G']['min'][idx] = np.min(green_image)
config['statistics']['G']['mean'][idx] = np.mean(green_image)
config['statistics']['G']['std'][idx] = np.std(green_image)
## R channel
config['statistics']['R']['max'][idx] = np.max(red_image)
config['statistics']['R']['min'][idx] = np.min(red_image)
config['statistics']['R']['mean'][idx] = np.mean(red_image)
config['statistics']['R']['std'][idx] = np.std(red_image)
# Load Depth image
if not config['fast']:
depth_image = cv2.imread(f'{dataset.path_seq}/frame-{idx:05d}.depth.png', cv2.IMREAD_UNCHANGED)
depth_image = depth_image.astype(np.float32) / 1000.0 # to meters
else:
depth_image = -1
## D channel
config['statistics']['D']['max'][idx] = np.max(depth_image)
config['statistics']['D']['min'][idx] = np.min(depth_image)
config['statistics']['D']['mean'][idx] = np.mean(depth_image)
config['statistics']['D']['std'][idx] = np.std(depth_image)
config['statistics']['B']['max'] = round(float(np.mean(config['statistics']['B']['max'])),5)
config['statistics']['B']['min'] = round(float(np.mean(config['statistics']['B']['min'])),5)
config['statistics']['B']['mean'] = round(float(np.mean(config['statistics']['B']['mean'])),5)
config['statistics']['B']['std'] = round(float(np.mean(config['statistics']['B']['std'])),5)
config['statistics']['G']['max'] = round(float(np.mean(config['statistics']['G']['max'])),5)
config['statistics']['G']['min'] = round(float(np.mean(config['statistics']['G']['min'])),5)
config['statistics']['G']['mean'] = round(float(np.mean(config['statistics']['G']['mean'])),5)
config['statistics']['G']['std'] = round(float(np.mean(config['statistics']['G']['std'])),5)
config['statistics']['R']['max'] = round(float(np.mean(config['statistics']['R']['max'])),5)
config['statistics']['R']['min'] = round(float(np.mean(config['statistics']['R']['min'])),5)
config['statistics']['R']['mean'] = round(float(np.mean(config['statistics']['R']['mean'])),5)
config['statistics']['R']['std'] = round(float(np.mean(config['statistics']['R']['std'])),5)
config['statistics']['D']['max'] = round(float(np.mean(config['statistics']['D']['max'])),5)
config['statistics']['D']['min'] = round(float(np.mean(config['statistics']['D']['min'])),5)
config['statistics']['D']['mean'] = round(float(np.mean(config['statistics']['D']['mean'])),5)
config['statistics']['D']['std'] = round(float(np.mean(config['statistics']['D']['std'])),5)
dataset.setConfig(config)
def createStatisticsRGB01(self, dataset):
# loop through all point clouds
config = dataset.getConfig()
config['statistics'] = {'B' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'G' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'R' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))}}
for idx in range(len(dataset)):
print(f'creating stats of frame {idx}')
# Load RGB image
cv_image = cv2.imread(f'{dataset.path_seq}/frame-{idx:05d}.rgb.png', cv2.IMREAD_UNCHANGED)
#cv2.imshow('fig', cv_image)
#cv2.waitKey(0)
#print(cv_image.shape)
blue_image = cv_image[:,:,0]/255
green_image = cv_image[:,:,1]/255
red_image = cv_image[:,:,2]/255
# cv2.imshow('fig', green_image)
# cv2.waitKey(0)
## B channel
config['statistics']['B']['max'][idx] = np.max(blue_image)
config['statistics']['B']['min'][idx] = np.min(blue_image)
config['statistics']['B']['mean'][idx] = np.mean(blue_image)
config['statistics']['B']['std'][idx] = np.std(blue_image)
## G channel
config['statistics']['G']['max'][idx] = np.max(green_image)
config['statistics']['G']['min'][idx] = np.min(green_image)
config['statistics']['G']['mean'][idx] = np.mean(green_image)
config['statistics']['G']['std'][idx] = np.std(green_image)
## R channel
config['statistics']['R']['max'][idx] = np.max(red_image)
config['statistics']['R']['min'][idx] = np.min(red_image)
config['statistics']['R']['mean'][idx] = np.mean(red_image)
config['statistics']['R']['std'][idx] = np.std(red_image)
config['statistics']['B']['max'] = round(float(np.mean(config['statistics']['B']['max'])),5)
config['statistics']['B']['min'] = round(float(np.mean(config['statistics']['B']['min'])),5)
config['statistics']['B']['mean'] = round(float(np.mean(config['statistics']['B']['mean'])),5)
config['statistics']['B']['std'] = round(float(np.mean(config['statistics']['B']['std'])),5)
config['statistics']['G']['max'] = round(float(np.mean(config['statistics']['G']['max'])),5)
config['statistics']['G']['min'] = round(float(np.mean(config['statistics']['G']['min'])),5)
config['statistics']['G']['mean'] = round(float(np.mean(config['statistics']['G']['mean'])),5)
config['statistics']['G']['std'] = round(float(np.mean(config['statistics']['G']['std'])),5)
config['statistics']['R']['max'] = round(float(np.mean(config['statistics']['R']['max'])),5)
config['statistics']['R']['min'] = round(float(np.mean(config['statistics']['R']['min'])),5)
config['statistics']['R']['mean'] = round(float(np.mean(config['statistics']['R']['mean'])),5)
config['statistics']['R']['std'] = round(float(np.mean(config['statistics']['R']['std'])),5)
dataset.setConfig(config)
def processImages(self, dataset, technique, global_dataset):
config = dataset.getConfig()
config['statistics'] = {'B' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'G' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'R' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))},
'D' : {'max' : np.empty((len(dataset))),
'min' : np.empty((len(dataset))),
'mean' : np.empty((len(dataset))),
'std' : np.empty((len(dataset)))}}
# Local Processing
        if global_dataset is None:
for idx in range(len(dataset)):
# RGB image
bgr_image = cv2.imread(f'{dataset.path_seq}/frame-{idx:05d}.rgb.png', cv2.IMREAD_UNCHANGED)
blue_image = bgr_image[:,:,0]
green_image = bgr_image[:,:,1]
red_image = bgr_image[:,:,2]
depth_image = cv2.imread(f'{dataset.path_seq}/frame-{idx:05d}.depth.png', cv2.IMREAD_UNCHANGED)
depth_image = depth_image.astype(np.float32) / 1000.0 # to meters
if technique =='standardization':
# B channel
mean = np.mean(blue_image)
std = np.std(blue_image)
blue_image = (blue_image - mean) / std
# G channel
mean = np.mean(green_image)
std = np.std(green_image)
green_image = (green_image - mean) / std
# R channel
mean = np.mean(red_image)
std = np.std(red_image)
red_image = (red_image - mean) / std
# D channel
mean = np.mean(depth_image)
std = np.std(depth_image)
depth_image = (depth_image - mean) / std
elif technique == 'normalization':
# B channel
min_v = np.min(blue_image)
max_v = np.max(blue_image)
blue_image = (blue_image - min_v) / (max_v - min_v)
# G channel
min_v = np.min(green_image)
max_v = np.max(green_image)
green_image = (green_image - min_v) / (max_v - min_v)
# R channel
min_v = np.min(red_image)
max_v = np.max(red_image)
red_image = (red_image - min_v) / (max_v - min_v)
# D channel
min_v = np.min(depth_image)
max_v = np.max(depth_image)
depth_image = (depth_image - min_v) / (max_v - min_v)
else:
                    print('Technique not implemented. Available techniques are: normalization and standardization')
exit(0)
blue_image = blue_image.astype(np.float32)
green_image = green_image.astype(np.float32)
red_image = red_image.astype(np.float32)
depth_image = depth_image.astype(np.float32)
## B channel
config['statistics']['B']['max'][idx] = np.max(blue_image)
config['statistics']['B']['min'][idx] = np.min(blue_image)
config['statistics']['B']['mean'][idx] = np.mean(blue_image)
config['statistics']['B']['std'][idx] = np.std(blue_image)
## G channel
config['statistics']['G']['max'][idx] = np.max(green_image)
config['statistics']['G']['min'][idx] = np.min(green_image)
config['statistics']['G']['mean'][idx] = np.mean(green_image)
config['statistics']['G']['std'][idx] = np.std(green_image)
## R channel
config['statistics']['R']['max'][idx] = np.max(red_image)
config['statistics']['R']['min'][idx] = np.min(red_image)
config['statistics']['R']['mean'][idx] = np.mean(red_image)
config['statistics']['R']['std'][idx] = np.std(red_image)
## D channel
config['statistics']['D']['max'][idx] = np.max(depth_image)
config['statistics']['D']['min'][idx] = np.min(depth_image)
config['statistics']['D']['mean'][idx] = np.mean(depth_image)
config['statistics']['D']['std'][idx] = np.std(depth_image)
                # join the B, G, R channels into a single np.array image
bgr_image = cv2.merge([blue_image, green_image, red_image])
os.remove(f'{dataset.path_seq}/frame-{idx:05d}.depth.png')
os.remove(f'{dataset.path_seq}/frame-{idx:05d}.rgb.png')
np.save(f'{dataset.path_seq}/frame-{idx:05d}.depth.npy', depth_image)
np.save(f'{dataset.path_seq}/frame-{idx:05d}.rgb.npy', bgr_image)
#cv2.imwrite(f'{dataset.path_seq}/frame-{idx:05d}.depth.png', tmp)
config['processing'] = {'global' : None,
'technique' : technique}
config['statistics']['B']['max'] = round(float(np.mean(config['statistics']['B']['max'])),5)
config['statistics']['B']['min'] = round(float(np.mean(config['statistics']['B']['min'])),5)
config['statistics']['B']['mean'] = round(float(np.mean(config['statistics']['B']['mean'])),5)
config['statistics']['B']['std'] = round(float(np.mean(config['statistics']['B']['std'])),5)
config['statistics']['G']['max'] = round(float(np.mean(config['statistics']['G']['max'])),5)
config['statistics']['G']['min'] = round(float(np.mean(config['statistics']['G']['min'])),5)
config['statistics']['G']['mean'] = round(float(np.mean(config['statistics']['G']['mean'])),5)
config['statistics']['G']['std'] = round(float(np.mean(config['statistics']['G']['std'])),5)
config['statistics']['R']['max'] = round(float(np.mean(config['statistics']['R']['max'])),5)
config['statistics']['R']['min'] = round(float(np.mean(config['statistics']['R']['min'])),5)
config['statistics']['R']['mean'] = round(float(np.mean(config['statistics']['R']['mean'])),5)
config['statistics']['R']['std'] = round(float(np.mean(config['statistics']['R']['std'])),5)
config['statistics']['D']['max'] = round(float(np.mean(config['statistics']['D']['max'])),5)
config['statistics']['D']['min'] = round(float(np.mean(config['statistics']['D']['min'])),5)
config['statistics']['D']['mean'] = round(float(np.mean(config['statistics']['D']['mean'])),5)
config['statistics']['D']['std'] = round(float(np.mean(config['statistics']['D']['std'])),5)
dataset.setConfig(config)
# Global Processing
else:
global_config = global_dataset.getConfig()
global_stats = global_config['statistics']
for idx in range(len(dataset)):
bgr_image = cv2.imread(f'{dataset.path_seq}/frame-{idx:05d}.rgb.png', cv2.IMREAD_UNCHANGED)
blue_image = bgr_image[:,:,0]
green_image = bgr_image[:,:,1]
red_image = bgr_image[:,:,2]
depth_image = cv2.imread(f'{dataset.path_seq}/frame-{idx:05d}.depth.png', cv2.IMREAD_UNCHANGED)
depth_image = depth_image.astype(np.float32) / 1000.0 # to meters
if technique =='standardization':
blue_image = (blue_image - global_stats['B']['mean']) / global_stats['B']['std']
green_image = (green_image - global_stats['G']['mean']) / global_stats['G']['std']
red_image = (red_image - global_stats['R']['mean']) / global_stats['R']['std']
depth_image = (depth_image - global_stats['D']['mean']) / global_stats['D']['std']
elif technique == 'normalization':
blue_image = (blue_image - global_stats['B']['min']) / (global_stats['B']['max'] - global_stats['B']['min'])
green_image = (green_image - global_stats['G']['min']) / (global_stats['G']['max'] - global_stats['G']['min'])
red_image = (red_image - global_stats['R']['min']) / (global_stats['R']['max'] - global_stats['R']['min'])
depth_image = (depth_image - global_stats['D']['min']) / (global_stats['D']['max'] - global_stats['D']['min'])
else:
                    print('Technique not implemented. Available techniques are: normalization and standardization')
exit(0)
blue_image = blue_image.astype(np.float32)
green_image = green_image.astype(np.float32)
red_image = red_image.astype(np.float32)
depth_image = depth_image.astype(np.float32)
## B channel
config['statistics']['B']['max'][idx] = np.max(blue_image)
config['statistics']['B']['min'][idx] = np.min(blue_image)
config['statistics']['B']['mean'][idx] = np.mean(blue_image)
config['statistics']['B']['std'][idx] = np.std(blue_image)
## G channel
config['statistics']['G']['max'][idx] = np.max(green_image)
config['statistics']['G']['min'][idx] = np.min(green_image)
config['statistics']['G']['mean'][idx] = np.mean(green_image)
config['statistics']['G']['std'][idx] = np.std(green_image)
## R channel
config['statistics']['R']['max'][idx] = np.max(red_image)
config['statistics']['R']['min'][idx] = np.min(red_image)
config['statistics']['R']['mean'][idx] = np.mean(red_image)
config['statistics']['R']['std'][idx] = np.std(red_image)
## D channel
config['statistics']['D']['max'][idx] = np.max(depth_image)
config['statistics']['D']['min'][idx] = np.min(depth_image)
config['statistics']['D']['mean'][idx] = np.mean(depth_image)
config['statistics']['D']['std'][idx] = np.std(depth_image)
                # join the B, G, R channels into a single np.array image
bgr_image = cv2.merge([blue_image, green_image, red_image])
os.remove(f'{dataset.path_seq}/frame-{idx:05d}.depth.png')
os.remove(f'{dataset.path_seq}/frame-{idx:05d}.rgb.png')
np.save(f'{dataset.path_seq}/frame-{idx:05d}.depth.npy', depth_image)
np.save(f'{dataset.path_seq}/frame-{idx:05d}.rgb.npy', bgr_image)
config['processing'] = {'global' : global_dataset.path_seq,
'technique' : technique}
config['statistics']['B']['max'] = round(float(np.mean(config['statistics']['B']['max'])),5)
config['statistics']['B']['min'] = round(float(np.mean(config['statistics']['B']['min'])),5)
config['statistics']['B']['mean'] = round(float(np.mean(config['statistics']['B']['mean'])),5)
config['statistics']['B']['std'] = round(float(np.mean(config['statistics']['B']['std'])),5)
config['statistics']['G']['max'] = round(float(np.mean(config['statistics']['G']['max'])),5)
config['statistics']['G']['min'] = round(float(np.mean(config['statistics']['G']['min'])),5)
config['statistics']['G']['mean'] = round(float(np.mean(config['statistics']['G']['mean'])),5)
config['statistics']['G']['std'] = round(float(np.mean(config['statistics']['G']['std'])),5)
config['statistics']['R']['max'] = round(float(np.mean(config['statistics']['R']['max'])),5)
config['statistics']['R']['min'] = round(float(np.mean(config['statistics']['R']['min'])),5)
config['statistics']['R']['mean'] = round(float(np.mean(config['statistics']['R']['mean'])),5)
config['statistics']['R']['std'] = round(float(np.mean(config['statistics']['R']['std'])),5)
config['statistics']['D']['max'] = round(float(np.mean(config['statistics']['D']['max'])),5)
config['statistics']['D']['min'] = round(float(np.mean(config['statistics']['D']['min'])),5)
config['statistics']['D']['mean'] = round(float(np.mean(config['statistics']['D']['mean'])),5)
config['statistics']['D']['std'] = round(float(np.mean(config['statistics']['D']['std'])),5)
dataset.setConfig(config)
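    # Illustrative sketch (not part of the pipeline): the per-channel loops
    # above reduce to the two classic rescalings shown here for a single
    # hypothetical channel `img`.
    #
    #   img = np.random.randint(0, 256, (480, 640)).astype(np.float32)
    #   img_std = (img - np.mean(img)) / np.std(img)                  # standardization
    #   img_nrm = (img - np.min(img)) / (np.max(img) - np.min(img))   # normalization to [0, 1]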
| 37,230 | 49.312162 | 140 | py |
synfeal | synfeal-main/process_dataset/scripts/reduce_dataset.py |
# stdlib
import sys
import argparse
import copy
# 3rd-party
from dataset import Dataset
import os
import shutil
import yaml
def main():
    parser = argparse.ArgumentParser(description='Reduce dataset')
    parser.add_argument('-d', '--dataset', type=str, required=True, help='Name of the dataset')
    parser.add_argument('-dr', '--dataset_reduced', type=str, required=True, help='Name of the reduced dataset')
    parser.add_argument('-s', '--size', type=int, required=True, help='Number of frames to keep')
arglist = [x for x in sys.argv[1:] if not x.startswith('__')]
args = vars(parser.parse_args(args=arglist))
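    # example invocation (illustrative; dataset names are placeholders):
    #   python3 reduce_dataset.py -d seq_raw -dr seq_raw_small -s 1000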
dataset = Dataset(path_seq=args['dataset'])
path_root = dataset.root
dataset_reduced_path = f'{path_root}/{args["dataset_reduced"]}'
if os.path.exists(dataset_reduced_path):
print(f'{dataset_reduced_path} already exits. Aborting reducing')
exit(0)
else:
os.makedirs(dataset_reduced_path) # Create the new folder
# get config
config = dataset.getConfig()
if 'statistics' in config:
config.pop('statistics')
if not config['fast']:
files_to_copy = ['.pcd', '.rgb.png', '.depth.png','.pose.txt']
else:
files_to_copy = ['.rgb.png','.pose.txt']
    # copy the first frames (up to the requested size) to the reduced dataset
for idx in range(len(dataset)):
print(f'original idx: {idx}')
        if idx < args['size']:  # keep exactly `size` frames (0-indexed)
print(f'copying {idx} to {idx} in {dataset_reduced_path}')
for file in files_to_copy:
shutil.copy2(f'{dataset.path_seq}/frame-{idx:05d}{file}', f'{dataset_reduced_path}/frame-{idx:05d}{file}')
# copy intrinsics to both datasets
shutil.copy2(f'{dataset.path_seq}/depth_intrinsic.txt', f'{dataset_reduced_path}/depth_intrinsic.txt')
shutil.copy2(f'{dataset.path_seq}/rgb_intrinsic.txt', f'{dataset_reduced_path}/rgb_intrinsic.txt')
config['raw'] = args['dataset_reduced']
with open(f'{dataset_reduced_path}/config.yaml', 'w') as f:
yaml.dump(config, f)
if __name__ == "__main__":
main()
| 2,174 | 30.071429 | 128 | py |
synfeal | synfeal-main/synfeal_bringup/scripts/model_states_to_tf.py |
# Adapted from http://wiki.ros.org/tf2/Tutorials/Writing%20a%20tf2%20broadcaster%20%28Python%29
from functools import partial
import rospy
import tf2_ros
import geometry_msgs.msg
from gazebo_msgs.msg import ModelStates
def callbackModelStatesReceived(msg, tf_broadcaster):
childs = msg.name
pose = msg.pose
world = 'world'
now = rospy.Time.now()
# the gazebo has several models, so we have to pick the one we want
if 'localbot' in childs:
idx = childs.index('localbot')
transform = geometry_msgs.msg.TransformStamped()
transform.header.frame_id = world
transform.child_frame_id = '/base_footprint'
transform.header.stamp = now
transform.transform.translation.x = pose[idx].position.x
transform.transform.translation.y = pose[idx].position.y
transform.transform.translation.z = pose[idx].position.z
transform.transform.rotation.x = pose[idx].orientation.x
transform.transform.rotation.y = pose[idx].orientation.y
transform.transform.rotation.z = pose[idx].orientation.z
transform.transform.rotation.w = pose[idx].orientation.w
tf_broadcaster.sendTransform(transform)
def main():
rospy.init_node('model_states_to_tf')
rospy.Subscriber("/gazebo/model_states_throttle", ModelStates,
partial(callbackModelStatesReceived, tf_broadcaster=tf2_ros.TransformBroadcaster()))
rospy.spin()
if __name__ == '__main__':
main()
| 1,631 | 33.723404 | 105 | py |
synfeal | synfeal-main/synfeal_visualization/src/generate_real_vs_predicted.py |
import rospy
import os
from gazebo_msgs.srv import SetModelState, GetModelState, GetModelStateRequest, SetModelStateRequest
from colorama import Fore
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from utils import write_img
class GenerateRealPredicted():
def __init__(self, model_name, results):
self.set_state_service = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.model_name = model_name # model_name = 'localbot'
self.bridge = CvBridge()
self.folder = f'{results.path}/images'
if not os.path.exists(self.folder):
print(f'Creating folder {self.folder}')
os.makedirs(self.folder) # Create the new folder
else:
print(f'{Fore.RED} {self.folder} already exists... Aborting GenerateRealPredicted initialization! {Fore.RESET}')
exit(0)
rospy.wait_for_service('/gazebo/get_model_state')
self.get_model_state_service = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
def getPose(self):
return self.get_model_state_service(self.model_name, 'world')
def setPose(self, pose):
req = SetModelStateRequest() # Create an object of type SetModelStateRequest
req.model_state.model_name = self.model_name
req.model_state.pose.position.x = pose.position.x
req.model_state.pose.position.y = pose.position.y
req.model_state.pose.position.z = pose.position.z
req.model_state.pose.orientation.x = pose.orientation.x
req.model_state.pose.orientation.y = pose.orientation.y
req.model_state.pose.orientation.z = pose.orientation.z
req.model_state.pose.orientation.w = pose.orientation.w
req.model_state.reference_frame = 'world'
self.set_state_service(req.model_state)
def getImage(self):
rgb_msg = rospy.wait_for_message('/kinect/rgb/image_raw', Image)
return self.bridge.imgmsg_to_cv2(rgb_msg, "bgr8") # convert to opencv image
def saveImage(self, filename, image):
filename = f'{self.folder}/{filename}'
write_img(filename, image)
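    # Minimal usage sketch (assumes a running Gazebo/ROS session; `results` is
    # a Results-like object with a valid `path`, and `predicted_pose` is a
    # hypothetical geometry_msgs Pose):
    #
    #   gen = GenerateRealPredicted('localbot', results)
    #   gen.saveImage('real.png', gen.getImage())       # image at the real pose
    #   gen.setPose(predicted_pose)                     # move camera to prediction
    #   gen.saveImage('predicted.png', gen.getImage())  # image at the predicted pose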
| 2,221 | 39.4 | 124 | py |
synfeal | synfeal-main/produce_results/src/results.py | import numpy as np
import pandas as pd
import os
import yaml
from yaml.loader import SafeLoader
from utils import matrixToXYZ, matrixToQuaternion, normalize_quat
class Results():
def __init__(self, results_path):
path=os.environ.get("SYNFEAL_DATASET")
self.path = f'{path}/results/localbot/{results_path}'
self.nframes = int(sum(f.endswith('.txt') for f in os.listdir(self.path))/2)
self.csv = pd.read_csv(f'{self.path}/errors.csv')
def __getitem__(self, index):
# load pose
matrix_predicted = np.loadtxt(f'{self.path}/frame-{index:05d}.predicted.pose.txt', delimiter=',')
matrix_real = np.loadtxt(f'{self.path}/frame-{index:05d}.real.pose.txt', delimiter=',')
quaternion_real = matrixToQuaternion(matrix_real)
quaternion_real = normalize_quat(quaternion_real)
xyz_real = matrixToXYZ(matrix_real)
pose_real = np.append(xyz_real, quaternion_real)
quaternion_predicted = matrixToQuaternion(matrix_predicted)
quaternion_predicted = normalize_quat(quaternion_predicted)
xyz_predicted = matrixToXYZ(matrix_predicted)
pose_predicted = np.append(xyz_predicted, quaternion_predicted)
return pose_real, pose_predicted
def __len__(self):
return self.nframes
def getErrorsArrays(self):
        # skip the mean/median summary rows appended at the end of errors.csv
        pos_error_array = self.csv.iloc[:-2]['position_error (m)'].to_numpy()
        rot_error_array = self.csv.iloc[:-2]['rotation_error (rads)'].to_numpy()
return pos_error_array, rot_error_array
def updateCSV(self):
self.csv.to_csv(f'{self.path}/errors.csv', index=False, float_format='%.5f')
def getConfig(self):
with open(f'{self.path}/config.yaml') as f:
config = yaml.load(f, Loader=SafeLoader)
return config
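    # Usage sketch (illustrative; 'run1' is a placeholder results folder):
    #
    #   res = Results('run1')
    #   pose_real, pose_pred = res[0]                # [x, y, z, qx, qy, qz, qw]
    #   pos_err, rot_err = res.getErrorsArrays()     # per-frame errors from errors.csv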
| 1,911 | 32.54386 | 105 | py |
synfeal | synfeal-main/produce_results/src/save_results.py | import os
import yaml
import pandas as pd
import shutil
import matplotlib.pyplot as plt
from utils import write_transformation
from colorama import Fore
from datetime import datetime
class SaveResults():
"""
class to save results
"""
def __init__(self, output, model_path, seq_path, overwrite):
# attribute initializer
path=os.environ.get("SYNFEAL_DATASET")
self.output_folder = f'{path}/results/localbot/{output}'
self.model_path = model_path
self.seq_path = seq_path
if not os.path.exists(self.output_folder):
print(f'Creating folder {self.output_folder}')
os.makedirs(self.output_folder) # Create the new folder
elif overwrite:
print(f'Overwriting folder {self.output_folder}')
shutil.rmtree(self.output_folder)
os.makedirs(self.output_folder) # Create the new folder
else:
print(f'{Fore.RED} {self.output_folder} already exists... Aborting SaveResults initialization! {Fore.RESET}')
exit(0)
dt_now = datetime.now() # current date and time
config = {'user' : os.environ["USER"],
'date' : dt_now.strftime("%d/%m/%Y, %H:%M:%S"),
'model_path' : self.model_path,
'seq_path' : self.seq_path}
with open(f'{self.output_folder}/config.yaml', 'w') as file:
yaml.dump(config, file)
self.frame_idx = 0 # make sure to save as 00000
self.csv = pd.DataFrame(columns=('frame', 'position_error (m)', 'rotation_error (rads)'))
print('SaveResults initialized properly')
def saveTXT(self, real_transformation, predicted_transformation):
filename = f'frame-{self.frame_idx:05d}'
write_transformation(f'{self.output_folder}/{filename}.real.pose.txt', real_transformation)
write_transformation(f'{self.output_folder}/{filename}.predicted.pose.txt', predicted_transformation)
def updateCSV(self, position_error, rotation_error):
row = {'frame' : f'{self.frame_idx:05d}',
'position_error (m)' : position_error,
'rotation_error (rads)' : rotation_error}
        self.csv = pd.concat([self.csv, pd.DataFrame([row])], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
def saveCSV(self):
# save averages values in the last row
        mean_row = {'frame' : 'mean_values',
                    'position_error (m)' : self.csv.mean(axis=0, numeric_only=True).loc["position_error (m)"],
                    'rotation_error (rads)' : self.csv.mean(axis=0, numeric_only=True).loc["rotation_error (rads)"]}
        median_row = {'frame' : 'median_values',
                      'position_error (m)' : self.csv.median(axis=0, numeric_only=True).loc["position_error (m)"],
                      'rotation_error (rads)' : self.csv.median(axis=0, numeric_only=True).loc["rotation_error (rads)"]}
        self.csv = pd.concat([self.csv, pd.DataFrame([mean_row, median_row])], ignore_index=True)
print(self.csv)
self.csv.to_csv(f'{self.output_folder}/errors.csv', index=False, float_format='%.5f')
def saveErrorsFig(self):
frames_array = self.csv.iloc[:-2]['frame'].to_numpy().astype(int)
pos_error_array = self.csv.iloc[:-2]['position_error (m)'].to_numpy()
rot_error_array = self.csv.iloc[:-2]['rotation_error (rads)'].to_numpy()
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
fig.suptitle('position and rotation errors')
ax1.plot(frames_array, pos_error_array, 'cyan', label='position error')
ax2.plot(frames_array, rot_error_array, 'navy', label='rotation error')
ax2.set_xlabel('frame idx')
ax2.set_ylabel('[rads]')
ax1.set_ylabel('[m]')
ax1.legend()
ax2.legend()
plt.savefig(f'{self.output_folder}/errors.png')
def step(self):
self.frame_idx+=1
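    # Typical loop (illustrative sketch; T_real/T_pred are 4x4 pose matrices
    # and the error values come from an external comparison):
    #
    #   sr = SaveResults('run1', 'model.pth', 'seq_test', overwrite=True)
    #   for T_real, T_pred, e_pos, e_rot in predictions:
    #       sr.saveTXT(T_real, T_pred)
    #       sr.updateCSV(e_pos, e_rot)
    #       sr.step()
    #   sr.saveCSV()
    #   sr.saveErrorsFig()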
| 4,142 | 37.719626 | 121 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/catalog/pylib_catalog.py | """
Created on Tue Jul 20 10:39:12 2021
@author: glavrent
"""
#load libraries
#arithmetic libraries
import numpy as np
def IndexAvgColumns(df_data, col_idx, col2avg):
'''
Average columns based on index column
Parameters
----------
df_data : pd.dataframe
Data data-frame.
col_idx : str
Name of index column.
col2avg : list
List of column names to be averaged.
Returns
-------
df_data : pd.dataframe
Data data-frame.
'''
#unique ids
idx_array, inv_array = np.unique(df_data[col_idx], return_inverse=True)
#iterate over columns
for col in col2avg:
#compute average values for all unique indices
avg_vals = np.array([np.nanmean(df_data.loc[df_data[col_idx] == idx,col]) for idx in idx_array])
df_data.loc[:,col] = avg_vals[inv_array]
return df_data
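# Example (illustrative column names): rows sharing an 'eventid' end up with
# the same mean latitude/longitude.
#
#   import pandas as pd
#   df = pd.DataFrame({'eventid': [1, 1, 2],
#                      'lat': [34.0, 34.2, 36.0],
#                      'lon': [-118.0, -118.2, -120.0]})
#   df = IndexAvgColumns(df, 'eventid', ['lat', 'lon'])
#   # event 1 rows now both hold lat=34.1, lon=-118.1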
def ColocatePt(df_flatfile, col_idx, col_coor, thres_dist=0.01, return_df_pt=False):
'''
Colocate points (assign same ID) based on threshold distance.
Parameters
----------
df_flatfile : pd.DataFrame
Catalog flatfile.
col_idx : str
Name of index column.
col_coor : list of str
List of coordinate name columns.
thres_dist : real, optional
Value of threshold distance. The default is 0.01.
return_df_pt : bool, optional
Option for returning point data frame. The default is False.
Returns
-------
df_flatfile : pd.DataFrame
Catalog flatfile with updated index column.
df_pt: pd.DataFrame
Point data frame with updated index column.
'''
#dataframe with unique points
_, pt_idx, pt_inv = np.unique(df_flatfile[col_idx], axis=0, return_index=True, return_inverse=True)
df_pt = df_flatfile.loc[:,[col_idx] + col_coor].iloc[pt_idx,:]
#find and merge collocated points
for _, pt in df_pt.iterrows():
#distance between points
dist2pt = np.linalg.norm((df_pt[col_coor] - pt[col_coor]).astype(float), axis=1)
#indices of collocated points
i_pt_coll = dist2pt < thres_dist
#assign new id for collocated points
df_pt.loc[i_pt_coll,col_idx] = pt[col_idx].astype(int)
#update pt info to main catalog
df_flatfile.loc[:,col_idx] = df_pt[col_idx].values[pt_inv]
if not return_df_pt:
return df_flatfile
else:
return df_flatfile, df_pt
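# Example (illustrative): two stations closer than the threshold distance in a
# km-based coordinate system collapse to a single station id.
#
#   import pandas as pd
#   df = pd.DataFrame({'ssn': [1, 2], 'x': [100.000, 100.005], 'y': [50.0, 50.0]})
#   df = ColocatePt(df, 'ssn', ['x', 'y'], thres_dist=0.01)
#   # both rows now share ssn = 1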
def UsableSta(mag_array, dist_array, df_coeffs):
'''
Find records that meet the mag-distance limits
Parameters
----------
mag_array : np.array
Magnitude array.
dist_array : np.array
Distance array.
df_coeffs : pd.DataFrame
Coefficients dataframe.
Returns
-------
rec_lim : np.array
logical array with True for records that meet M/R limits.
'''
#rrup limit
rrup_lim = dist_array <= df_coeffs.loc['max_rrup','coefficients']
#mag limit
mag_min = (df_coeffs.loc['b1','coefficients'] +
df_coeffs.loc['b1','coefficients'] * dist_array +
df_coeffs.loc['b2','coefficients'] * dist_array**2)
mag_lim = mag_array >= mag_min
#find records that meet both conditions
rec_lim = np.logical_and(rrup_lim, mag_lim)
return rec_lim
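# Example call (illustrative; real coefficient values come from a
# model-specific file):
#
#   import pandas as pd
#   df_c = pd.DataFrame({'coefficients': [300., 3.0, 0.005]},
#                       index=['max_rrup', 'b1', 'b2'])
#   i_ok = UsableSta(np.array([5.0, 7.0]), np.array([50., 250.]), df_c)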
| 3,361 | 26.557377 | 104 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/plotting/pylib_contour_plots.py | """
Created on Sat Nov 9 13:12:38 2019
@author: glavrent
"""
## load libraries
#arithmetic
import numpy as np
from scipy.interpolate import griddata
from scipy.ndimage import gaussian_filter
#plotting
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
#from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
#base map
from cartopy import config
import cartopy.crs as ccrs
import cartopy.feature as cfeature
class FormatScalarFormatter(matplotlib.ticker.ScalarFormatter):
def __init__(self, fformat="%1.1f", offset=True, mathText=True):
self.fformat = fformat
matplotlib.ticker.ScalarFormatter.__init__(self,useOffset=offset,
useMathText=mathText)
    def _set_format(self, vmin=None, vmax=None):
        self.format = self.fformat
        if self._useMathText:
            # wrap in \mathdefault directly; the private helper
            # matplotlib.ticker._mathdefault was removed in newer matplotlib
            self.format = r'$\mathdefault{%s}$' % self.fformat
## Main functions
def PlotContourMapObs(cont_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False, frmt_clb = '%.2f',
prj_map = False):
'''
PlotContourMapObs:
Input Arguments:
cont_latlondata (np.array [n1,3]): contains the latitude, logitude and contour values
cont_latlondata = [lat, long, data]
cmin (double-opt): lower limit for color levels for contour plot
cmax (double-opt): upper limit for color levels for contour plot
title (str-opt): figure title
cbar_label (str-opt): contour plot color bar label
ptlevs (np.array-opt): color levels for points
pt_label (str-opt): points color bar label
log_cbar (bool-opt): if true use log-scale for contour plots
frmt_clb string format color bar ticks
Output Arguments:
'''
plt_res = '50m'
plt_scale = '50m'
#number of interpolation points, x & y direction
#ngridx = 5000
#ngridy = 5000
#ngridx = 500
#ngridy = 500
ngridx = 100
ngridy = 100
#create figure
fig = plt.figure(figsize=(10, 10))
#fig = plt.figure(figsize=(15, 15))
#create basemap
if prj_map == True:
data_crs = ccrs.PlateCarree()
ax = fig.add_subplot(1, 1, 1, projection=data_crs)
else:
data_crs = None
ax = fig.add_subplot(1, 1, 1)
#project contour data
x_cont = cont_latlondata[:,1]
y_cont = cont_latlondata[:,0]
#interpolation grid
x_int = np.linspace(x_cont.min(), x_cont.max(), ngridx)
y_int = np.linspace(y_cont.min(), y_cont.max(), ngridy)
X_grid, Y_grid = np.meshgrid(x_int, y_int)
#interpolate contour data on grid
if log_cbar:
data_cont = np.log(cont_latlondata[:,2])
else:
data_cont = cont_latlondata[:,2]
data_grid = griddata((x_cont, y_cont) , data_cont, (X_grid, Y_grid), method='linear')
#data colorbar
cbmin = data_cont.min() if cmin is None else cmin
cbmax = data_cont.max() if cmax is None else cmax
clevs = np.linspace(cbmin, cbmax, 41).tolist()
#plot interpolated data
if prj_map == True:
cs = ax.contourf(X_grid, Y_grid, data_grid, transform = data_crs, vmin=cmin, vmax=cmax, levels = clevs, zorder=3, alpha = 0.75)
else:
cs = ax.contourf(X_grid, Y_grid, data_grid, vmin=cmin, vmax=cmax, levels = clevs, zorder=3, alpha = 0.75)
#color bar
fmt_clb = ticker.FormatStrFormatter(frmt_clb)
cbar_ticks = clevs[0:41:8]
cbar = fig.colorbar(cs, boundaries=clevs, ticks=cbar_ticks, pad=0.05, orientation="horizontal", format=fmt_clb) # add colorbar
if log_cbar:
cbar_labels = [frmt_clb%np.exp(c_t) for c_t in cbar_ticks]
cbar.set_ticklabels(cbar_labels)
#add tick labs
cbar.ax.tick_params(labelsize=18)
if (not cbar_label is None): cbar.set_label(cbar_label, size=20)
if prj_map == True:
#add costal lines
ax.coastlines(resolution=plt_res, edgecolor='black', zorder=5);
#add state boundaries
states = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines',
scale=plt_scale, facecolor='none')
ax.add_feature(states, edgecolor='black', zorder=3)
borders = cfeature.NaturalEarthFeature(category='cultural', name='admin_0_countries',
scale=plt_scale, facecolor='none')
ax.add_feature(borders, edgecolor='black', zorder=4)
#add water bodies
oceans = cfeature.NaturalEarthFeature(category='physical', name='ocean', facecolor='lightblue',
scale=plt_scale)
ax.add_feature(oceans, zorder=6)
#add figure title
if (not title is None): plt.title(title, fontsize=25)
plt.xlabel('Latitude (deg)', fontsize=20)
plt.ylabel('Longitude (deg)', fontsize=20)
#grid lines
if flag_grid:
# gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
else:
gl = None
# fig.show()
# fig.draw()
fig.tight_layout()
return fig, ax, cbar, data_crs, gl
# Original PlotContourCAMap function
def PlotContourCAMapAdv(cont_latlondata, line_latlon=None, pt_latlondata=None, clevs=None, flag_grid=False, title=None, cbar_label=None,
ptlevs = None, pt_label = None, log_cbar = False, frmt_clb = '%.2f', **kwargs):
'''
PlotContourCAMapAdv:
create a contour plot of the data in cont_latlondata
Input Arguments:
        cont_latlondata (np.array [n1,3]): contains the latitude, longitude and contour values
                                           cont_latlondata = [lat, long, data]
        line_latlon (np.array-opt [n2,2]): contains the latitude and longitude coordinates of any lines
        pt_latlondata (np.array-opt [n3,(2,3)]): contains the latitude, longitude and values of disp points
                                                 pt_latlondata = [lat, long, data-optional]
clevs (np.array-opt): color levels for contour plot
title (str-opt): figure title
cbar_label (str-opt): contour plot color bar label
ptlevs (np.array-opt): color levels for points
pt_label (str-opt): points color bar label
log_cbar (bool-opt): if true use log-scale for contour plots
frmt_clb string format color bar ticks
Output Arguments:
'''
#additional input arguments
flag_smooth = kwargs['flag_smooth'] if 'flag_smooth' in kwargs else False
sig_smooth = kwargs['smooth_sig'] if 'smooth_sig' in kwargs else 0.1
plt_res = '10m'
plt_scale = '10m'
#number of interpolation points, x & y direction
#ngridx = 5000
#ngridy = 5000
#ngridx = 500
#ngridy = 500
ngridx = 100
ngridy = 100
#create figure
fig = plt.figure(figsize=(10, 10))
#fig = plt.figure(figsize=(15, 15))
#create basemap
data_crs = ccrs.PlateCarree()
ax = fig.add_subplot(1, 1, 1, projection=data_crs)
#project contour data
x_cont = cont_latlondata[:,1]
y_cont = cont_latlondata[:,0]
#interpolation grid
x_int = np.linspace(x_cont.min(), x_cont.max(), ngridx)
y_int = np.linspace(y_cont.min(), y_cont.max(), ngridy)
X_grid, Y_grid = np.meshgrid(x_int, y_int)
#interpolate contour data on grid
data_cont = cont_latlondata[:,2]
data_grid = griddata((x_cont, y_cont) , data_cont, (X_grid, Y_grid), method='linear')
#smooth
if flag_smooth:
data_grid = gaussian_filter(data_grid, sigma=sig_smooth)
#data colorbar
if clevs is None:
if not log_cbar:
clevs = np.linspace(data_cont.min(),data_cont.max(),11).tolist()
else:
clevs = np.logspace(np.log10(data_cont.min()),np.log10(data_cont.max()),11).tolist()
#plot interpolated data
if not log_cbar:
cs = ax.contourf(X_grid, Y_grid, data_grid, transform = data_crs, levels = clevs, zorder=3, alpha = 0.75)
else:
cs = ax.contourf(X_grid, Y_grid, data_grid, transform = data_crs, levels = clevs, zorder=3, alpha = 0.75,
locator=ticker.LogLocator())
#color bar
fmt_clb = ticker.FormatStrFormatter(frmt_clb)
if not log_cbar:
cbar = fig.colorbar(cs, boundaries = clevs, pad=0.05, orientation="horizontal", format=fmt_clb) # add colorbar
else:
cbar = fig.colorbar(cs, boundaries = clevs, pad=0.05, orientation="horizontal", format=fmt_clb) # add colorbar
cbar.ax.tick_params(labelsize=18)
if (not cbar_label is None): cbar.set_label(cbar_label, size=20)
#plot line
    if line_latlon is not None:
        # cartopy axes take transform=, not Basemap's latlon= keyword
        ax.plot(line_latlon[:,1], line_latlon[:,0], transform=data_crs, linewidth=3, color='k', zorder=5)
    #plot points
    if pt_latlondata is not None:
        if np.size(pt_latlondata,1) == 2:
            ax.plot(pt_latlondata[:,1], pt_latlondata[:,0], 'o', transform=data_crs, color='k', markersize=4, zorder=8)
        elif np.size(pt_latlondata,1) == 3:
            raise ValueError('Unimplemented plotting option')
#add costal lines
ax.coastlines(resolution=plt_res, edgecolor='black', zorder=5);
#add state boundaries
states = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines',
scale=plt_scale, facecolor='none')
ax.add_feature(states, edgecolor='black', zorder=3)
ax.add_feature(cfeature.BORDERS, zorder=4)
#add oceans
oceans = cfeature.NaturalEarthFeature(category='physical', name='ocean', facecolor='lightblue',
scale=plt_scale)
ax.add_feature(oceans, zorder=6)
#add figure title
if (not title is None): plt.title(title, fontsize=25)
plt.xlabel('Latitude (deg)', fontsize=20)
plt.ylabel('Longitude (deg)', fontsize=20)
#grid lines
if flag_grid:
# gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
else:
gl = None
fig.tight_layout()
return fig, ax, cbar, data_crs, gl
# Updated PlotContourCAMap function
def PlotContourCAMap(cont_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False,
frmt_clb = '%.2f', cmap = 'viridis', **kwargs):
'''
PlotContourCAMap:
    simplified function to create a contour plot of the data in cont_latlondata
    Input Arguments:
        cont_latlondata (np.array [n1,3]): contains the latitude, longitude and contour values
cont_latlondata = [lat, long, data]
cmin (double-opt): lower limit for color levels for contour plot
cmax (double-opt): upper limit for color levels for contour plot
title (str-opt): figure title
cbar_label (str-opt): contour plot color bar label
ptlevs (np.array-opt): color levels for points
pt_label (str-opt): points color bar label
log_cbar (bool-opt): if true use log-scale for contour plots
frmt_clb string format color bar ticks
Output Arguments:
'''
#additional input arguments
flag_smooth = kwargs['flag_smooth'] if 'flag_smooth' in kwargs else False
sig_smooth = kwargs['smooth_sig'] if 'smooth_sig' in kwargs else 0.1
intrp_method = kwargs['intrp_method'] if 'intrp_method' in kwargs else 'linear'
plt_res = '50m'
plt_scale = '50m'
#number of interpolation points, x & y direction
#ngridx = 5000
#ngridy = 5000
ngridx = 500
ngridy = 500
#ngridx = 100
#ngridy = 100
#create figure
fig = plt.figure(figsize=(10, 10))
#fig = plt.figure(figsize=(15, 15))
#create basemap
data_crs = ccrs.PlateCarree()
ax = fig.add_subplot(1, 1, 1, projection=data_crs)
#project contour data
x_cont = cont_latlondata[:,1]
y_cont = cont_latlondata[:,0]
#interpolation grid
x_int = np.linspace(x_cont.min(), x_cont.max(), ngridx)
y_int = np.linspace(y_cont.min(), y_cont.max(), ngridy)
X_grid, Y_grid = np.meshgrid(x_int, y_int)
#interpolate contour data on grid
if log_cbar:
data_cont = np.log(cont_latlondata[:,2])
else:
data_cont = cont_latlondata[:,2]
data_grid = griddata((x_cont, y_cont) , data_cont, (X_grid, Y_grid), method=intrp_method )
#smooth
if flag_smooth:
data_grid = gaussian_filter(data_grid, sigma=sig_smooth)
#data colorbar
cbmin = data_cont.min() if cmin is None else cmin
cbmax = data_cont.max() if cmax is None else cmax
clevs = np.linspace(cbmin, cbmax, 41).tolist()
#plot interpolated data
cs = ax.contourf(X_grid, Y_grid, data_grid, transform = data_crs, vmin=cmin, vmax=cmax,
levels = clevs, zorder=3, alpha = 0.75, cmap=cmap)
#color bar
#import pdb; pdb.set_trace()
fmt_clb = ticker.FormatStrFormatter(frmt_clb)
cbar_ticks = clevs[0:41:10]
cbar = fig.colorbar(cs, boundaries=clevs, ticks=cbar_ticks, pad=0.05, orientation="horizontal", format=fmt_clb) # add colorbar
if log_cbar:
cbar_labels = [frmt_clb%np.exp(c_t) for c_t in cbar_ticks]
cbar.set_ticklabels(cbar_labels)
cbar.ax.tick_params(labelsize=18)
if (not cbar_label is None): cbar.set_label(cbar_label, size=20)
#add costal lines
ax.coastlines(resolution=plt_res, edgecolor='black', zorder=5);
#add state boundaries
states = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines',
scale=plt_scale, facecolor='none')
ax.add_feature(states, edgecolor='black', zorder=3)
borders = cfeature.NaturalEarthFeature(category='cultural', name='admin_0_countries',
scale=plt_scale, facecolor='none')
ax.add_feature(borders, edgecolor='black', zorder=4)
#add oceans
    oceans = cfeature.NaturalEarthFeature(category='physical', name='ocean', facecolor='lightblue', scale=plt_scale)
ax.add_feature(oceans, zorder=6)
#add figure title
if (not title is None): plt.title(title, fontsize=25)
plt.xlabel('Latitude (deg)', fontsize=20)
plt.ylabel('Longitude (deg)', fontsize=20)
#grid lines
if flag_grid:
# gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
else:
gl = None
# fig.show()
# fig.draw()
fig.tight_layout()
return fig, ax, cbar, data_crs, gl
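# Usage sketch (illustrative synthetic field):
#
#   lat, lon = np.meshgrid(np.linspace(33, 39, 20), np.linspace(-122, -115, 20))
#   vals = np.exp(-((lat - 36)**2 + (lon + 118.5)**2) / 8.)
#   data = np.column_stack([lat.ravel(), lon.ravel(), vals.ravel()])
#   fig, ax, cbar, crs, gl = PlotContourCAMap(data, cmin=0., cmax=1.,
#                                             cbar_label='amplitude', flag_grid=True)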
# PlotContourSloveniaMap function
def PlotContourSloveniaMap(cont_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False,
frmt_clb = '%.2f', **kwargs):
'''
    PlotContourSloveniaMap:
    simplified function to create a contour plot of the data in cont_latlondata
    Input Arguments:
        cont_latlondata (np.array [n1,3]): contains the latitude, longitude and contour values
cont_latlondata = [lat, long, data]
cmin (double-opt): lower limit for color levels for contour plot
cmax (double-opt): upper limit for color levels for contour plot
title (str-opt): figure title
cbar_label (str-opt): contour plot color bar label
ptlevs (np.array-opt): color levels for points
pt_label (str-opt): points color bar label
log_cbar (bool-opt): if true use log-scale for contour plots
frmt_clb string format color bar ticks
Output Arguments:
'''
plt_res = '50m'
plt_scale = '50m'
#number of interpolation points, x & y direction
#ngridx = 5000
#ngridy = 5000
#ngridx = 500
#ngridy = 500
ngridx = 100
ngridy = 100
#create figure
fig = plt.figure(figsize=(10, 10))
#fig = plt.figure(figsize=(15, 15))
#create basemap
data_crs = ccrs.PlateCarree()
ax = fig.add_subplot(1, 1, 1, projection=data_crs)
#project contour data
x_cont = cont_latlondata[:,1]
y_cont = cont_latlondata[:,0]
#interpolation grid
x_int = np.linspace(x_cont.min(), x_cont.max(), ngridx)
y_int = np.linspace(y_cont.min(), y_cont.max(), ngridy)
X_grid, Y_grid = np.meshgrid(x_int, y_int)
#interpolate contour data on grid
if log_cbar:
data_cont = np.log(cont_latlondata[:,2])
else:
data_cont = cont_latlondata[:,2]
data_grid = griddata((x_cont, y_cont) , data_cont, (X_grid, Y_grid), method='linear')
#smooth
if (kwargs['flag_smooth'] if 'flag_smooth' in kwargs else False):
sig_smooth = kwargs['smooth_sig'] if 'smooth_sig' in kwargs else 0.1
data_grid = gaussian_filter(data_grid, sigma=sig_smooth)
#data colorbar
cbmin = data_cont.min() if cmin is None else cmin
cbmax = data_cont.max() if cmax is None else cmax
clevs = np.linspace(cbmin, cbmax, 41).tolist()
#plot interpolated data
cs = ax.contourf(X_grid, Y_grid, data_grid, transform = data_crs, vmin=cmin, vmax=cmax, levels = clevs, zorder=3, alpha = 0.75)
#color bar
fmt_clb = ticker.FormatStrFormatter(frmt_clb)
cbar_ticks = clevs[0:41:8]
cbar = fig.colorbar(cs, boundaries=clevs, ticks=cbar_ticks, pad=0.05, orientation="horizontal", format=fmt_clb) # add colorbar
if log_cbar:
cbar_labels = [frmt_clb%np.exp(c_t) for c_t in cbar_ticks]
cbar.set_ticklabels(cbar_labels)
cbar.ax.tick_params(labelsize=18)
if (not cbar_label is None): cbar.set_label(cbar_label, size=20)
#add costal lines
ax.coastlines(resolution=plt_res, edgecolor='black', zorder=5);
#add state boundaries
#states = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines',
# scale=plt_scale, facecolor='none')
#ax.add_feature(states, edgecolor='black', zorder=3)
borders = cfeature.NaturalEarthFeature(category='cultural', name='admin_0_countries',
scale=plt_scale, facecolor='none')
ax.add_feature(borders, edgecolor='black', zorder=4)
#ax.add_feature(cfeature.BORDERS, zorder=4)
#add oceans
oceans = cfeature.NaturalEarthFeature(category='physical', name='ocean', facecolor='lightblue',
scale=plt_scale)
ax.add_feature(oceans, zorder=6)
#add figure title
if (not title is None): plt.title(title, fontsize=25)
plt.xlabel('Latitude (deg)', fontsize=20)
plt.ylabel('Longitude (deg)', fontsize=20)
#grid lines
if flag_grid:
# gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
else:
gl = None
# fig.show()
# fig.draw()
fig.tight_layout()
return fig, ax, cbar, data_crs, gl
# Scatter plot function
def PlotScatterCAMap(scat_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False,
frmt_clb = '%.2f', alpha_v = 0.7, cmap='seismic', marker_size=10.):
'''
    PlotScatterCAMap:
    create a scatter (bubble) plot of the data in scat_latlondata
    Input Arguments:
        scat_latlondata (np.array [n1,(3,4)]): contains the latitude, longitude, contour values, and size values (optional)
scat_latlondata = [lat, long, data_color, data_size]
cmin (double-opt): lower limit for color levels for contour plot
cmax (double-opt): upper limit for color levels for contour plot
title (str-opt): figure title
cbar_label (str-opt): contour plot color bar label
ptlevs (np.array-opt): color levels for points
pt_label (str-opt): points color bar label
log_cbar (bool-opt): if true use log-scale for contour plots
frmt_clb: string format color bar ticks
alpha_v: opacity value
cmap: color palette
marker_size: marker size, if scat_latlondata dimensions is [n1, 3]
Output Arguments:
'''
#import pdb; pdb.set_trace()
plt_res = '10m'
plt_scale = '10m'
#create figure
fig = plt.figure(figsize=(10, 10))
#fig = plt.figure(figsize=(15, 15))
#create basemap
data_crs = ccrs.PlateCarree()
ax = fig.add_subplot(1, 1, 1, projection=data_crs)
#project contour data
x_scat = scat_latlondata[:,1]
y_scat = scat_latlondata[:,0]
#color scale
if log_cbar:
data_scat_c = np.log(scat_latlondata[:,2])
else:
data_scat_c = scat_latlondata[:,2]
#size scale
if scat_latlondata.shape[1] > 3:
data_scat_s = scat_latlondata[:,3]
else:
data_scat_s = marker_size * np.ones(data_scat_c.shape)
#data colorbar
cbmin = data_scat_c.min() if cmin is None else cmin
cbmax = data_scat_c.max() if cmax is None else cmax
clevs = np.linspace(cbmin, cbmax, 41).tolist()
#plot scatter bubble plot data
cs = ax.scatter(x_scat, y_scat, s = data_scat_s, c = data_scat_c,
transform = data_crs, vmin=cmin, vmax=cmax, zorder=3, alpha=alpha_v, cmap=cmap)
#color bar
#import pdb; pdb.set_trace()
fmt_clb = ticker.FormatStrFormatter(frmt_clb)
cbar_ticks = clevs[0:41:8]
cbar = fig.colorbar(cs, boundaries=clevs, ticks=cbar_ticks, pad=0.05, orientation="horizontal", format=fmt_clb) # add colorbar
if log_cbar:
cbar_labels = [frmt_clb%np.exp(c_t) for c_t in cbar_ticks]
cbar.set_ticklabels(cbar_labels)
cbar.ax.tick_params(labelsize=18)
if (not cbar_label is None): cbar.set_label(cbar_label, size=20)
#add costal lines
ax.coastlines(resolution=plt_res, edgecolor='black', zorder=5);
#add state boundaries
states = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines',
scale=plt_scale, facecolor='none')
ax.add_feature(states, edgecolor='black', zorder=3)
ax.add_feature(cfeature.BORDERS, zorder=4)
#oceans
oceans = cfeature.NaturalEarthFeature(category='physical', name='ocean', facecolor='lightblue',
scale=plt_scale)
ax.add_feature(oceans, zorder=2)
#add figure title
if (not title is None): plt.title(title, fontsize=25)
plt.xlabel('Latitude (deg)', fontsize=20)
plt.ylabel('Longitude (deg)', fontsize=20)
#grid lines
if flag_grid:
# gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
else:
gl = None
# fig.show()
# fig.draw()
fig.tight_layout()
return fig, ax, cbar, data_crs, gl
# Updated PlotContourCAMap function
def PlotCellsCAMap(cell_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False, frmt_clb = '%.2f',
alpha_v = .8, cell_size = 50, cmap='seismic'):
'''
PlotCellsCAMap:
PlotCellsCAMap function to create a contour plot of the data in cont_latlondata
Input Arguments:
cell_latlondata (np.array [n1,3]): contains the latitude, logitude and color values
cell_latlondata = [lat, long, data]
cmin (double-opt): lower limit for color levels for contour plot
cmax (double-opt): upper limit for color levels for contour plot
title (str-opt): figure title
cbar_label (str-opt): contour plot color bar label
ptlevs (np.array-opt): color levels for points
pt_label (str-opt): points color bar label
log_cbar (bool-opt): if true use log-scale for contour plots
frmt_clb string format color bar ticks
Output Arguments:
'''
plt_res = '50m'
plt_scale = '50m'
#create figure
fig = plt.figure(figsize=(10, 10))
#create basemap
data_crs = ccrs.PlateCarree()
ax = fig.add_subplot(1, 1, 1, projection=data_crs)
#project contour data
x_cell = cell_latlondata[:,1]
y_cell = cell_latlondata[:,0]
#contour transfomration
if log_cbar:
data_cell = np.log(cell_latlondata[:,2])
else:
data_cell = cell_latlondata[:,2]
#data colorbar
cbmin = data_cell.min() if cmin is None else cmin
cbmax = data_cell.max() if cmax is None else cmax
clevs = np.linspace(cbmin, cbmax, 41).tolist()
#plot interpolated data
cs = ax.scatter(x_cell, y_cell, s = cell_size, c = data_cell, transform = data_crs, vmin=cmin, vmax=cmax, zorder=3,
alpha = alpha_v, cmap=cmap)
#cs = ax.contourf(X_grid, Y_grid, data_grid, transform = data_crs, vmin=cmin, vmax=cmax, levels = clevs, zorder=3, alpha = 0.75)
#color bar
#import pdb; pdb.set_trace()
fmt_clb = ticker.FormatStrFormatter(frmt_clb)
cbar_ticks = clevs[0:41:8]
cbar = fig.colorbar(cs, boundaries=clevs, ticks=cbar_ticks, pad=0.05, orientation="horizontal", format=fmt_clb) # add colorbar
if log_cbar:
cbar_labels = [frmt_clb%np.exp(c_t) for c_t in cbar_ticks]
cbar.set_ticklabels(cbar_labels)
cbar.ax.tick_params(labelsize=18)
if (not cbar_label is None): cbar.set_label(cbar_label, size=20)
#add costal lines
ax.coastlines(resolution=plt_res, edgecolor='black', zorder=5);
#add state boundaries
states = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines',
scale=plt_scale, facecolor='none')
ax.add_feature(states, edgecolor='black', zorder=3)
borders = cfeature.NaturalEarthFeature(category='cultural', name='admin_0_countries',
scale=plt_scale, facecolor='none')
ax.add_feature(borders, edgecolor='black', zorder=4)
#add oceans
#ax.stock_img()
oceans = cfeature.NaturalEarthFeature(category='physical', name='ocean', facecolor='lightblue',
scale=plt_scale)
ax.add_feature(oceans, zorder=2)
#add figure title
if (not title is None): ax.set_title(title, fontsize=25)
ax.set_xlabel('Latitude (deg)', fontsize=20)
ax.set_ylabel('Longitude (deg)', fontsize=20)
#grid lines
if flag_grid:
# gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
else:
gl = None
# fig.show()
# fig.draw()
fig.tight_layout()
return fig, ax, cbar, data_crs, gl
# Plotting coefficient function
#plotting of median values coefficients
def PlotCoeffCAMapMed(cont_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False, frmt_clb = '%.2f', **kwargs):
cmap = 'seismic'
fig, ax, cbar, data_crs, gl = PlotContourCAMap(cont_latlondata, cmin=cmin, cmax=cmax, flag_grid=flag_grid, title=title, cbar_label=cbar_label,
log_cbar = log_cbar, frmt_clb = frmt_clb, cmap = cmap, **kwargs)
return fig, ax, cbar, data_crs, gl
#plotting of epistemic uncertainty coefficients
def PlotCoeffCAMapSig(cont_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False, frmt_clb = '%.2f', **kwargs):
cmap = 'Purples_r'
fig, ax, cbar, data_crs, gl = PlotContourCAMap(cont_latlondata, cmin=cmin, cmax=cmax, flag_grid=flag_grid, title=title, cbar_label=cbar_label,
log_cbar = log_cbar, frmt_clb = frmt_clb, cmap = cmap, **kwargs)
return fig, ax, cbar, data_crs, gl
#plotting of median values of cells
def PlotCellsCAMapMed(cell_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False, frmt_clb = '%.2f',
alpha_v = .8, cell_size = 50):
cmap = 'seismic'
fig, ax, cbar, data_crs, gl = PlotCellsCAMap(cell_latlondata, cmin=cmin, cmax=cmax, flag_grid=flag_grid, title=title, cbar_label=cbar_label,
log_cbar=log_cbar, frmt_clb=frmt_clb,
alpha_v=alpha_v, cell_size=cell_size, cmap=cmap)
return fig, ax, cbar, data_crs, gl
#plotting of mono-color increasing values of cells
def PlotCellsCAMapInc(cell_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False, frmt_clb = '%.2f',
alpha_v = .8, cell_size = 50):
cmap = 'Reds'
fig, ax, cbar, data_crs, gl = PlotCellsCAMap(cell_latlondata, cmin=cmin, cmax=cmax, flag_grid=flag_grid, title=title, cbar_label=cbar_label,
log_cbar=log_cbar, frmt_clb=frmt_clb,
alpha_v=alpha_v, cell_size=cell_size, cmap=cmap)
return fig, ax, cbar, data_crs, gl
#plotting of epistemic uncertainty of cells
def PlotCellsCAMapSig(cell_latlondata, cmin=None, cmax=None, flag_grid=False, title=None, cbar_label=None, log_cbar = False, frmt_clb = '%.2f',
alpha_v = .8, cell_size = 50):
cmap = 'Purples_r'
fig, ax, cbar, data_crs, gl = PlotCellsCAMap(cell_latlondata, cmin=cmin, cmax=cmax, flag_grid=flag_grid, title=title, cbar_label=cbar_label,
log_cbar=log_cbar, frmt_clb=frmt_clb,
alpha_v=alpha_v, cell_size=cell_size, cmap=cmap )
return fig, ax, cbar, data_crs, gl
# Base plot function
def PlotMap(lat_lims = None, lon_lims = None, flag_grid=False, title=None):
'''
    PlotMap:
    create a base map (coastlines, state/country borders, oceans) without any data layer
    Input Arguments:
        lat_lims (list-opt): latitude limits of the map
        lon_lims (list-opt): longitude limits of the map
        flag_grid (bool-opt): if true add grid lines
        title (str-opt): figure title
Output Arguments:
'''
plt_res = '50m'
plt_scale = '50m'
#create figure
fig = plt.figure(figsize=(10, 10))
#fig = plt.figure(figsize=(15, 15))
#create basemap
data_crs = ccrs.PlateCarree()
ax = fig.add_subplot(1, 1, 1, projection=data_crs)
if lat_lims:
ax.set_xlim(lon_lims)
if lon_lims:
ax.set_ylim(lat_lims)
#add land zones
lands = cfeature.LAND
ax.add_feature(lands, zorder=1)
#add costal lines
ax.coastlines(resolution=plt_res, edgecolor='black', zorder=3);
#add state boundaries
states = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines',
scale=plt_scale, facecolor='none')
ax.add_feature(states, edgecolor='black', zorder=4)
borders = cfeature.NaturalEarthFeature(category='cultural', name='admin_0_countries',
scale=plt_scale, facecolor='none')
ax.add_feature(borders, edgecolor='black', zorder=5)
#add oceans
oceans = cfeature.NaturalEarthFeature(category='physical', name='ocean', facecolor='lightblue',
scale=plt_scale)
ax.add_feature(oceans, zorder=2)
#add figure title
if (not title is None): plt.title(title, fontsize=25)
plt.xlabel('Latitude (deg)', fontsize=20)
plt.ylabel('Longitude (deg)', fontsize=20)
#grid lines
if flag_grid:
# gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
else:
gl = None
# fig.show()
# fig.draw()
# fig.tight_layout()
return fig, ax, data_crs, gl
| 35,537 | 40.613583 | 154 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/ground_motions/pylib_gmm_eas.py | # ba18.py
# Conversion of Jeff Bayless' MATLAB code to Python
# Including class ba18
# I've tried to avoid mixed UPPER and lower case variable names
# e.g. Mbreak, Rrup, Vsref
#arithmetic libraries
import numpy as np
import numpy.matlib
from scipy import linalg as scipylalg
from scipy import sparse as scipysp
#geographic coordinates
import pyproj
#statistics libraries
import pandas as pd
#geometric libraries
from shapely.geometry import Point as shp_pt, Polygon as shp_poly
def SlicingSparceMat(mat_sp, i_rows, j_col):
'''Slice sparse matrix'''
return np.array([mat_sp.getcol(i_r).toarray().flatten()[j_col] for i_r in i_rows])
def QuartCos(per, x0, x, flag_left = False):
y = np.cos( 2.*np.pi*(x-x0)/per )
if flag_left: y[np.logical_or(x < x0-per/4, x > x0)] = 0.
else: y[np.logical_or(x < x0, x > x0+per/4)] = 0.
return y
def QuadCosTapper(freq, freq_nerg):
#boxcar at intermediate frequencies
i_box = np.logical_and(freq >= freq_nerg.min(), freq <= freq_nerg.max())
y_box = np.zeros(len(freq))
y_box[i_box] = 1.
#quarter cosine left taper
per = 2 * freq_nerg.min()
y_tpl = QuartCos(per, freq_nerg.min(), freq, flag_left=True)
#quarter cosine right taper
per = 2 * freq_nerg.max()
y_tpr = QuartCos(per, freq_nerg.max(), freq)
#combined tapering function
y_tapper = np.array([y_box, y_tpl, y_tpr]).max(axis=0)
return y_tapper
def TriagTapper(freq, freq_nerg):
fn_min = freq_nerg.min()
fn_max = freq_nerg.max()
#triangular window
f_win = np.array([0.5*fn_min, fn_min, fn_max, 1.5*fn_max])
y_win = np.array([0., 1., 1., 0.])
#triangular tapering function
y_tapper = np.interp(np.log(freq), np.log(f_win), y_win)
return y_tapper
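# Example (illustrative): both tapering windows over 0.1-100 Hz for a usable
# band of 0.5-10 Hz.
#
#   freq = np.logspace(-1, 2, 301)
#   f_usable = np.array([0.5, 10.])
#   w_quad = QuadCosTapper(freq, f_usable)  # boxcar with quarter-cosine edges
#   w_tri  = TriagTapper(freq, f_usable)    # triangular in log-frequency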
def ConvertPandasDf2NpArray(df_array):
array = df_array.values if isinstance(df_array, pd.DataFrame) or isinstance(df_array, pd.Series) else df_array
return array
class BA18:
def __init__(self, file=None):
'''
Constructor for this class
Read CSV file of BA18 coefficients, frequency range: 0.1 - 100 Hz
Parameters
----------
file : string, optional
file name for coefficients. The default is None.
'''
if file is None:
file = '/mnt/halcloud_nfs/glavrent/Research/Nonerg_CA_GMM/Analyses/Python_lib/ground_motions/Bayless_ModelCoefs.csv'
df = pd.read_csv(file, index_col=0)
df = df.head(301)
# Frequencies 0.1 - 24 Hz
self.freq = df.index.values
# Median FAS parameters
self.b1 = df.c1.values
self.b2 = df.c2.values
self.b3quantity = df['(c2-c3)/cn'].values
self.b3 = df.c3.values
self.bn = df.cn.values
        self.bm = df.cM.values
self.b4 = df.c4.values
self.b5 = df.c5.values
self.b6 = df.c6.values
self.bhm = df.chm.values
self.b7 = df.c7.values
self.b8 = df.c8.values
self.b9 = df.c9.values
self.b10 = df.c10.values
self.b11a = df.c11a.values
self.b11b = df.c11b.values
self.b11c = df.c11c.values
self.b11d = df.c11d.values
self.b1a = df.c1a.values
self.b1a[239:] = 0
# Non-linear site parameters
self.f3 = df.f3.values
self.f4 = df.f4.values
self.f5 = df.f5.values
# Aleatory variability parameters
self.s1 = df.s1.values
self.s2 = df.s2.values
self.s3 = df.s3.values
self.s4 = df.s4.values
self.s5 = df.s5.values
self.s6 = df.s6.values
# Constants
self.b4a = -0.5
self.vsref = 1000
self.mbreak = 6.0
#bedrock anelastic attenuation
self.b7rock = self.b7.copy()
#frequency limits
# self.maxfreq = 23.988321
self.maxfreq = self.freq.max()
self.minfreq = self.freq.min()
def EasBase(self, mag, rrup, vs30, ztor, fnorm, z1, regid, flag_keep_b7 = True):
# note Z1 must be provided in km
z1ref = (1/1000) * np.exp(-7.67/4 * np.log((vs30**4+610**4)/(1360**4+610**4)) )
        if vs30 <= 200:
            self.b11 = self.b11a
        elif vs30 <= 300:
            self.b11 = self.b11b
        elif vs30 <= 500:
            self.b11 = self.b11c
        else:
            self.b11 = self.b11d
if z1 is None or np.isnan(z1):
            z1 = self.Z1(vs30, regid=regid)  # use the requested region rather than hard-coded CA
# Compute lnFAS by summing contributions, including linear site response
lnfas = self.b1 + self.b2*(mag-self.mbreak)
lnfas += self.b3quantity*np.log(1+np.exp(self.bn*(self.bm-mag)))
lnfas += self.b4*np.log(rrup+self.b5*np.cosh(self.b6*np.maximum(mag-self.bhm,0)))
lnfas += (self.b4a-self.b4) * np.log( np.sqrt(rrup**2+50**2) )
lnfas += self.b7 * rrup if flag_keep_b7 else 0.
lnfas += self.b8 * np.log( min(vs30,1000) / self.vsref )
lnfas += self.b9 * min(ztor,20)
lnfas += self.b10 * fnorm
lnfas += self.b11 * np.log( (min(z1,2) + 0.01) / (z1ref + 0.01) )
# this is the linear spectrum up to maxfreq=23.988321 Hz
maxfreq = 23.988321
imax = np.where(self.freq==maxfreq)[0][0]
fas_lin = np.exp(lnfas)
# Extrapolate to 100 Hz
fas_maxfreq = fas_lin[imax]
# Kappa
kappa = np.exp(-0.4*np.log(vs30/760)-3.5)
# Diminuition operator
D = np.exp(-np.pi*kappa*(self.freq[imax:] - maxfreq))
fas_lin = np.append(fas_lin[:imax], fas_maxfreq * D)
# Compute non-linear site response
# get the EAS_rock at 5 Hz (no c8, c11 terms)
vref=760
#row = df.iloc[df.index == 5.011872]
i5 = np.where(self.freq==5.011872)
lnfasrock5Hz = self.b1[i5]
lnfasrock5Hz += self.b2[i5]*(mag-self.mbreak)
lnfasrock5Hz += self.b3quantity[i5]*np.log(1+np.exp(self.bn[i5]*(self.bm[i5]-mag)))
lnfasrock5Hz += self.b4[i5]*np.log(rrup+self.b5[i5]*np.cosh(self.b6[i5]*max(mag-self.bhm[i5],0)))
lnfasrock5Hz += (self.b4a-self.b4[i5])*np.log(np.sqrt(rrup**2+50**2))
lnfasrock5Hz += self.b7rock[i5]*rrup
lnfasrock5Hz += self.b9[i5]*min(ztor,20)
lnfasrock5Hz += self.b10[i5]*fnorm
# Compute PGA_rock extimate from 5 Hz FAS
IR = np.exp(1.238+0.846*lnfasrock5Hz)
# apply the modified Hashash model
self.f2 = self.f4*( np.exp(self.f5*(min(vs30,vref)-360)) - np.exp(self.f5*(vref-360)) )
fnl0 = self.f2 * np.log((IR+self.f3)/self.f3)
fnl0[np.where(fnl0==min(fnl0))[0][0]:] = min(fnl0)
fas_nlin = np.exp( np.log(fas_lin) + fnl0 )
# Aleatory variability
if mag<4:
tau = self.s1
phi_s2s = self.s3
phi_ss = self.s5
if mag>6:
tau = self.s2
phi_s2s = self.s4
phi_ss = self.s6
if mag >= 4 and mag <= 6:
tau = self.s1 + ((self.s2-self.s1)/2)*(mag-4)
phi_s2s = self.s3 + ((self.s4-self.s3)/2)*(mag-4)
phi_ss = self.s5 + ((self.s6-self.s5)/2)*(mag-4)
sigma = np.sqrt(tau**2 + phi_s2s**2 + phi_ss**2 + self.b1a**2);
return self.freq, fas_nlin, fas_lin, sigma
def EasBaseArray(self, mag, rrup, vs30, ztor, fnorm, z1=None, regid=1, flag_keep_b7=True):
#convert eq parameters to np.arrays
mag = np.array([mag]).flatten()
rrup = np.array([rrup]).flatten()
vs30 = np.array([vs30]).flatten()
ztor = np.array([ztor]).flatten()
fnorm = np.array([fnorm]).flatten()
z1 = np.array([self.Z1(vs, regid) for vs in vs30]) if z1 is None else np.array([z1]).flatten()
#number of scenarios
npt = len(mag)
#input assertions
assert( np.all(npt == np.array([len(rrup),len(vs30),len(ztor),len(fnorm),len(z1)])) ),'Error. Inconsistent number of gmm parameters'
#compute fas for all scenarios
fas_nlin = list()
fas_lin = list()
sigma = list()
for k, (m, r, vs, zt, fn, z_1) in enumerate(zip(mag, rrup, vs30, ztor, fnorm, z1)):
ba18_base = self.EasBase(m, r, vs, zt, fn, z_1, regid, flag_keep_b7)[1:]
fas_nlin.append(ba18_base[0])
fas_lin.append(ba18_base[1])
sigma.append(ba18_base[2])
#combine them to np.arrays
fas_nlin = np.vstack(fas_nlin)
fas_lin = np.vstack(fas_lin)
sigma = np.vstack(sigma)
# if npt == 1 and flag_flatten:
# fas_nlin = fas_nlin.flatten()
# fas_lin = fas_lin.flatten()
# sigma = sigma.flatten()
#return self.EasBase(mag, rrup, vs30, ztor, fnorm, z1, regid, flag_keep_b7)
return self.freq, fas_nlin, fas_lin, sigma
def Eas(self, mag, rrup, vs30, ztor, fnorm, z1=None, regid=1, flag_keep_b7=True, flag_flatten=True):
'''
Computes BA18 EAS GMM for all frequencies
Parameters
----------
mag : real
moment magnitude [3-8].
rrup : real
Rupture distance in kilometers (km) [0-300].
vs30 : real
site-specific Vs30 = slowness-averaged shear wavespeed of upper 30 m (m/s) [120-1500].
ztor : real
depth to top of rupture (km) [0-20].
fnorm : real
1 for normal faults and 0 for all other faulting types (no units) [0 or 1].
z1 : real, optional
site-specific depth to shear wavespeed of 1 km/s (km) [0-2]. The default is =None.
regid : int, optional
DESCRIPTION. The default is =1.
Returns
-------
freq : np.array
frequency array.
fas_nlin : np.array
fas array with nonlinear site response.
fas_lin : np.array
fas array with linear site response.
sigma : np.array
standard deviation array.
'''
#return self.EasBase(mag, rrup, vs30, ztor, fnorm, z1, regid, flag_keep_b7)
# return self.EasBaseArray(mag, rrup, vs30, ztor, fnorm, z1, regid, flag_keep_b7, flag_flatten)
freq, fas_nlin, fas_lin, sigma = self.EasBaseArray(mag, rrup, vs30, ztor, fnorm, z1, regid, flag_keep_b7)
#flatten arrays if only one datapoint
if fas_nlin.shape[0] == 1 and flag_flatten:
fas_nlin = fas_nlin.flatten()
fas_lin = fas_lin.flatten()
sigma = sigma.flatten()
return freq, fas_nlin, fas_lin, sigma
def EasF(self, freq, mag, rrup, vs30, ztor, fnorm, z1=None, regid=1, flag_keep_b7 = True, flag_flatten=True):
'''
Computes BA18 EAS GMM for frequency of interest
Parameters
        ----------
        freq : real or np.array
            frequency(ies) of interest (Hz).
        mag : real
moment magnitude [3-8].
rrup : real
Rupture distance in kilometers (km) [0-300].
vs30 : real
site-specific Vs30 = slowness-averaged shear wavespeed of upper 30 m (m/s) [120-1500].
ztor : real
depth to top of rupture (km) [0-20].
fnorm : real
1 for normal faults and 0 for all other faulting types (no units) [0 or 1].
z1 : real, optional
            site-specific depth to shear wavespeed of 1 km/s (km) [0-2]. The default is None.
        regid : int, optional
            region ID (1: California, 10: Japan). The default is 1.
        flag_keep_b7 : bool, optional
            flag for keeping the b7 term in the base-model computation. The default is True.
        flag_flatten : bool, optional
            if True, flatten the output arrays when only a single scenario is evaluated. The default is True.
Returns
-------
        fas : real or np.array
            fas with nonlinear site response at the frequency(ies) of interest.
        fas_lin : real or np.array
            fas with linear site response at the frequency(ies) of interest.
        sigma : real or np.array
            standard deviation at the frequency(ies) of interest.
'''
#convert freq to numpy array
freq = np.array([freq]).flatten()
        #frequency matching tolerance (relative)
f_tol = 1e-4
#compute fas for all frequencies
freq_all, fas_all, fas_lin_all, sig_all = self.EasBaseArray(mag, rrup, vs30, ztor, fnorm, z1, regid, flag_keep_b7)
#find eas for frequency of interest
if np.all([np.isclose(f, freq_all, f_tol).any() for f in freq]):
# i_f = np.array([np.where(np.isclose(f, freq_all, f_tol))[0] for f in freq]).flatten()
i_f = np.array([np.argmin(np.abs(f-freq_all)) for f in freq]).flatten()
freq = freq_all[i_f]
fas = fas_all[:,i_f]
fas_lin = fas_lin_all[:,i_f]
sigma = sig_all[:,i_f]
else:
            fas = np.vstack([np.exp(np.interp(np.log(np.abs(freq)), np.log(freq_all), np.log(fas), left=np.nan, right=np.nan)) for fas in fas_all])
            fas_lin = np.vstack([np.exp(np.interp(np.log(np.abs(freq)), np.log(freq_all), np.log(fas_l), left=np.nan, right=np.nan)) for fas_l in fas_lin_all])
            sigma = np.vstack([ np.interp(np.log(np.abs(freq)), np.log(freq_all), sig, left=np.nan, right=np.nan) for sig in sig_all])
#if one scenario flatten arrays
if fas.shape[0] == 1 and flag_flatten:
fas = fas.flatten()
fas_lin = fas_lin.flatten()
sigma = sigma.flatten()
return fas, fas_lin, sigma
def GetFreq(self):
return np.array(self.freq)
def Z1(self, vs30, regid=1):
'''
Compute Z1.0 based on Vs30 for CA and JP
Parameters
----------
vs30 : real
            Time-averaged shear-wave velocity of the top 30 m (m/sec).
        regid : int, optional
            Region ID (1: California, 10: Japan). The default is 1.
        Returns
        -------
        real
            Depth to a shear-wave velocity of 1000 m/sec, in km.
'''
        if regid == 1:    #CA
            z_1 = -7.67/4. * np.log((vs30**4+610.**4)/(1360.**4+610.**4))
        elif regid == 10: #JP
            z_1 = -5.23/4. * np.log((vs30**4+412.**4)/(1360.**4+412.**4))
        else:
            raise ValueError('Unsupported region id: %i'%regid)
return 1/1000*np.exp(z_1)
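# --- Illustrative usage (comment sketch; not part of the original library) ---
# The class these methods belong to is defined above this excerpt; `gmm` below
# stands for an instance of it, with arbitrary example values inside the
# documented parameter ranges:
#   gmm = BA18()  #hypothetical constructor name
#   freq, eas, eas_lin, sig = gmm.Eas(mag=6.5, rrup=30., vs30=400., ztor=4., fnorm=0)
#   eas_5hz, eas_5hz_lin, sig_5hz = gmm.EasF(5.0, mag=6.5, rrup=30., vs30=400., ztor=4., fnorm=0)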
| 14,209 | 36.005208 | 161 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/ground_motions/pylib_Willis15CA_Vs30.py | """
Created on Tue Feb 2 19:01:47 2021
@author: glavrent
"""
#load variables
import pathlib
import numpy as np
import rasterio
class Willis15Vs30CA:
def __init__(self, fname_vs30map_med=None, fname_vs30map_sig=None):
#file path
root = pathlib.Path(__file__).parent
#vs30 data filenames
fname_vs30map_med = '/mnt/halcloud_nfs/glavrent/Research/Other_projects/VS30_CA/data/California_vs30_Wills15_hybrid_7p5c.tif' if fname_vs30map_med is None else fname_vs30map_med
fname_vs30map_sig = '/mnt/halcloud_nfs/glavrent/Research/Other_projects/VS30_CA/data/California_vs30_Wills15_hybrid_7p5c_sd.tif' if fname_vs30map_sig is None else fname_vs30map_sig
#load vs30 data
# self.vs30map_med = rasterio.open(root / 'data/California_vs30_Wills15_hybrid_7p5c.tif')
# self.vs30map_sig = rasterio.open(root / 'data/California_vs30_Wills15_hybrid_7p5c_sd.tif')
self.vs30map_med = rasterio.open( fname_vs30map_med )
self.vs30map_sig = rasterio.open( fname_vs30map_sig )
def lookup(self, lonlats):
        return (
            np.fromiter(self.vs30map_med.sample(lonlats, 1), float),  #np.float was removed in NumPy >= 1.24
            np.fromiter(self.vs30map_sig.sample(lonlats, 1), float)
        )
def test_lookup(self):
medians, stds = list(self.lookup([(-122.258, 37.875), (-122.295, 37.895)]))
np.testing.assert_allclose(medians, [733.4, 351.9], rtol=0.01)
np.testing.assert_allclose(stds, [0.432, 0.219], rtol=0.01)
| 1,564 | 36.261905 | 188 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/ground_motions/pylib_NGMM_prediction.py | """
Created on Sat Aug 20 14:54:54 2022
@author: glavrent
"""
# Packages
#arithmetic libraries
import numpy as np
from scipy import linalg as scipylinalg
from sklearn.gaussian_process.kernels import Matern
#user functions
import pylib_kernels as pylib_kern
import pylib_cell_dist as pylib_cells
# Non-ergodic GMM effects prediction
def PredictNErgEffects(n_samp, nerg_coeff_info, df_scen_predict, df_nerg_coeffs,
nerg_catten_info=None, df_cell_info=None, df_nerg_cellatten=None):
'''
Predict non-egodic ground motion effects
Parameters
----------
n_samp : int
Number of samples.
nerg_coeff_info : dict
Non-ergodic coefficient information dictionary.
df_scen_predict : pd.dataframe
Prediction scenarios.
df_nerg_coeffs : pd.dataframe
Regressed non-ergodic coefficients .
nerg_catten_info : dict, optional
cell-specific anelastic attenuation information dictionary. The default is None.
df_cell_info : pd.dataframe, optional
Cell info dataframe. The default is None.
df_nerg_cellatten : pd.dataframe, optional
Regressed anelastic attenuation coefficients. The default is None.
Returns
-------
nerg_effects_prdct_samp : np.array
Samples of total non-ergodic effects.
    nerg_vcm_prdct_samp : np.array
        Samples of spatially varying component of non-ergodic effects.
    nerg_atten_prdct_samp : np.array
        Samples of anelastic attenuation component of non-ergodic effects.
    nerg_effects_prdct_mu : np.array
        Mean of total non-ergodic effects.
    nerg_effects_prdct_sig : np.array
        Standard deviation of total non-ergodic effects.
    nerg_vcm_cmp : dict
        Dictionary with individual components of spatially varying non-ergodic effects.
    nerg_atten_cmp : dict
        Dictionary with individual components of anelastic attenuation.
'''
#number of prediction scenarios
n_predict = len(df_scen_predict)
# VCM component
#initialize vcm samples
nerg_vcm_prdct_samp = np.zeros(shape=(n_predict,n_samp))
nerg_vcm_prdct_mu = np.zeros(shape=n_predict)
nerg_vcm_prdct_var = np.zeros(shape=n_predict)
nerg_vcm_cmp = {}
#iterate over non-ergodic coefficients
for nerg_c in nerg_coeff_info:
#kernel type
k_type = nerg_coeff_info[nerg_c]['kernel_type']
#hyper-parameters
if 'hyp' in nerg_coeff_info[nerg_c]:
hyp_param = nerg_coeff_info[nerg_c]['hyp']
hyp_mean_c = hyp_param['mean_c'] if (('mean_c' in hyp_param) and (not hyp_param['mean_c'] is None)) else 0
hyp_ell = hyp_param['ell'] if (('ell' in hyp_param) and (not hyp_param['ell'] is None)) else 0
hyp_omega = hyp_param['omega'] if (('omega' in hyp_param) and (not hyp_param['omega'] is None)) else 0
hyp_pi = hyp_param['pi'] if (('pi' in hyp_param) and (not hyp_param['pi'] is None)) else 0
hyp_nu = hyp_param['nu'] if (('nu' in hyp_param) and (not hyp_param['nu'] is None)) else 0
#mean and std of non-ergodic coefficients at known locations
c_mean_train = df_nerg_coeffs.loc[:,nerg_coeff_info[nerg_c]['coeff'][0]].values
c_sig_train = df_nerg_coeffs.loc[:,nerg_coeff_info[nerg_c]['coeff'][1]].values
#non-ergodic coefficient scaling
c_scl = np.ones(n_predict) if nerg_coeff_info[nerg_c]['scaling'] is None else df_nerg_coeffs.loc[:,nerg_coeff_info[nerg_c]['scaling']].values
        if k_type == 0: #constant
assert(len(np.unique(c_mean_train))==1)
#mean and std of non-ergodic coefficient
c_mean_train = c_mean_train[0]
c_sig_train = c_sig_train[0]
#draw random samples
c_prdct_samp = np.random.normal(loc=c_mean_train, scale=c_sig_train, size=n_samp)
#sample non-ergodic coefficient for prediction scenarios
c_prdct_samp = np.full((n_predict,n_samp), c_prdct_samp)
#mean and sigma
c_prdct_mu = np.full(n_predict, c_mean_train)
c_prdct_sig = np.full(n_predict, c_sig_train)
if k_type == 1: #group
#group ids in training data
id_train = df_nerg_coeffs.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
id_train, idx_train = np.unique(id_train, axis=0, return_index=True)
#group ids in prediction data
id_prdct = df_scen_predict.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
id_prdct, inv_prdct = np.unique(id_prdct, axis=0, return_inverse=True)
#mean and std of non-ergodic coefficient
c_mean_train = c_mean_train[idx_train]
c_sig_train = c_sig_train[idx_train]
#compute mean and cov of non-erg coeffs for prediction scenarios
c_prdct_mu, _, c_prdct_cov = pylib_kern.PredictGroupKern(id_prdct, id_train,
c_train_mu=c_mean_train, c_train_sig=c_sig_train,
hyp_mean_c=hyp_mean_c, hyp_omega=hyp_omega)
#sample non-ergodic coefficient for prediction scenarios
c_prdct_samp = MVNRnd(mean=c_prdct_mu, cov=c_prdct_cov, n_samp=n_samp)
c_prdct_samp = c_prdct_samp[inv_prdct,:]
#mean and sigma
c_prdct_mu = c_prdct_mu[inv_prdct]
c_prdct_sig = np.sqrt( np.diag(c_prdct_cov) )[inv_prdct]
        if k_type == 2: #exponential
#coordinates of training data
t_train = df_nerg_coeffs.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
t_train, idx_train = np.unique(t_train, axis=0, return_index=True)
#coordinates of prediction data
t_prdct = df_scen_predict.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
t_prdct, inv_prdct = np.unique(t_prdct, axis=0, return_inverse=True)
#mean and std of non-ergodic coefficient
c_mean_train = c_mean_train[idx_train]
c_sig_train = c_sig_train[idx_train]
#compute mean and cov of non-erg coeffs for prediction scenarios
c_prdct_mu, _, c_prdct_cov = pylib_kern.PredictExpKern(t_prdct, t_train,
c_train_mu=c_mean_train, c_train_sig=c_sig_train,
hyp_mean_c=hyp_mean_c, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi)
#sample non-ergodic coefficient for prediction scenarios
c_prdct_samp = MVNRnd(mean=c_prdct_mu, cov=c_prdct_cov, n_samp=n_samp)
c_prdct_samp = c_prdct_samp[inv_prdct,:]
#mean and sigma
c_prdct_mu = c_prdct_mu[inv_prdct]
c_prdct_sig = np.sqrt( np.diag(c_prdct_cov) )[inv_prdct]
        if k_type == 3: #squared exponential
#coordinates of training data
t_train = df_nerg_coeffs.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
t_train, idx_train = np.unique(t_train, axis=0, return_index=True)
#coordinates of prediction data
t_prdct = df_scen_predict.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
t_prdct, inv_prdct = np.unique(t_prdct, axis=0, return_inverse=True)
#mean and std of non-ergodic coefficient
c_mean_train = c_mean_train[idx_train]
c_sig_train = c_sig_train[idx_train]
#compute mean and cov of non-erg coeffs for prediction scenarios
c_prdct_mu, _, c_prdct_cov = pylib_kern.PredictSqExpKern(t_prdct, t_train,
c_train_mu=c_mean_train, c_train_sig=c_sig_train,
hyp_mean_c=hyp_mean_c, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi)
#sample non-ergodic coefficient for prediction scenarios
c_prdct_samp = MVNRnd(mean=c_prdct_mu, cov=c_prdct_cov, n_samp=n_samp)
c_prdct_samp = c_prdct_samp[inv_prdct,:]
#mean and sigma
c_prdct_mu = c_prdct_mu[inv_prdct]
c_prdct_sig = np.sqrt( np.diag(c_prdct_cov) )[inv_prdct]
if k_type == 4: #Matern kernel function
#coordinates of training data
t_train = df_nerg_coeffs.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
t_train, idx_train = np.unique(t_train, axis=0, return_index=True)
#coordinates of prediction data
t_prdct = df_scen_predict.loc[:,nerg_coeff_info[nerg_c]['cor_info']].values
t_prdct, inv_prdct = np.unique(t_prdct, axis=0, return_inverse=True)
#mean and std of non-ergodic coefficient
c_mean_train = c_mean_train[idx_train]
c_sig_train = c_sig_train[idx_train]
#compute mean and cov of non-erg coeffs for prediction scenarios
c_prdct_mu, _, c_prdct_cov = pylib_kern.PredictMaternKern(t_prdct, t_train,
c_train_mu=c_mean_train, c_train_sig=c_sig_train,
hyp_mean_c=hyp_mean_c, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi,
hyp_nu=hyp_nu)
#sample non-ergodic coefficient for prediction scenarios
c_prdct_samp = MVNRnd(mean=c_prdct_mu, cov=c_prdct_cov, n_samp=n_samp)
c_prdct_samp = c_prdct_samp[inv_prdct,:]
#mean and sigma
c_prdct_mu = c_prdct_mu[inv_prdct]
c_prdct_sig = np.sqrt( np.diag(c_prdct_cov) )[inv_prdct]
#add contribution of non-ergodic effect
nerg_vcm_prdct_samp += c_scl[:,np.newaxis] * c_prdct_samp
#mean and std contribution of non-ergodic effect
nerg_vcm_prdct_mu += c_scl * c_prdct_mu
nerg_vcm_prdct_var += c_scl**2 * c_prdct_sig**2
#summarize individual components
nerg_vcm_cmp[nerg_c] = [c_scl * c_prdct_mu, c_scl * c_prdct_sig, c_scl[:,np.newaxis] * c_prdct_samp]
# Anelastic attenuation
#initialize anelastic attenuation
nerg_atten_prdct_samp = np.zeros(shape=(n_predict,n_samp))
nerg_atten_prdct_mu = np.zeros(shape=n_predict)
nerg_atten_prdct_var = np.zeros(shape=n_predict)
nerg_atten_cmp = {}
if not nerg_catten_info is None:
#cell edge coordinates for path seg calculation
ct4dist = df_cell_info.loc[:,['q1X', 'q1Y', 'q1Z', 'q8X', 'q8Y', 'q8Z']].values
#cell limts
c_lmax = ct4dist[:,[3,4,5]].max(axis=0)
c_lmin = ct4dist[:,[0,1,2]].min(axis=0)
#compute cell-path
cell_path = np.zeros([n_predict, len(df_cell_info)])
for j, (rsn, scn_p) in enumerate(df_scen_predict.iterrows()):
pt1 = scn_p[['eqX','eqY','eqZ']].values.astype(float)
pt2 = np.hstack([scn_p[['staX','staY']].values, 0]).astype(float)
#check limits
assert(np.logical_and(pt1>=c_lmin, pt1<=c_lmax).all()),'Error. Eq outside cell domain for rsn: %i'%rsn
assert(np.logical_and(pt2>=c_lmin, pt2<=c_lmax).all()),'Error. Sta outside cell domain for rsn: %i'%rsn
#cell paths for pt1 - pt2
cell_path[j,:] = pylib_cells.ComputeDistGridCells(pt1,pt2,ct4dist, flagUTM=True)
#keep only cells with non-zero paths
ca_valid = cell_path.sum(axis=0) > 0
cell_path = cell_path[:,ca_valid]
df_cell_info = df_cell_info.loc[ca_valid,:]
#iterate over anelastic attenuation components
for nerg_ca in nerg_catten_info:
#kernel type
k_type = nerg_catten_info[nerg_ca]['kernel_type']
#mean and std anelastic attenuation cells
ca_mean_train = df_nerg_cellatten.loc[:,nerg_catten_info[nerg_ca]['catten'][0]].values
ca_sig_train = df_nerg_cellatten.loc[:,nerg_catten_info[nerg_ca]['catten'][1]].values
#hyper-parameters
hyp_param = nerg_catten_info[nerg_ca]['hyp']
hyp_mean_ca = hyp_param['mean_ca'] if (('mean_ca' in hyp_param) and (not hyp_param['mean_ca'] is None)) else 0
hyp_ell = hyp_param['ell'] if (('ell' in hyp_param) and (not hyp_param['ell'] is None)) else 0
hyp_ell1 = hyp_param['ell1'] if (('ell1' in hyp_param) and (not hyp_param['ell1'] is None)) else np.nan
hyp_ell2 = hyp_param['ell2'] if (('ell2' in hyp_param) and (not hyp_param['ell2'] is None)) else np.nan
hyp_omega = hyp_param['omega'] if (('omega' in hyp_param) and (not hyp_param['omega'] is None)) else 0
hyp_omega1 = hyp_param['omega1'] if (('omega1' in hyp_param) and (not hyp_param['omega1'] is None)) else np.nan
hyp_omega2 = hyp_param['omega2'] if (('omega2' in hyp_param) and (not hyp_param['omega2'] is None)) else np.nan
hyp_pi = hyp_param['pi'] if (('pi' in hyp_param) and (not hyp_param['pi'] is None)) else 0
hyp_nu = hyp_param['nu'] if (('nu' in hyp_param) and (not hyp_param['nu'] is None)) else 0
#select kernel function
if k_type == 1: #independent cells
#cell ids in training data
# cid_train = df_nerg_cellatten.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
cid_train = df_nerg_cellatten.index.values
#cell ids in prediction data
# cid_prdct = df_cell_info.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
cid_prdct = df_cell_info.index.values
#compute mean and cov of cell anelastic coeffs for prediction scenarios
ca_prdct_mu, _, ca_prdct_cov = pylib_kern.PredictGroupKern(cid_prdct, cid_train,
c_train_mu=ca_mean_train, c_train_sig=ca_sig_train,
hyp_mean_c=hyp_mean_ca , hyp_omega=hyp_omega)
            if k_type == 2: #exponential
#cell coordinates of training data
ct_train = df_nerg_cellatten.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#cell coordinates of prediction data
ct_prdct = df_cell_info.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#compute mean and cov of cell anelastic coeffs for prediction scenarios
ca_prdct_mu, _, ca_prdct_cov = pylib_kern.PredictExpKern(ct_prdct, ct_train,
c_train_mu=ca_mean_train, c_train_sig=ca_sig_train,
hyp_mean_c=hyp_mean_ca, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi)
            if k_type == 3: #squared exponential
#cell coordinates of training data
ct_train = df_nerg_cellatten.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#cell coordinates of prediction data
ct_prdct = df_cell_info.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#compute mean and cov of cell anelastic coeffs for prediction scenarios
ca_prdct_mu, _, ca_prdct_cov = pylib_kern.PredictSqExpKern(ct_prdct, ct_train,
c_train_mu=ca_mean_train, c_train_sig=ca_sig_train,
hyp_mean_c=hyp_mean_ca, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi)
if k_type == 4: #Matern
#cell coordinates of training data
ct_train = df_nerg_cellatten.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#cell coordinates of prediction data
ct_prdct = df_cell_info.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#compute mean and cov of cell anelastic coeffs for prediction scenarios
                ca_prdct_mu, _, ca_prdct_cov = pylib_kern.PredictMaternKern(ct_prdct, ct_train,
                                                                            c_train_mu=ca_mean_train, c_train_sig=ca_sig_train,
                                                                            hyp_mean_c=hyp_mean_ca, hyp_ell=hyp_ell, hyp_omega=hyp_omega,
                                                                            hyp_pi=hyp_pi, hyp_nu=hyp_nu)
            if k_type == 5: #exponential and spatially independent composite
#cell coordinates of training data
ct_train = df_nerg_cellatten.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#cell coordinates of prediction data
ct_prdct = df_cell_info.loc[:,nerg_catten_info[nerg_ca]['cor_info']].values
#compute mean and cov of cell anelastic coeffs for prediction scenarios
ca_prdct_mu, _, ca_prdct_cov = pylib_kern.PredictNegExpSptInptKern(ct_prdct, ct_train,
c_train_mu=ca_mean_train, c_train_sig=ca_sig_train,
hyp_mean_c=hyp_mean_ca, hyp_ell1=hyp_ell1, hyp_omega1=hyp_omega1,
hyp_omega2=hyp_omega2, hyp_pi=hyp_pi)
#sample cell-specific anelastic coefficients for prediction scenarios
ca_prdct_samp = MVNRnd(mean=ca_prdct_mu, cov=ca_prdct_cov, n_samp=n_samp)
ca_prdct_sig = np.sqrt( np.diag(ca_prdct_cov) )
#effect of anelastic attenuation
nerg_atten_prdct_samp += cell_path @ ca_prdct_samp
nerg_atten_prdct_mu += cell_path @ ca_prdct_mu
nerg_atten_prdct_var += np.square(cell_path) @ ca_prdct_sig**2
#summarize individual anelastic components
nerg_atten_cmp[nerg_ca] = [cell_path @ ca_prdct_mu, np.sqrt(np.square(cell_path) @ ca_prdct_sig**2),
cell_path @ ca_prdct_samp]
#total non-ergodic effects
nerg_effects_prdct_samp = nerg_vcm_prdct_samp + nerg_atten_prdct_samp
nerg_effects_prdct_mu = nerg_vcm_prdct_mu + nerg_atten_prdct_mu
nerg_effects_prdct_sig = np.sqrt(nerg_vcm_prdct_var + nerg_atten_prdct_var)
return nerg_effects_prdct_samp, nerg_vcm_prdct_samp, nerg_atten_prdct_samp, \
nerg_effects_prdct_mu, nerg_effects_prdct_sig, \
nerg_vcm_cmp, nerg_atten_cmp
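# --- Illustrative sketch of the expected `nerg_coeff_info` structure (not part of
# the original library; key names are read from the code above, example column
# names and hyperparameter values are assumed) ---
#   nerg_coeff_info = {
#       'dc_1e': {'kernel_type': 2,                       #0: constant, 1: group, 2: exp,
#                                                         #3: sq. exp, 4: Matern
#                 'coeff':    ('dc_1e_mu', 'dc_1e_sig'),  #hypothetical columns in df_nerg_coeffs
#                 'scaling':  None,                       #optional scaling column
#                 'cor_info': ['eqX', 'eqY'],             #coordinate/group columns
#                 'hyp': {'mean_c': 0., 'ell': 60., 'omega': 0.3, 'pi': 0.}}}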
# Multivariate normal distribution random samples
def MVNRnd(mean = None, cov = None, seed = None, n_samp = None, flag_sp = False, flag_list = False):
'''
    Draw random samples from a Multivariate Normal distribution
Parameters
----------
mean : np.array(n), optional
Mean array. The default is None.
cov : np.array(n,n), optional
Covariance Matrix. The default is None.
    seed : np.array or list of np.array, optional
        Standard-normal draws of size (n_dim, n_samp) used to generate the
        samples (a list of such arrays if flag_list=True); drawn internally
        if None. The default is None.
n_samp : int, optional
Number of samples. The default is None.
flag_sp : boolean, optional
Sparse covariance matrix flag; if sparse flag_sp = True. The default is False.
flag_list : boolean, optional
Flag returning output as list. The default is False.
Returns
-------
samp
Sampled values.
'''
    #put seed(s) in list form if not already a list
if flag_list:
seed_list = seed if not seed is None else [None]
else:
seed_list = [seed]
#number of dimensions
n_dim = len(mean) if not mean is None else cov.shape[0]
assert(cov.shape == (n_dim,n_dim)),'Error. Inconsistent size of mean array and covariance matrix'
#set mean array to zero if not given
if mean is None: mean = np.zeros(n_dim)
#compute L D L' decomposition
if flag_sp: cov = cov.toarray()
L, D, _ = scipylinalg.ldl(cov)
assert( not np.count_nonzero(D - np.diag(np.diagonal(D))) ),'Error. D not diagonal'
assert( np.all(np.diag(D) > -1e-1) ),'Error. D diagonal is negative'
#extract diagonal from D matrix, set to zero any negative entries due to bad conditioning
d = np.diagonal(D).copy()
d[d<0] = 0
#compute Q matrix
Q = L @ np.diag(np.sqrt(d))
#generate random sample
samp_list = list()
for k, seed in enumerate(seed_list):
        #generate standard-normal draws if not given
        if seed is None: seed = np.random.standard_normal(size=(n_dim, n_samp))
        #generate multivariate-normal random samples
        samp = Q @ seed
samp += mean[:,np.newaxis] if samp.ndim > 1 else mean
#summarize samples
samp_list.append( samp )
return samp_list if flag_list else samp_list[0]
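# --- Illustrative usage of MVNRnd (sketch; not part of the original library) ---
if __name__ == '__main__':
    #draw 1000 samples from a bivariate normal with 0.5 correlation
    mvn_mean = np.array([0.0, 1.0])
    mvn_cov  = np.array([[1.0, 0.5],
                         [0.5, 1.0]])
    mvn_samp = MVNRnd(mean=mvn_mean, cov=mvn_cov, n_samp=1000)
    print(mvn_samp.shape)         #(2, 1000): one column per sample
    print(mvn_samp.mean(axis=1))  #approximately mvn_mean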
| 21,359 | 54.051546 | 154 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/ground_motions/pylib_cell_dist.py | """
Created on Sun May 3 17:25:10 2020
@author: glavrent
"""
#load libraries
import numpy as np
import geopy.distance as geopydist
def ComputeDistUnGridCells(pt1, pt2, cells, diffx, diffy, flagUTM=False):
'''
Compute the path distances of uniformly gridded cells
Parameters
----------
pt1 : np.array(3)
Latitude, Longitude, elevation coordinates of first point.
pt2 : np.array(3)
Latitude, Longitude, elevation coordinates of second point.
cells : np.array(n_cells, 4)
        Cell coordinates, Cartesian or LatLon:
            Cartesian: [x, y, elev_bot, elev_top]
            LatLon:    [lon, lat, elev_bot, elev_top]
        (bottom and top cell elevations in the last two columns)
    diffx : real
        Cell interval along the x (longitude) direction.
    diffy : real
        Cell interval along the y (latitude) direction.
    flagUTM : bool, optional
        If True, coordinates are Cartesian (UTM km); if False, LatLon. The default is False.
Returns
-------
dm : np.array(n_cells)
Distance path on each cell.
'''
#grid points
x_grid = np.unique(cells[:, 0])
y_grid = np.unique(cells[:, 1])
z_grid = np.unique(cells[:, 2])
## find x,y,z grid points which are between source and site
x_v = np.sort([pt1[0], pt2[0]])
x_g_pts = x_grid[(x_v[0] <= x_grid) & (x_grid < x_v[1])]
y_v = np.sort([pt1[1], pt2[1]])
y_g_pts = y_grid[(y_v[0] <= y_grid) & (y_grid < y_v[1])]
z_v = np.sort([pt1[2], pt2[2]])
z_g_pts = z_grid[(z_v[0] <= z_grid) & (z_grid < z_v[1])]
#p1-pt2 vector
vec = np.subtract(pt1, pt2)
# intersection points for x
    normal = [1, 0, 0]
ptx = np.ones(len(x_g_pts) * 3)
if len(x_g_pts) > 0:
ptx = ptx.reshape(len(x_g_pts), 3)
for i, xv in enumerate(x_g_pts):
ptplane = [xv, y_grid[0], 0]
d = np.divide(np.dot(np.subtract(ptplane,pt1),normal), np.dot(vec,normal))
pt = pt1 + d * vec
ptx[i] = pt
else:
ptx = [[-999, -999, -999]]
# intersection points for y
    normal = [0, 1, 0]
pty = np.ones(len(y_g_pts) * 3)
if len(y_g_pts) > 0:
pty = pty.reshape(len(y_g_pts), 3)
for i, yv in enumerate(y_g_pts):
ptplane = [x_grid[0], yv, 0]
d = np.divide(np.dot(np.subtract(ptplane,pt1),normal), np.dot(vec,normal))
pt = pt1 + d * vec
pty[i] = pt
else:
pty = [[-999, -999, -999]]
# intersection points for z
normal = [0, 0, 1]
ptz = np.ones(len(z_g_pts) * 3)
if len(z_g_pts) > 0:
ptz = ptz.reshape(len(z_g_pts), 3)
for i, zv in enumerate(z_g_pts):
ptplane = [x_grid[0], y_grid[0], zv]
d = np.divide(np.dot(np.subtract(ptplane,pt1),normal), np.dot(vec,normal))
pt = pt1 + d * vec
ptz[i] = pt
else:
ptz = [[-999, -999, -999]]
#summarize all intersection points
ptall = np.concatenate(([pt1], [pt2], ptx, pty, ptz))
ptall = ptall[(ptall[:, 0] != -999) & (ptall[:, 1] != -999) & (ptall[:, 2] != -999)]
ptall = np.unique(ptall, axis=0)
if pt1[0] != pt2[0]:
ptall = ptall[ptall[:, 0].argsort()] #sort points by x coordinate
else:
ptall = ptall[ptall[:, 1].argsort()] #sort points by y coordinate
#cell ids
id_cells = np.arange(len(cells))
#compute cell distance
idx = np.zeros(len(ptall)-1)
distances = np.ones(len(ptall)-1)
for i in range(len(ptall) - 1):
p1 = ptall[i] #first intersection point
p2 = ptall[i+1] #second intersection point
#cell indices of cells where the first intersection point belongs
idx1 = id_cells[(cells[:, 0] <= p1[0]) & (p1[0] <= cells[:, 0] + diffx) & \
(cells[:, 1] <= p1[1]) & (p1[1] <= cells[:, 1] + diffy) & \
(cells[:, 2] <= p1[2]) & (p1[2] <= cells[:, 3])]
#cell indices of cells where the second intersection point belongs
idx2 = id_cells[(cells[:, 0] <= p2[0]) & (p2[0] <= cells[:, 0] + diffx) & \
(cells[:, 1] <= p2[1]) & (p2[1] <= cells[:, 1] + diffy) & \
(cells[:, 2] <= p2[2]) & (p2[2] <= cells[:, 3])]
#common indices of first and second int points
idx[i] = np.intersect1d(idx1, idx2)
#compute path distance
if not flagUTM:
dxy = geopydist.distance(ptall[i,(1,0)],ptall[i + 1,(1,0)]).km
else:
            dxy = np.linalg.norm(ptall[i,0:2] - ptall[i + 1,0:2])
        dz = ptall[i,2] - ptall[i + 1,2]
        distances[i] = np.sqrt(dxy**2 + dz**2)
dm = np.zeros(len(cells))
dm[idx.astype(int)] = distances
return dm
def ComputeDistGridCells(pt1, pt2, cells, flagUTM=False):
'''
Compute the path distances of gridded cells
Parameters
----------
pt1 : np.array(3)
Latitude, Longitude, elevation coordinates of first point.
pt2 : np.array(3)
Latitude, Longitude, elevation coordinates of second point.
    cells : np.array(n_cells, 6)
        Coordinates of the bottom-left (q1) and top-right (q8) corners of cells
        [q1_x, q1_y, q1_elev, q8_x, q8_y, q8_elev]
    flagUTM : bool, optional
        If True, coordinates are Cartesian (UTM km); if False, LatLon. The default is False.
Returns
-------
dm : np.array(n_cells)
Distance path on each cell.
'''
#grid points
x_grid = np.unique(cells[:, 0])
y_grid = np.unique(cells[:, 1])
z_grid = np.unique(cells[:, 2])
## find x,y,z grid points which are between source and site
x_v = np.sort([pt1[0], pt2[0]])
x_g_pts = x_grid[(x_v[0] <= x_grid) & (x_grid < x_v[1])]
y_v = np.sort([pt1[1], pt2[1]])
y_g_pts = y_grid[(y_v[0] <= y_grid) & (y_grid < y_v[1])]
z_v = np.sort([pt1[2], pt2[2]])
z_g_pts = z_grid[(z_v[0] <= z_grid) & (z_grid < z_v[1])]
#p1-pt2 vector
vec = np.subtract(pt1, pt2)
# intersection points for x
    normal = [1, 0, 0]
ptx = np.ones(len(x_g_pts) * 3)
if len(x_g_pts) > 0:
ptx = ptx.reshape(len(x_g_pts), 3)
for i, xv in enumerate(x_g_pts):
ptplane = [xv, y_grid[0], 0]
d = np.divide(np.dot(np.subtract(ptplane,pt1),normal), np.dot(vec,normal))
pt = pt1 + d * vec
ptx[i] = pt
else:
ptx = [[-999, -999, -999]]
# intersection points for y
    normal = [0, 1, 0]
pty = np.ones(len(y_g_pts) * 3)
if len(y_g_pts) > 0:
pty = pty.reshape(len(y_g_pts), 3)
for i, yv in enumerate(y_g_pts):
ptplane = [x_grid[0], yv, 0]
d = np.divide(np.dot(np.subtract(ptplane,pt1),normal), np.dot(vec,normal))
pt = pt1 + d * vec
pty[i] = pt
else:
pty = [[-999, -999, -999]]
# intersection points for z
normal = [0, 0, 1]
ptz = np.ones(len(z_g_pts) * 3)
if len(z_g_pts) > 0:
ptz = ptz.reshape(len(z_g_pts), 3)
for i, zv in enumerate(z_g_pts):
ptplane = [x_grid[0], y_grid[0], zv]
d = np.divide(np.dot(np.subtract(ptplane,pt1),normal), np.dot(vec,normal))
pt = pt1 + d * vec
ptz[i] = pt
else:
ptz = [[-999, -999, -999]]
#summarize all intersection points
ptall = np.concatenate(([pt1], [pt2], ptx, pty, ptz))
ptall = ptall[(ptall[:, 0] != -999) & (ptall[:, 1] != -999) & (ptall[:, 2] != -999)]
#ptall = np.unique(ptall.round, axis=0, return_index=True)
_, i_ptall_unq = np.unique(ptall.round(decimals=7), axis=0, return_index=True)
ptall = ptall[i_ptall_unq,:]
# if pt1[0] != pt2[0]:
if abs(pt1[0] - pt2[0]) > 1e-6:
ptall = ptall[ptall[:, 0].argsort()]
else:
ptall = ptall[ptall[:, 1].argsort()]
#compute cell distance
id_cells = np.arange(len(cells))
idx = np.ones(len(ptall)-1)
distances = np.ones(len(ptall)-1)
for i in range(len(ptall) - 1):
p1 = ptall[i] #first intersection point
p2 = ptall[i+1] #second intersection point
#cell indices where the first point belongs
tol = 1e-9
idx1 = id_cells[(cells[:, 0]-tol <= p1[0]) & (p1[0] <= cells[:, 3]+tol) & \
(cells[:, 1]-tol <= p1[1]) & (p1[1] <= cells[:, 4]+tol) & \
(cells[:, 2]-tol <= p1[2]) & (p1[2] <= cells[:, 5]+tol)]
#cell indices where the second point belongs
idx2 = id_cells[(cells[:, 0]-tol <= p2[0]) & (p2[0] <= cells[:, 3]+tol) & \
(cells[:, 1]-tol <= p2[1]) & (p2[1] <= cells[:, 4]+tol) & \
(cells[:, 2]-tol <= p2[2]) & (p2[2] <= cells[:, 5]+tol)]
#common indices of first and second point
try:
idx[i] = np.intersect1d(idx1, idx2)
except ValueError:
print('i_pt: ', i)
print('idx1: ', idx1)
print('idx2: ', idx2)
print('p1: ', p1)
print('p2: ', p2)
# import pdb; pdb.set_trace()
raise
#compute path distance
if not flagUTM:
dxy = geopydist.distance(ptall[i,(1,0)],ptall[i + 1,(1,0)]).km
else:
dxy = np.linalg.norm(ptall[i,0:2] - ptall[i + 1,0:2])
dz = ptall[i,2] - ptall[i + 1,2]
        distances[i] = np.sqrt(dxy**2 + dz**2)
dm = np.zeros(len(cells))
dm[idx.astype(int)] = distances
return dm
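# --- Illustrative usage of ComputeDistGridCells (sketch; not part of the original library) ---
if __name__ == '__main__':
    #two 10x10x10 km cells stacked along x: [q1_x, q1_y, q1_z, q8_x, q8_y, q8_z]
    cells_ex = np.array([[ 0., 0., -10., 10., 10., 0.],
                         [10., 0., -10., 20., 10., 0.]])
    pt1_ex = np.array([ 2., 5., -5.])   #e.g. hypocenter (UTM km)
    pt2_ex = np.array([18., 5., -5.])   #e.g. station projected to depth
    #horizontal path crossing both cells; expected cell paths: [8., 8.] km
    print(ComputeDistGridCells(pt1_ex, pt2_ex, cells_ex, flagUTM=True))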
| 9,551 | 32.75265 | 95 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/ground_motions/pylib_kernels.py | """
Created on Sat Aug 20 13:52:51 2022
@author: glavrent
"""
# Packages
#arithmetic libraries
import numpy as np
from scipy import linalg as scipylinalg
from sklearn.gaussian_process.kernels import Matern
# Kernel Functions
# group kernel function
def KernelGroup(grp_1, grp_2, hyp_omega = 0, delta = 1e-9):
'''
Compute kernel function for perfect correlation between group variables
Parameters
----------
grp_1 : np.array
IDs for first group.
grp_2 : np.array
IDs for second group.
hyp_omega : non-negative real, optional
Scale of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
cov_mat : np.array
Covariance Matrix.
'''
#tolerance for station id comparison
r_tol = np.min([0.01/np.max([np.abs(grp_1).max(), np.abs(grp_2).max()]), 1e-11])
#number of grid nodes
n_pt_1 = grp_1.shape[0]
n_pt_2 = grp_2.shape[0]
#number of dimensions
n_dim = grp_1.ndim
#create cov. matrix
cov_mat = np.zeros([n_pt_1,n_pt_2]) #initialize
if n_dim == 1:
for i in range(n_pt_1):
cov_mat[i,:] = hyp_omega**2 * np.isclose(grp_1[i], grp_2, rtol=r_tol).flatten()
else:
for i in range(n_pt_1):
cov_mat[i,:] = hyp_omega**2 * (scipylinalg.norm(grp_1[i] - grp_2, axis=1) < r_tol)
if n_pt_1 == n_pt_2:
for i in range(n_pt_1):
cov_mat[i,i] += delta
return cov_mat
# exponential kernel
def KernelExp(t_1, t_2, hyp_ell = 0, hyp_omega = 0, hyp_pi = 0, delta = 1e-9):
'''
Compute exponential kernel function
Parameters
----------
t_1 : np.array
Coordinates of first group.
t_2 : np.array
Coordinates of second group.
hyp_ell : non-negative real, optional
Correlation length. The default is 0.
hyp_omega : non-negative real, optional
Scale of kernel function. The default is 0.
hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
cov_mat : np.array
Covariance Matrix.
'''
#number of grid nodes
n_pt_1 = t_1.shape[0]
n_pt_2 = t_2.shape[0]
#number of dimensions
n_dim = t_1.ndim
#create cov. matrix
cov_mat = np.zeros([n_pt_1,n_pt_2]) #initialize
for i in range(n_pt_1):
dist = scipylinalg.norm(t_1[i] - t_2,axis=1) if n_dim > 1 else np.abs(t_1[i] - t_2)
cov_mat[i,:] = hyp_pi**2 + hyp_omega**2 * np.exp(- dist/hyp_ell)
if n_pt_1 == n_pt_2:
for i in range(n_pt_1):
cov_mat[i,i] += delta
return cov_mat
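# --- Illustrative check of KernelExp (comment sketch; values assumed) ---
#   KernelExp(np.array([0.]), np.array([10.]), hyp_ell=20., hyp_omega=0.3)
#   -> hyp_pi**2 + hyp_omega**2*np.exp(-10/20) = 0.3**2*np.exp(-0.5) ~= 0.0546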
# squared exponential kernel
def KernelSqExp(t_1, t_2, hyp_ell = 0, hyp_omega = 0, hyp_pi = 0, delta = 1e-9):
'''
Compute squared exponential kernel function
Parameters
----------
t_1 : np.array
Coordinates of first group.
t_2 : np.array
Coordinates of second group.
hyp_ell : non-negative real, optional
Correlation length. The default is 0.
hyp_omega : non-negative real, optional
Scale of kernel function. The default is 0.
hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
cov_mat : np.array
Covariance Matrix.
'''
#number of grid nodes
n_pt_1 = t_1.shape[0]
n_pt_2 = t_2.shape[0]
#number of dimensions
n_dim = t_1.ndim
#create cov. matrix
cov_mat = np.zeros([n_pt_1,n_pt_2]) #initialize
for i in range(n_pt_1):
dist = scipylinalg.norm(t_1[i] - t_2,axis=1) if n_dim > 1 else np.abs(t_1[i] - t_2)
cov_mat[i,:] = hyp_pi**2 + hyp_omega**2 * np.exp(- dist**2/hyp_ell**2)
if n_pt_1 == n_pt_2:
for i in range(n_pt_1):
cov_mat[i,i] += delta
return cov_mat
# matern exponential kernel
def MaternKernel(t_1, t_2, hyp_ell = 0, hyp_omega = 0, hyp_pi = 0, hyp_nu=1.5, delta = 1e-9):
'''
Compute Matern kernel function
Parameters
----------
t_1 : np.array
Coordinates of first group.
t_2 : np.array
Coordinates of second group.
hyp_ell : non-negative real, optional
Correlation length. The default is 0.
hyp_omega : non-negative real, optional
Scale of kernel function. The default is 0.
hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
hyp_nu : non-negative real, optional
Smoothness parameter. The default is 1.5.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
cov_mat : np.array
Covariance Matrix.
'''
#number of grid nodes
n_pt_1 = t_1.shape[0]
n_pt_2 = t_2.shape[0]
#number of dimensions
n_dim = t_1.ndim
#distance matrix
dist_mat = np.array([scipylinalg.norm(t1 - t_2, axis=1) if n_dim > 1 else np.abs(t1 - t_2)
for t1 in t_1])
#create cov. matrix
cov_mat = hyp_omega**2 * Matern(nu=hyp_nu, length_scale=hyp_ell)(0, dist_mat.ravel()[:, np.newaxis]).reshape(dist_mat.shape)
cov_mat += hyp_pi**2
if n_pt_1 == n_pt_2:
for i in range(n_pt_1):
cov_mat[i,i] += delta
return cov_mat
# composite exponential kernel and spatially independent
def KernelNegExpSptInpt(t_1, t_2, hyp_ell1 = 0, hyp_omega1 = 0, hyp_omega2 = 0, hyp_pi = 0, delta = 1e-9):
'''
Compute composite kernel function, with negative exponential and
    spatially independent components
Parameters
----------
t_1 : np.array
Coordinates of first group.
t_2 : np.array
Coordinates of second group.
hyp_ell1 : non-negative real, optional
Correlation length of neg. exponential component. The default is 0.
hyp_omega1 : non-negative real, optional
Scale of neg. exponential component. The default is 0.
hyp_omega2 : non-negative real, optional
Scale of spatially independent component. The default is 0.
hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
    cov_mat : np.array
        Covariance Matrix.
'''
#number of grid nodes
n_pt_1 = t_1.shape[0]
n_pt_2 = t_2.shape[0]
#negative exponetial component
cov_mat = KernelExp(t_1, t_2, hyp_ell=hyp_ell1, hyp_omega=hyp_omega1, hyp_pi=hyp_pi, delta=1e-9)
#spatially independent component
cov_mat += KernelGroup(t_1, t_2, hyp_omega=hyp_omega2, delta=0)
return cov_mat
# Predictive Functions
# predict coeffs with group kernel function
def PredictGroupKern(g_prdct, g_train, c_train_mu, c_train_sig = None,
hyp_mean_c = 0, hyp_omega = 0, delta = 1e-9):
'''
Predict conditional coefficients based on group kernel function.
Parameters
----------
g_prdct : np.array
Group IDs of prediction cases.
g_train : np.array
Group IDs of training cases.
c_train_mu : np.array
Mean values of non-ergodic coefficient of training cases.
c_train_sig : np.array, optional
Standard deviations of non-ergodic coefficient of training cases. The default is None.
hyp_mean_c : real, optional
Mean of non-ergodic coefficient. The default is 0.
hyp_omega : non-negative real, optional
Scale of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
c_prdct_mu : np.array
Mean value of non-ergodic coefficient for prediction cases.
c_prdct_sig : np.array
Standard deviations of non-ergodic coefficient for prediction cases.
c_prdct_cov : np.array
Covariance matrix of non-ergodic coefficient for prediction cases.
'''
#remove mean effect from training coefficients
c_train_mu = c_train_mu - hyp_mean_c
#uncertainty in training data
if c_train_sig is None: c_train_sig = np.zeros(len(c_train_mu))
c_train_cov = np.diag(c_train_sig**2) if c_train_sig.ndim == 1 else c_train_sig
#covariance between training data
K = KernelGroup(g_train, g_train, hyp_omega=hyp_omega, delta=delta)
#covariance between data and new locations
k = KernelGroup(g_prdct, g_train, hyp_omega=hyp_omega, delta=0)
#covariance between new locations
k_star = KernelGroup(g_prdct, g_prdct, hyp_omega=hyp_omega, delta=0)
#inverse of covariance matrix
K_inv = scipylinalg.inv(K)
#product of k * K^-1
kK_inv = k.dot(K_inv)
#posterior mean and variance at new locations
c_prdct_mu = kK_inv.dot(c_train_mu)
c_prdct_cov = k_star - kK_inv.dot(k.transpose()) + kK_inv.dot( c_train_cov.dot(kK_inv.transpose()) )
#posterior standard dev. at new locations
c_prdct_sig = np.sqrt(np.diag(c_prdct_cov))
#add mean effect from training coefficients
c_prdct_mu += hyp_mean_c
return c_prdct_mu, c_prdct_sig, c_prdct_cov
# predict coeffs with exponential kernel
def PredictExpKern(t_prdct, t_train, c_train_mu, c_train_sig = None,
hyp_mean_c = 0, hyp_ell = 0, hyp_omega = 0, hyp_pi = 0, delta = 1e-9):
'''
Predict conditional coefficients based on exponential kernel function.
Parameters
----------
t_prdct : np.array
Coordinates of prediction cases.
t_train : np.array
Coordinates of training cases.
c_train_mu : np.array
Mean values of non-ergodic coefficient of training cases.
c_train_sig : np.array, optional
Standard deviations of non-ergodic coefficient of training cases. The default is None.
hyp_mean_c : real, optional
Mean of non-ergodic coefficient. The default is 0.
hyp_ell : non-negative real, optional
        Correlation length of kernel function. The default is 0.
    hyp_omega : non-negative real, optional
        Scale of kernel function. The default is 0.
    hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
c_prdct_mu : np.array
Mean value of non-ergodic coefficient for prediction cases.
c_prdct_sig : np.array
Standard deviations of non-ergodic coefficient for prediction cases.
c_prdct_cov : np.array
Covariance matrix of non-ergodic coefficient for prediction cases.
'''
#remove mean effect from training coefficients
c_train_mu = c_train_mu - hyp_mean_c
#uncertainty in training data
if c_train_sig is None: c_train_sig = np.zeros(len(c_train_mu))
c_train_cov = np.diag(c_train_sig**2) if c_train_sig.ndim == 1 else c_train_sig
#covariance between training data
K = KernelExp(t_train, t_train, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, delta=delta)
#covariance between data and new locations
k = KernelExp(t_prdct, t_train, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, delta=0)
#covariance between new locations
k_star = KernelExp(t_prdct, t_prdct, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, delta=0)
#inverse of covariance matrix
K_inv = scipylinalg.inv(K)
#product of k * K^-1
kK_inv = k.dot(K_inv)
#posterior mean and variance at new locations
c_prdct_mu = kK_inv.dot(c_train_mu)
c_prdct_cov = k_star - kK_inv.dot(k.transpose()) + kK_inv.dot( c_train_cov.dot(kK_inv.transpose()) )
#posterior standard dev. at new locations
c_prdct_sig = np.sqrt(np.diag(c_prdct_cov))
#add mean effect from training coefficients
c_prdct_mu += hyp_mean_c
return c_prdct_mu, c_prdct_sig, c_prdct_cov
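# --- Illustrative usage of PredictExpKern (comment sketch; not part of the
# original library, example values are assumed) ---
#   t_train = np.array([[0., 0.], [10., 0.], [0., 10.]])  #training coordinates (km)
#   c_train = np.array([0.2, -0.1, 0.05])                 #coefficient means at t_train
#   t_new   = np.array([[5., 5.]])                        #prediction coordinates
#   mu, sig, cov = PredictExpKern(t_new, t_train, c_train,
#                                 hyp_mean_c=0., hyp_ell=20., hyp_omega=0.3)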
# predict coeffs with squared exponential kernel
def PredictSqExpKern(t_prdct, t_train, c_train_mu, c_train_sig = None,
hyp_mean_c = 0, hyp_ell = 0, hyp_omega = 0, hyp_pi = 0, delta = 1e-9):
'''
Predict conditional coefficients based on squared exponential kernel function.
Parameters
----------
t_prdct : np.array
Coordinates of prediction cases.
t_train : np.array
Coordinates of training cases.
c_train_mu : np.array
Mean values of non-ergodic coefficient of training cases.
c_train_sig : np.array, optional
Standard deviations of non-ergodic coefficient of training cases. The default is None.
hyp_mean_c : real, optional
Mean of non-ergodic coefficient. The default is 0.
hyp_ell : non-negative real, optional
        Correlation length of kernel function. The default is 0.
    hyp_omega : non-negative real, optional
        Scale of kernel function. The default is 0.
    hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
c_prdct_mu : np.array
Mean value of non-ergodic coefficient for prediction cases.
c_prdct_sig : np.array
Standard deviations of non-ergodic coefficient for prediction cases.
c_prdct_cov : np.array
Covariance matrix of non-ergodic coefficient for prediction cases.
'''
#remove mean effect from training coefficients
c_train_mu = c_train_mu - hyp_mean_c
#uncertainty in training data
if c_train_sig is None: c_train_sig = np.zeros(len(c_train_mu))
c_train_cov = np.diag(c_train_sig**2) if c_train_sig.ndim == 1 else c_train_sig
#covariance between training data
    K = KernelSqExp(t_train, t_train, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, delta=delta)
    #covariance between data and new locations
    k = KernelSqExp(t_prdct, t_train, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, delta=0)
    #covariance between new locations
    k_star = KernelSqExp(t_prdct, t_prdct, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, delta=0)
#inverse of covariance matrix
K_inv = scipylinalg.inv(K)
#product of k * K^-1
kK_inv = k.dot(K_inv)
#posterior mean and variance at new locations
c_prdct_mu = kK_inv.dot(c_train_mu)
c_prdct_cov = k_star - kK_inv.dot(k.transpose()) + kK_inv.dot( c_train_cov.dot(kK_inv.transpose()) )
#posterior standard dev. at new locations
c_prdct_sig = np.sqrt(np.diag(c_prdct_cov))
#add mean effect from training coefficients
c_prdct_mu += hyp_mean_c
return c_prdct_mu, c_prdct_sig, c_prdct_cov
# predict coeffs with Matern kernel
def PredictMaternKern(t_prdct, t_train, c_train_mu, c_train_sig = None,
hyp_mean_c = 0, hyp_ell = 0, hyp_omega = 0, hyp_pi = 0, hyp_nu=1.5,
delta = 1e-9):
'''
Predict conditional coefficients based on Matern kernel function.
Parameters
----------
t_prdct : np.array
Coordinates of prediction cases.
t_train : np.array
Coordinates of training cases.
c_train_mu : np.array
Mean values of non-ergodic coefficient of training cases.
c_train_sig : np.array, optional
Standard deviations of non-ergodic coefficient of training cases. The default is None.
hyp_mean_c : real, optional
Mean of non-ergodic coefficient. The default is 0.
hyp_ell : non-negative real, optional
        Correlation length of kernel function. The default is 0.
    hyp_omega : non-negative real, optional
        Scale of kernel function. The default is 0.
    hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
hyp_nu: positive real, optional
Smoothness parameter. The default is 1.5.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
c_prdct_mu : np.array
Mean value of non-ergodic coefficient for prediction cases.
c_prdct_sig : np.array
Standard deviations of non-ergodic coefficient for prediction cases.
c_prdct_cov : np.array
Covariance matrix of non-ergodic coefficient for prediction cases.
'''
#remove mean effect from training coefficients
c_train_mu = c_train_mu - hyp_mean_c
#uncertainty in training data
if c_train_sig is None: c_train_sig = np.zeros(len(c_train_mu))
c_train_cov = np.diag(c_train_sig**2) if c_train_sig.ndim == 1 else c_train_sig
#covariance between training data
K = MaternKernel(t_train, t_train, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, hyp_nu=hyp_nu, delta=delta)
#covariance between data and new locations
k = MaternKernel(t_prdct, t_train, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, hyp_nu=hyp_nu, delta=0)
#covariance between new locations
k_star = MaternKernel(t_prdct, t_prdct, hyp_ell=hyp_ell, hyp_omega=hyp_omega, hyp_pi=hyp_pi, hyp_nu=hyp_nu, delta=0)
#inverse of covariance matrix
K_inv = scipylinalg.inv(K)
#product of k * K^-1
kK_inv = k.dot(K_inv)
#posterior mean and variance at new locations
c_prdct_mu = kK_inv.dot(c_train_mu)
c_prdct_cov = k_star - kK_inv.dot(k.transpose()) + kK_inv.dot( c_train_cov.dot(kK_inv.transpose()) )
#posterior standard dev. at new locations
c_prdct_sig = np.sqrt(np.diag(c_prdct_cov))
#add mean effect from training coefficients
c_prdct_mu += hyp_mean_c
return c_prdct_mu, c_prdct_sig, c_prdct_cov
# predict coeffs with composite exponential and spatially independent kernel function
def PredictNegExpSptInptKern(t_prdct, t_train, c_train_mu, c_train_sig = None,
hyp_mean_c = 0, hyp_ell1 = 0, hyp_omega1 = 0,
hyp_omega2 = 0, hyp_pi = 0, delta = 1e-9):
'''
Predict conditional coefficients based on composite exponential and
spatially independent kernel function.
Parameters
----------
t_prdct : np.array
Coordinates of prediction cases.
t_train : np.array
Coordinates of training cases.
c_train_mu : np.array
Mean values of non-ergodic coefficient of training cases.
c_train_sig : np.array, optional
Standard deviations of non-ergodic coefficient of training cases. The default is None.
hyp_mean_c : real, optional
Mean of non-ergodic coefficient. The default is 0.
hyp_ell1 : non-negative real, optional
Correlation length of negative exponential kernel function. The default is 0.
hyp_omega1 : non-negative real, optional
Scale of negative exponential kernel function. The default is 0.
hyp_omega2 : non-negative real, optional
Scale of spatially independent kernel function. The default is 0.
    hyp_pi : non-negative real, optional
Constant of kernel function. The default is 0.
delta : non-negative real, optional
Diagonal widening. The default is 1e-9.
Returns
-------
c_prdct_mu : np.array
Mean value of non-ergodic coefficient for prediction cases.
c_prdct_sig : np.array
Standard deviations of non-ergodic coefficient for prediction cases.
c_prdct_cov : np.array
Covariance matrix of non-ergodic coefficient for prediction cases.
'''
#remove mean effect from training coefficients
c_train_mu = c_train_mu - hyp_mean_c
#uncertainty in training data
if c_train_sig is None: c_train_sig = np.zeros(len(c_train_mu))
c_train_cov = np.diag(c_train_sig**2) if c_train_sig.ndim == 1 else c_train_sig
#covariance between training data
K = KernelNegExpSptInpt(t_train, t_train, hyp_ell1=hyp_ell1, hyp_omega1=hyp_omega1,
hyp_omega2=hyp_omega2, hyp_pi=hyp_pi, delta=delta)
#covariance between data and new locations
k = KernelNegExpSptInpt(t_prdct, t_train, hyp_ell1=hyp_ell1, hyp_omega1=hyp_omega1,
hyp_omega2=hyp_omega2, hyp_pi=hyp_pi, delta=0)
#covariance between new locations
k_star = KernelNegExpSptInpt(t_prdct, t_prdct, hyp_ell1=hyp_ell1, hyp_omega1=hyp_omega1,
hyp_omega2=hyp_omega2, hyp_pi=hyp_pi, delta=0)
#inverse of covariance matrix
K_inv = scipylinalg.inv(K)
#product of k * K^-1
kK_inv = k.dot(K_inv)
#posterior mean and variance at new locations
c_prdct_mu = kK_inv.dot(c_train_mu)
c_prdct_cov = k_star - kK_inv.dot(k.transpose()) + kK_inv.dot( c_train_cov.dot(kK_inv.transpose()) )
#posterior standard dev. at new locations
c_prdct_sig = np.sqrt(np.diag(c_prdct_cov))
#add mean effect from training coefficients
c_prdct_mu += hyp_mean_c
    return c_prdct_mu, c_prdct_sig, c_prdct_cov
| 21,651 | 34.966777 | 128 | py
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/QGIS/pylib_QGIS.py | """
Created on Tue May 19 11:04:00 2020
@author: glavrent
"""
#load libraries
#load GIS
from qgis.core import QgsVectorLayer, QgsPointXY
from qgis.core import QgsField, QgsFeature, QgsGeometry, QgsVectorFileWriter, QgsFeatureSink
from qgis.PyQt.QtCore import QVariant
def EQLayer(eq_data):
'''
Create earthquake source layer for QGIS
Parameters
----------
eq_data : pd.dataframe
Dataframe for rupture points with fields:
eqid, region, mag, SOF, Ztor, eqLat, eqLon
Returns
-------
    eq_layer : QgsVectorLayer
QGIS layer with earthquake sources.
'''
#create qgis layer for earthquake sources
eq_layer = QgsVectorLayer("Point", "eq_pts", "memory")
eq_pr = eq_layer.dataProvider()
eq_pr.addAttributes([QgsField("eqid", QVariant.Int),
QgsField("region", QVariant.Int),
QgsField("mag", QVariant.Double),
QgsField("SOF", QVariant.Int),
QgsField("Ztor", QVariant.Double),
QgsField("eqLat", QVariant.Double),
QgsField("eqLon", QVariant.Double)])
#iterate over earthquakes, add on layer
eq_layer.startEditing()
for eq in eq_data.iterrows():
#earthquake info
eq_info = eq[1][['eqid','region','mag','SOF','Ztor']].tolist()
eq_latlon = eq[1][['eqLat','eqLon']].tolist()
#define feature, earthquake
eq_f = QgsFeature()
eq_f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(eq_latlon[1],eq_latlon[0])))
eq_f.setAttributes(eq_info + eq_latlon)
#add earthquake in layer
eq_pr.addFeatures([eq_f])
#commit changes
eq_layer.commitChanges()
    #update layer extents
eq_layer.updateExtents()
return eq_layer
def STALayer(sta_data):
'''
Create station layer for QGIS
Parameters
----------
sta_data : pd.dataframe
Dataframe for rupture points with fields:
'ssn','region','Vs30','Z1.0','StaLat','StaLon'
eqid','region','mag','SOF','eqLat','eqLon'
Returns
-------
sta_layer : TYPE
QGIS layer with station points.
'''
#create qgis layer for station locations
sta_layer = QgsVectorLayer("Point", "sta_pts", "memory")
sta_pr = sta_layer.dataProvider()
sta_pr.addAttributes([QgsField("ssn", QVariant.Int),
QgsField("region", QVariant.Int),
QgsField("Vs30", QVariant.Double),
QgsField("Z1.0", QVariant.Double),
QgsField("staLat", QVariant.Double),
QgsField("staLon", QVariant.Double)])
#iterate over station, add on layer
sta_layer.startEditing()
for sta in sta_data.iterrows():
        #station info
sta_info = sta[1][['ssn','region','Vs30','Z1.0']].tolist()
sta_latlon = sta[1][['staLat','staLon']].tolist()
        #define feature, station
sta_f = QgsFeature()
sta_f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(sta_latlon[1],sta_latlon[0])))
sta_f.setAttributes(sta_info + sta_latlon)
        #add station in layer
sta_pr.addFeatures([sta_f])
#commit changes
sta_layer.commitChanges()
    #update layer extents
sta_layer.updateExtents()
return sta_layer
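# --- Illustrative usage (comment sketch; requires a running QGIS Python session,
# e.g. the QGIS Python console; the dataframe values below are assumed) ---
#   import pandas as pd
#   df_eq = pd.DataFrame({'eqid': [1], 'region': [1], 'mag': [6.5], 'SOF': [0],
#                         'Ztor': [4.], 'eqLat': [37.9], 'eqLon': [-122.3]})
#   eq_layer = EQLayer(df_eq)
#   QgsProject.instance().addMapLayer(eq_layer)  #needs: from qgis.core import QgsProject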
| 3,513 | 31.841121 | 92 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pylib_stats.py | """
Created on Tue Mar 15 13:56:13 2022
@author: glavrent
Other python statistics functions
"""
#import libraries
import numpy as np
def CalcRMS(samp_q, samp_p):
'''
Compute root mean square error between observation samples (samp_p) and
    model samples (samp_q)
Parameters
----------
samp_q : np.array()
Model Samples.
samp_p : np.array()
Data Samples.
Returns
-------
real
root mean square error
'''
#errors
e = samp_q - samp_p
return np.sqrt(np.mean(e**2))
def CalcLKDivergece(samp_q, samp_p):
'''
Compute Kullback–Leibler divergence of observation samples (samp_p) based
    on model samples (samp_q)
Parameters
----------
samp_q : np.array()
Model Samples.
samp_p : np.array()
Data Samples.
Returns
-------
real
Kullback–Leibler divergence.
'''
#create histogram bins
_, hist_bins = np.histogram(np.concatenate([samp_p,samp_q]))
#count of p and q distribution
p, _ = np.histogram(samp_p, bins=hist_bins)
q, _ = np.histogram(samp_q, bins=hist_bins)
#remove bins empty in any dist, otherwise kl= +/- inf
i_empty_bins = np.logical_or(p==0, q==0)
p = p[~i_empty_bins]
q = q[~i_empty_bins]
#normalize to compute probabilites
p = p/p.sum()
q = q/q.sum()
return sum(p[i] * np.log2(p[i]/q[i]) for i in range(len(p)))
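# --- Illustrative usage (sketch; not part of the original library) ---
if __name__ == '__main__':
    #compare model samples against data samples from a shifted distribution
    rng = np.random.default_rng(0)
    samp_q_ex = rng.normal(0.0, 1.0, size=1000)  #model samples
    samp_p_ex = rng.normal(0.2, 1.1, size=1000)  #data samples
    print('RMS:', CalcRMS(samp_q_ex, samp_p_ex))
    print('KL :', CalcLKDivergece(samp_q_ex, samp_p_ex))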
| 1,508 | 19.671233 | 78 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pystan/regression_pystan_model3_uncorr_cells_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#load variables
import os
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_2_erg=0, c_3_erg=0, c_a_erg=0,
runstan_flag=True,
n_iter=600, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
pystan_ver=2, pystan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and uncorrelated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
        Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_2_erg : double, optional
Value of ergodic geometrical spreading coefficient. The default is 0.
c_3_erg : double, optional
Value of ergodic Vs30 coefficient. The default is 0.
c_a_erg : double, optional
        Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
    runstan_flag : bool, optional
        Flag for running stan. If true run regression, if false read past regression
        output and summarize non-ergodic parameters. The default is True.
    n_iter : integer, optional
        Number of stan samples. The default is 600.
    n_chains : integer, optional
        Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
pystan_ver : integer, optional
Version of pystan to run. The default is 2.
pystan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
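    # Illustrative call (comment sketch; file and column names below are assumed,
    # not part of the original code):
    #   RunStan(df_flatfile, df_cellinfo, df_celldist,
    #           stan_model_fname='model3_uncorr_cells.stan',  #hypothetical stan file
    #           out_fname='eas_f5Hz', out_dir='../regression_out/',
    #           res_name='res', n_iter=600, n_chains=4)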
## Read Data
#read stan model
with open(stan_model_fname, "r") as f:
stan_model_code = f.read()
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','x_3','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[3,4]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#geometrical spreading covariates
x_2 = df_flatfile['x_2'].values
#vs30 covariates
x_3 = df_flatfile['x_3'].values[sta_idx]
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cell with more than one paths
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells with more than one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
    #print Rrup misfits
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
'x_2': x_2,
'x_3': x_3,
'c_2_erg': c_2_erg,
'c_3_erg': c_3_erg,
'c_a_erg': c_a_erg,
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'RC': celldist_valid.to_numpy(),
}
stan_data_fname = out_fname + '_stan_data' + '.Rdata'
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#filename for STAN regression raw output file saved as pkl
stan_fit_fname = out_dir + out_fname + '_stan_fit' + '.pkl'
#run stan
if runstan_flag:
#control paramters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
if pystan_ver == 2:
import pystan
if (not pystan_parallel) or n_cpu<=n_chains:
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
else:
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#multi-processing arguments
os.environ['STAN_NUM_THREADS'] = str(n_cpu_chain)
extra_compile_args = ['-pthread', '-DSTAN_THREADS']
#compile
stan_model = pystan.StanModel(model_code=stan_model_code, extra_compile_args=extra_compile_args)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=1, control = control_stan)
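            #note: STAN_NUM_THREADS and -DSTAN_THREADS enable within-chain threading, which
            #only takes effect if the Stan program itself uses map_rect (or reduce_sum)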
elif pystan_ver == 3:
import nest_asyncio
import stan
nest_asyncio.apply()
#compile
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#full Bayesian statistics
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
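            #note: pystan 2 'iter' counts total draws including warm-up, whereas pystan 3
            #'num_samples' counts post-warm-up draws (warm-up is set separately), so the
            #two versions do not produce directly comparable numbers of samples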
#save stan model and fit
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
with open(stan_fit_fname, "wb") as f:
pickle.dump({'model' : stan_model, 'fit' : stan_fit}, f, protocol=-1)
else:
        #load model and fit for postprocessing if the regression has already been executed
with open(stan_fit_fname, "rb") as f:
data_dict = pickle.load(f)
stan_fit = data_dict['fit']
stan_model = data_dict['model']
del data_dict
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','mu_2p','mu_3s',
'ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'ell_2p', 'ell_3s', 'omega_2p', 'omega_3s',
'mu_cap', 'omega_cap',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_c_2p = ['c_2p.%i'%(k) for k in range(n_eq)]
col_names_c_3s = ['c_3s.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = (col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs +
col_names_c_2p + col_names_c_3s + col_names_cap + col_names_dB)
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
#adjustment terms
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_2p']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_3s']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB']), axis=1)
else:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_2p'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_3s'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB'].T), axis=1)
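        #note: pystan 3 returns draws with the parameter dimension first, hence the
        #transposes above to match the (samples x parameters) layout of pystan 2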
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True)
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
    df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_c_2p, col_names_c_3s, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
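    #note: this sigma treats the cell coefficients as independent, i.e.
    #var(L @ c) = L**2 @ var(c); posterior covariances between cells are neglected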
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
#spatially varying geometrical spreading coefficient
coeff_2p_mu = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].mean() for k in range(n_eq)])
coeff_2p_mu = coeff_2p_mu[eq_inv]
coeff_2p_med = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].median() for k in range(n_eq)])
coeff_2p_med = coeff_2p_med[eq_inv]
coeff_2p_sig = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].std() for k in range(n_eq)])
coeff_2p_sig = coeff_2p_sig[eq_inv]
#spatially varying Vs30 coefficient
coeff_3s_mu = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].mean() for k in range(n_sta)])
coeff_3s_mu = coeff_3s_mu[sta_inv]
coeff_3s_med = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].median() for k in range(n_sta)])
coeff_3s_med = coeff_3s_med[sta_inv]
coeff_3s_sig = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].std() for k in range(n_sta)])
coeff_3s_sig = coeff_3s_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#dataframe with flatfile info
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_2p_mu,
coeff_3s_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_2p_med,
coeff_3s_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
coeff_2p_sig,
coeff_3s_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','c_2p_mean','c_3s_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'c_2p_med', 'c_3s_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'c_2p_sig', 'c_3s_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + coeff_2p_mu*x_2 + coeff_3s_mu*x_3[sta_inv] + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
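    #note: decomposition res_tot = dB + dW; dB (between-event) is the posterior mean of the
    #Stan event terms and dW (within-event) is the remainder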
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
for c_name in col_names_hyp:
        #create native stan trace plot (pystan 2 only; the method is not available in pystan 3)
        if pystan_ver == 2:
            fig = stan_fit.traceplot(c_name)
            fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '.png')
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 21,718 | 46.944812 | 130 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pystan/regression_pystan_model2_uncorr_cells_sparse_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#load libraries
import os
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
from scipy import sparse
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
runstan_flag=True,
n_iter=600, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
pystan_ver=2, pystan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and uncorrelated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
        Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
        Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter : integer, optional
Number of stan samples. The default is 600.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
runstan_flag : bool, optional
Flag for running stan. If true run regression, if false read past regression
output and summarize non-ergodic parameters. The default is True.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
pystan_ver : integer, optional
Version of pystan to run. The default is 2.
pystan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
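    # illustrative usage (a sketch; the data frames and file names below are
    # placeholders, not files shipped with this repository):
    #   RunStan(df_flatfile, df_cellinfo, df_celldist,
    #           stan_model_fname='model2_uncorr_cells_sparse.stan',
    #           out_fname='eas_model2_sparse', out_dir='out/',
    #           res_name='res', n_iter=600, n_chains=4, pystan_ver=2)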
## Read Data
#read stan model
with open(stan_model_fname, "r") as f:
stan_model_code = f.read()
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
    #find cells crossed by at least one path
    i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid].to_numpy() #cell-distance with only non-zero cells
celldist_valid_sp = sparse.csr_matrix(celldist_valid)
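    #note: the CSR components (data, indices, indptr) are passed to Stan below as
    #RC_val, RC_w, RC_u; the +1 offsets convert to Stan's 1-based indexing, presumably
    #for use with Stan's sparse utilities (e.g. csr_matrix_times_vector)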
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
    #print Rrup misfits
print('max R_rup misfit', np.abs(df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'NCELL_SP': len(celldist_valid_sp.data),
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC_val': celldist_valid_sp.data,
'RC_w': celldist_valid_sp.indices+1,
'RC_u': celldist_valid_sp.indptr+1,
'c_a_erg': c_a_erg,
'Y': y_data,
}
stan_data_fname = out_fname + '_stan_data' + '.Rdata'
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#filename for STAN regression raw output file saved as pkl
stan_fit_fname = out_dir + out_fname + '_stan_fit' + '.pkl'
#run stan
if runstan_flag:
        #control parameters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
if pystan_ver == 2:
import pystan
if (not pystan_parallel) or n_cpu<=n_chains:
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
else:
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#multi-processing arguments
os.environ['STAN_NUM_THREADS'] = str(n_cpu_chain)
extra_compile_args = ['-pthread', '-DSTAN_THREADS']
#compile
stan_model = pystan.StanModel(model_code=stan_model_code, extra_compile_args=extra_compile_args)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=1, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
nest_asyncio.apply()
#compile
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#full Bayesian statistics
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
#save stan model and fit
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
with open(stan_fit_fname, "wb") as f:
pickle.dump({'model' : stan_model, 'fit' : stan_fit}, f, protocol=-1)
else:
        #load model and fit for postprocessing if the regression has already been executed
with open(stan_fit_fname, "rb") as f:
data_dict = pickle.load(f)
stan_fit = data_dict['fit']
stan_model = data_dict['model']
del data_dict
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'omega_cap',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
#adjustment terms
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB']), axis=1)
else:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB'].T), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True)
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
    df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid_sp @ cells_ca_mu
cells_LcA_med = celldist_valid_sp @ cells_ca_med
cells_LcA_sig = np.sqrt(celldist_valid_sp.power(2) @ cells_ca_sig**2)
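    #note: .power(2) squares the sparse matrix elementwise, so this sigma treats the
    #cell coefficients as independent (posterior covariances between cells are neglected)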
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
    #initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 19,298 | 46.185819 | 130 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pystan/regression_pystan_model1_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#import libraries
import os
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
def RunStan(df_flatfile, stan_model_fname,
out_fname, out_dir, res_name='res',
runstan_flag=True,
n_iter=600, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
pystan_ver=2, pystan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, and a spatially
independent site constant.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
runstan_flag : bool, optional
Flag for running stan. If true run regression, if false read past regression
output and summarize non-ergodic parameters. The default is True.
n_iter : integer, optional
Number of stan samples. The default is 600.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
pystan_ver : integer, optional
Version of pystan to run. The default is 2.
pystan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
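    # illustrative usage (a sketch; file names are placeholders):
    #   df_flat = pd.read_csv('flatfile_with_residuals.csv')
    #   RunStan(df_flat, stan_model_fname='regression_model1.stan',
    #           out_fname='model1_run', out_dir='out/', n_iter=600, n_chains=4)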
## Read Data
#read stan model
with open(stan_model_fname, "r") as f:
stan_model_code = f.read()
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']].values, axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#verify no collocated events
eq_dist_min = np.min([np.linalg.norm(x_eq - np.delete(X_eq,k, axis=0), axis=1).min() for k, x_eq in enumerate(X_eq) ])
assert(eq_dist_min > 5e-5),'Error. Singular covariance matrix due to collocated events'
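    #note: collocated events would produce duplicate rows in the GP covariance matrix,
    #making it singular, so the Cholesky factorization in the Stan model would likely fail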
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#verify no collocated stations
sta_dist_min = np.min([np.linalg.norm(x_sta - np.delete(X_sta,k, axis=0), axis=1).min() for k, x_sta in enumerate(X_sta) ])
assert(sta_dist_min > 5e-5),'Error. Singular covariance matrix due to collocated stations'
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#stan data
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
}
stan_data_fname = out_fname + '_stan_data' + '.Rdata'
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#filename for STAN regression raw output file saved as pkl
stan_fit_fname = out_dir + out_fname + '_stan_fit' + '.pkl'
#run stan
if runstan_flag:
        #control parameters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
if pystan_ver == 2:
import pystan
if (not pystan_parallel) or n_cpu<=n_chains:
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
else:
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#multi-processing arguments
os.environ['STAN_NUM_THREADS'] = str(n_cpu_chain)
extra_compile_args = ['-pthread', '-DSTAN_THREADS']
#compile
stan_model = pystan.StanModel(model_code=stan_model_code, extra_compile_args=extra_compile_args)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=1, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
nest_asyncio.apply()
#compile
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#full Bayesian statistics
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
#save stan model and fit
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
with open(stan_fit_fname, "wb") as f:
pickle.dump({'model' : stan_model, 'fit' : stan_fit}, f, protocol=-1)
else:
        #load model and fit for postprocessing if the regression has already been executed
with open(stan_fit_fname, "rb") as f:
data_dict = pickle.load(f)
stan_fit = data_dict['fit']
stan_model = data_dict['model']
del data_dict
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
#adjustment terms
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB']), axis=1)
else:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB'].T), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True)
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
    df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
    #initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summarize non-ergodic coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction and residuals
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summarize predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 15,691 | 44.616279 | 130 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pystan/regression_pystan_model2_uncorr_cells_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#load libraries
import os
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
runstan_flag=True,
n_iter=600, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
pystan_ver=2, pystan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and uncorrelated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
        Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
        Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter : integer, optional
Number of stan samples. The default is 600.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
runstan_flag : bool, optional
Flag for running stan. If true run regression, if false read past regression
output and summarize non-ergodic parameters. The default is True.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
pystan_ver : integer, optional
Version of pystan to run. The default is 2.
pystan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Read Data
#read stan model
with open(stan_model_fname, "r") as f:
stan_model_code = f.read()
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
    #find cells crossed by at least one path
    i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
    #print Rrup misfits
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
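    #note: the valid-cell path lengths of each record should sum to its rupture distance
    #Rrup, so this misfit is a consistency check of the cell-path discretization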
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC': celldist_valid.to_numpy(),
'c_a_erg': c_a_erg,
'Y': y_data,
}
stan_data_fname = out_fname + '_stan_data' + '.Rdata'
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#filename for STAN regression raw output file saved as pkl
stan_fit_fname = out_dir + out_fname + '_stan_fit' + '.pkl'
#run stan
if runstan_flag:
#control paramters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
if pystan_ver == 2:
import pystan
if (not pystan_parallel) or n_cpu<=n_chains:
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
else:
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#multi-processing arguments
os.environ['STAN_NUM_THREADS'] = str(n_cpu_chain)
extra_compile_args = ['-pthread', '-DSTAN_THREADS']
#compile
stan_model = pystan.StanModel(model_code=stan_model_code, extra_compile_args=extra_compile_args)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=1, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
nest_asyncio.apply()
#compile
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#full Bayesian statistics
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
#save stan model and fit
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
with open(stan_fit_fname, "wb") as f:
pickle.dump({'model' : stan_model, 'fit' : stan_fit}, f, protocol=-1)
else:
        #load model and fit for postprocessing if the regression has already been executed
with open(stan_fit_fname, "rb") as f:
data_dict = pickle.load(f)
stan_fit = data_dict['fit']
stan_model = data_dict['model']
del data_dict
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'omega_cap',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
#adjustment terms
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB']), axis=1)
else:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB'].T), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True)
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
    df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
    #initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 19,042 | 46.136139 | 130 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pystan/regression_pystan_model2_corr_cells_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#load variables
import os
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
runstan_flag=True,
n_iter=600, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
pystan_ver=2, pystan_parallel=False):
'''
Run full Bayesian regression in Stan. The non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and partially spatially correlated anelastic
attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter : integer, optional
Number of stan samples. The default is 600.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
runstan_flag : bool, optional
Flag for running stan. If True, run the regression; if False, read past
regression output and summarize the non-ergodic parameters. The default is True.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
pystan_ver : integer, optional
Version of pystan to run. The default is 2.
pystan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
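#illustrative call (sketch only; file names and values below are hypothetical,
#not part of this module):
# df_flat = pd.read_csv('flatfile.csv') #total residuals + eq/sta coordinates
# df_cinfo = pd.read_csv('cellinfo.csv') #attenuation-cell coordinates
# df_cdist = pd.read_csv('celldist.csv') #cell-path distances per record
# RunStan(df_flat, df_cinfo, df_cdist, 'model2_corr_cells.stan',
# 'example_run', 'out/', res_name='res', c_a_erg=0.0)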
## Read Data
#read stan model
with open(stan_model_fname, "r") as f:
stan_model_code = f.read()
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cells with at least one crossing path
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
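#sanity check: the cell-path distances of each record should sum back to
#its rupture distance Rrup; a large misfit printed below indicates an
#inconsistent cell-distance matrix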
#print Rrup misfits
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC': celldist_valid.to_numpy(),
'c_a_erg': c_a_erg,
'Y': y_data,
}
stan_data_fname = out_fname + '_stan_data' + '.Rdata'
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#filename for STAN regression raw output file saved as pkl
stan_fit_fname = out_dir + out_fname + '_stan_fit' + '.pkl'
#run stan
if runstan_flag:
#control parameters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
if pystan_ver == 2:
import pystan
if (not pystan_parallel) or n_cpu<=n_chains:
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
else:
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#multi-processing arguments
os.environ['STAN_NUM_THREADS'] = str(n_cpu_chain)
extra_compile_args = ['-pthread', '-DSTAN_THREADS']
#compile
stan_model = pystan.StanModel(model_code=stan_model_code, extra_compile_args=extra_compile_args)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=1, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
nest_asyncio.apply()
#compile
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#full Bayesian statistics
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
#save stan model and fit
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
with open(stan_fit_fname, "wb") as f:
pickle.dump({'model' : stan_model, 'fit' : stan_fit}, f, protocol=-1)
else:
#load model and fit for postprocessing if regression has already been executed
with open(stan_fit_fname, "rb") as f:
data_dict = pickle.load(f)
stan_fit = data_dict['fit']
stan_model = data_dict['model']
del data_dict
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'ell_ca1p', 'omega_ca1p', 'omega_ca2p',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
#adjustment terms
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB']), axis=1)
else:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB'].T), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True) #DataFrame.append was removed in pandas 2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
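#note: this propagates the cell standard deviations as if the posterior
#cell coefficients were uncorrelated; posterior correlation between cells
#is ignored in this approximation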
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
#spatially varying site constant (dc_1as)
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
#spatially independent station constant (dc_1bs)
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
#create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 19,072 | 46.093827 | 130 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pystan/regression_pystan_model2_corr_cells_sparse_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#load variables
import os
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
from scipy import sparse
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
runstan_flag=True,
n_iter=600, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
pystan_ver=2, pystan_parallel=False):
'''
Run full Bayesian regression in Stan. The non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and partially spatially correlated anelastic
attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter : integer, optional
Number of stan samples. The default is 600.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
runstan_flag : bool, optional
Flag for running stan. If True, run the regression; if False, read past
regression output and summarize the non-ergodic parameters. The default is True.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
pystan_ver : integer, optional
Version of pystan to run. The default is 2.
pystan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Read Data
#read stan model
with open(stan_model_fname, "r") as f:
stan_model_code = f.read()
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cells with at least one crossing path
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid].to_numpy() #cell-distance with only non-zero cells
celldist_valid_sp = sparse.csr_matrix(celldist_valid)
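#store the cell-distance matrix in compressed sparse row (CSR) form; most
#records cross only a small subset of cells, so the sparse representation
#saves memory and enables a sparse matrix-vector product in the Stan model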
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
#print Rrup misfits
print('max R_rup misfit', np.abs(df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'NCELL_SP': len(celldist_valid_sp.data),
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC_val': celldist_valid_sp.data,
'RC_w': celldist_valid_sp.indices+1,
'RC_u': celldist_valid_sp.indptr+1,
'c_a_erg': c_a_erg,
'Y': y_data,
}
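#RC_val/RC_w/RC_u above follow scipy's CSR layout (non-zero values, column
#indices, row pointers); the +1 offsets convert scipy's 0-based indices to
#the 1-based arrays that Stan's sparse matrix utilities expect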
stan_data_fname = out_fname + '_stan_data' + '.Rdata'
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#filename for STAN regression raw output file saved as pkl
stan_fit_fname = out_dir + out_fname + '_stan_fit' + '.pkl'
#run stan
if runstan_flag:
#control parameters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
if pystan_ver == 2:
import pystan
if (not pystan_parallel) or n_cpu<=n_chains:
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
else:
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#multi-processing arguments
os.environ['STAN_NUM_THREADS'] = str(n_cpu_chain)
extra_compile_args = ['-pthread', '-DSTAN_THREADS']
#compile
stan_model = pystan.StanModel(model_code=stan_model_code, extra_compile_args=extra_compile_args)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=1, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
nest_asyncio.apply()
#compile
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#full Bayesian statistics
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
#save stan model and fit
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
with open(stan_fit_fname, "wb") as f:
pickle.dump({'model' : stan_model, 'fit' : stan_fit}, f, protocol=-1)
else:
#load model and fit for postprocessing if regression has already been executed
with open(stan_fit_fname, "rb") as f:
data_dict = pickle.load(f)
stan_fit = data_dict['fit']
stan_model = data_dict['model']
del data_dict
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'ell_ca1p', 'omega_ca1p', 'omega_ca2p',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
#adjustment terms
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB']), axis=1)
else:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB'].T), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True) #DataFrame.append was removed in pandas 2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid_sp @ cells_ca_mu
cells_LcA_med = celldist_valid_sp @ cells_ca_med
cells_LcA_sig = np.sqrt(celldist_valid_sp.power(2) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
#spatially varying site constant (dc_1as)
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
#spatially independent station constant (dc_1bs)
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
#create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 19,326 | 46.139024 | 130 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/pystan/regression_pystan_model3_corr_cells_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#load variables
import os
import pathlib
import glob
import re #regular expression package
import pickle
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_2_erg=0, c_3_erg=0, c_a_erg=0,
runstan_flag=True,
n_iter=600, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
pystan_ver=2, pystan_parallel=False):
'''
Run full Bayesian regression in Stan. The non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, a spatially varying geometrical-spreading coefficient,
a spatially varying Vs30 coefficient, and partially spatially correlated
anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_2_erg : double, optional
Value of ergodic geometrical spreading coefficient. The default is 0.
c_3_erg : double, optional
Value of ergodic Vs30 coefficient. The default is 0.
c_a_erg : double, optional
Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter : integer, optional
Number of stan samples. The default is 600.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
runstan_flag : bool, optional
Flag for running stan. If True, run the regression; if False, read past
regression output and summarize the non-ergodic parameters. The default is True.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
pystan_ver : integer, optional
Version of pystan to run. The default is 2.
pystan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Read Data
#read stan model
with open(stan_model_fname, "r") as f:
stan_model_code = f.read()
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','x_3','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[3,4]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#geometrical spreading covariates
x_2 = df_flatfile['x_2'].values
#vs30 covariates
x_3 = df_flatfile['x_3'].values[sta_idx]
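#x_2 is the record-level geometrical-spreading covariate; x_3 is the
#Vs30-based covariate, reduced to one value per station through sta_idx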
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cells with at least one crossing path
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
#print Rrup misfits
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
'x_2': x_2,
'x_3': x_3,
'c_2_erg': c_2_erg,
'c_3_erg': c_3_erg,
'c_a_erg': c_a_erg,
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'RC': celldist_valid.to_numpy(),
}
stan_data_fname = out_fname + '_stan_data' + '.Rdata'
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#filename for STAN regression raw output file saved as pkl
stan_fit_fname = out_dir + out_fname + '_stan_fit' + '.pkl'
#run stan
if runstan_flag:
#control parameters
control_stan = {'adapt_delta':adapt_delta, 'max_treedepth':max_treedepth}
if pystan_ver == 2:
import pystan
if (not pystan_parallel) or n_cpu<=n_chains:
#compile
stan_model = pystan.StanModel(model_code=stan_model_code)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=10, control = control_stan)
else:
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#multi-processing arguments
os.environ['STAN_NUM_THREADS'] = str(n_cpu_chain)
extra_compile_args = ['-pthread', '-DSTAN_THREADS']
#compile
stan_model = pystan.StanModel(model_code=stan_model_code, extra_compile_args=extra_compile_args)
#full Bayesian statistics
stan_fit = stan_model.sampling(data=stan_data, iter=n_iter, chains = n_chains, refresh=1, control = control_stan)
elif pystan_ver == 3:
import nest_asyncio
import stan
nest_asyncio.apply()
#compile
stan_model = stan.build(stan_model_code, data=stan_data, random_seed=1)
#full Bayesian statistics
stan_fit = stan_model.sample(num_chains=n_chains, num_samples=n_iter, max_depth=max_treedepth, delta=adapt_delta)
#save stan model and fit
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
with open(stan_fit_fname, "wb") as f:
pickle.dump({'model' : stan_model, 'fit' : stan_fit}, f, protocol=-1)
else:
#load model and fit for postprocessing if regression has already been executed
with open(stan_fit_fname, "rb") as f:
data_dict = pickle.load(f)
stan_fit = data_dict['fit']
stan_model = data_dict['model']
del data_dict
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','mu_2p','mu_3s',
'ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'ell_2p', 'ell_3s', 'omega_2p', 'omega_3s',
'mu_cap', 'ell_ca1p', 'omega_ca1p', 'omega_ca2p',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_c_2p = ['c_2p.%i'%(k) for k in range(n_eq)]
col_names_c_3s = ['c_3s.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = (col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs +
col_names_c_2p + col_names_c_3s + col_names_cap + col_names_dB)
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit[c_n].flatten() for c_n in col_names_hyp], axis=1)
#adjustment terms
if pystan_ver == 2:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_2p']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_3s']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap']), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB']), axis=1)
else:
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1e'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1as'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dc_1bs'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_2p'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_3s'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['c_cap'].T), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit['dB'].T), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True) #DataFrame.append was removed in pandas 2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_c_2p, col_names_c_3s, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
#spatially varying site constant (dc_1as)
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
#spatially independent station constant (dc_1bs)
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
#spatially varying geometrical spreading coefficient
coeff_2p_mu = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].mean() for k in range(n_eq)])
coeff_2p_mu = coeff_2p_mu[eq_inv]
coeff_2p_med = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].median() for k in range(n_eq)])
coeff_2p_med = coeff_2p_med[eq_inv]
coeff_2p_sig = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].std() for k in range(n_eq)])
coeff_2p_sig = coeff_2p_sig[eq_inv]
#spatially varying Vs30 coefficient
coeff_3s_mu = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].mean() for k in range(n_sta)])
coeff_3s_mu = coeff_3s_mu[sta_inv]
coeff_3s_med = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].median() for k in range(n_sta)])
coeff_3s_med = coeff_3s_med[sta_inv]
coeff_3s_sig = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].std() for k in range(n_sta)])
coeff_3s_sig = coeff_3s_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_2p_mu,
coeff_3s_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_2p_med,
coeff_3s_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
coeff_2p_sig,
coeff_3s_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','c_2p_mean','c_3s_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'c_2p_med', 'c_3s_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'c_2p_sig', 'c_3s_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + coeff_2p_mu*x_2 + coeff_3s_mu*x_3[sta_inv] + cells_LcA_mu)
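#mean prediction: constant shift + event and site adjustment terms +
#event-specific geometrical-spreading scaling (c_2p*x_2) + station-specific
#Vs30 scaling (c_3s*x_3) + anelastic attenuation path effect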
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
#create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 21,689 | 46.986726 | 130 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model1_unbounded_hyp.py | """
Created on Tue Jul 13 18:22:15 2021
@author: glavrent
"""
#load variables
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, stan_model_fname,
out_fname, out_dir, res_name='res',
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
max_treedepth=10, adapt_delta=0.80,
stan_parallel=False):
'''
Run full Bayesian regression in Stan. The non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, and a spatially
independent site constant.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
n_iter_warmup : integer, optional
Number of warm-up (burn-in) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']].values, axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#verify no collocated events
eq_dist_min = np.min([np.linalg.norm(x_eq - np.delete(X_eq,k, axis=0), axis=1).min() for k, x_eq in enumerate(X_eq) ])
assert(eq_dist_min > 5e-5),'Error. Singular covariance matrix due to collocated events'
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#verify no collocated stations
sta_dist_min = np.min([np.linalg.norm(x_sta - np.delete(X_sta,k, axis=0), axis=1).min() for k, x_sta in enumerate(X_sta) ])
assert(sta_dist_min > 5e-5),'Error. Singular covariance matrix due to collocated stations'
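#the 5e-5 tolerance in the two checks above guards against numerically
#collocated events/stations, which would create duplicate rows in the GP
#covariance matrices and make them singular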
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#stan data
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
}
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
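#the data dict is serialized to JSON and the file name (rather than the
#in-memory dict) is passed to stan_model.sample() below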
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True) #DataFrame.append was removed in pandas 2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
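#(eq_inv maps every record to its event index, broadcasting per-event posterior
# statistics to all records; sta_inv plays the same role for stations below)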
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
#spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
#spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
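#residual decomposition: res_tot = res_between (event term dB) + res_within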
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit.summary(), file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
#create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 14,382 | 45.247588 | 127 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model2_corr_cells_sparse_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load libraries
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
from scipy import sparse
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and partially spatially correlated anelastic
attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
    Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
    Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
    Number of burn-in (warm-up) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
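# Hypothetical usage sketch (file and directory names are illustrative only,
# not taken from this repository):
#   df_flat = pd.read_csv('flatfile.csv')
#   df_cinfo = pd.read_csv('cellinfo.csv')
#   df_cdist = pd.read_csv('celldist.csv')
#   RunStan(df_flat, df_cinfo, df_cdist,
#           stan_model_fname='model2_corr_cells_sparse.stan',
#           out_fname='example_run', out_dir='out/')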
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cells crossed by at least one path
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells with non-zero total path length
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid].to_numpy() #cell-distance with only non-zero cells
celldist_valid_sp = sparse.csr_matrix(celldist_valid)
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
#print Rrup misfits
print('max R_rup misfit', np.abs(df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'NCELL_SP': len(celldist_valid_sp.data),
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC_val': celldist_valid_sp.data,
'RC_w': celldist_valid_sp.indices+1,
'RC_u': celldist_valid_sp.indptr+1,
'c_a_erg': c_a_erg,
'Y': y_data,
}
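#RC_val/RC_w/RC_u above are the CSR value, column-index, and row-pointer arrays
#of the cell-distance matrix; indices are shifted by +1 because Stan's
#csr_matrix_times_vector expects 1-based arrays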
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'ell_ca1p', 'omega_ca1p', 'omega_ca2p',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T],
                        ignore_index=True) #pd.concat: DataFrame.append was removed in pandas 2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid_sp @ cells_ca_mu
cells_LcA_med = celldist_valid_sp @ cells_ca_med
cells_LcA_sig = np.sqrt(celldist_valid_sp.power(2) @ cells_ca_sig**2)
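#the sigma of the path term treats the cell coefficients as uncorrelated:
#var(sum_i L_i*c_i) = sum_i L_i^2 * var(c_i)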
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
#spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
#spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#dataframe with flatfile info
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
    print(stan_fit.summary(), file=f) #summary() returns the sampler summary statistics
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
#create figures directory if doesn't exit
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 17,927 | 46.808 | 120 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model2_uncorr_cells_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load libraries
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and uncorrelated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
    Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
    Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
    Number of burn-in (warm-up) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
    Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
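# Note: compared with the correlated-cells variant, the only attenuation
# hyper-parameters are mu_cap/omega_cap (no cell correlation lengths), i.e.
# the cell-specific attenuation coefficients are treated as independent.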
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cells crossed by at least one path
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells with non-zero total path length
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
#print Rrup misfits
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
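#(a large misfit would suggest the cell discretization does not fully cover
# the source-to-site paths)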
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC': celldist_valid.to_numpy(),
'c_a_erg': c_a_erg,
'Y': y_data,
}
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'omega_cap',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T],
                        ignore_index=True) #pd.concat: DataFrame.append was removed in pandas 2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
#spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
#spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
    print(stan_fit.summary(), file=f) #summary() returns the sampler summary statistics
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
#create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 17,759 | 46.741935 | 120 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model3_uncorr_cells_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load libraries
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_2_erg=0, c_3_erg=0, c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, a spatially varying geometrical-spreading coefficient,
a spatially varying Vs30 scaling coefficient, and uncorrelated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
    Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_2_erg : double, optional
    Value of the ergodic geometrical-spreading coefficient. Used as mean of the
    spatially varying geometrical-spreading prior distribution. The default is 0.
c_3_erg : double, optional
    Value of the ergodic Vs30 scaling coefficient. Used as mean of the spatially
    varying Vs30 prior distribution. The default is 0.
c_a_erg : double, optional
    Value of ergodic anelastic attenuation coefficient. Used as mean of cell
    specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
    Number of burn-in (warm-up) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
    Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','x_3','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[3,4]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#geometrical spreading covariates
x_2 = df_flatfile['x_2'].values
#vs30 covariates
x_3 = df_flatfile['x_3'].values[sta_idx]
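#(x_3 is reduced to one value per station via sta_idx and broadcast back to
# records with sta_inv when computing predictions)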
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cells crossed by at least one path
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells with non-zero total path length
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
#print Rrup misfits
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
'x_2': x_2,
'x_3': x_3,
'c_2_erg': c_2_erg,
'c_3_erg': c_3_erg,
'c_a_erg': c_a_erg,
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'RC': celldist_valid.to_numpy(),
}
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','mu_2p','mu_3s',
'ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'ell_2p', 'ell_3s', 'omega_2p', 'omega_3s',
'mu_cap', 'omega_cap',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_c_2p = ['c_2p.%i'%(k) for k in range(n_eq)]
col_names_c_3s = ['c_3s.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = (col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs +
col_names_c_2p + col_names_c_3s + col_names_cap + col_names_dB)
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_2p')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_3s')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T],
                        ignore_index=True) #pd.concat: DataFrame.append was removed in pandas 2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_c_2p, col_names_c_3s, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
#spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
#spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
#spatially varying geometrical spreading coefficient
coeff_2p_mu = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].mean() for k in range(n_eq)])
coeff_2p_mu = coeff_2p_mu[eq_inv]
coeff_2p_med = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].median() for k in range(n_eq)])
coeff_2p_med = coeff_2p_med[eq_inv]
coeff_2p_sig = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].std() for k in range(n_eq)])
coeff_2p_sig = coeff_2p_sig[eq_inv]
#spatially varying Vs30 coefficient
coeff_3s_mu = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].mean() for k in range(n_sta)])
coeff_3s_mu = coeff_3s_mu[sta_inv]
coeff_3s_med = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].median() for k in range(n_sta)])
coeff_3s_med = coeff_3s_med[sta_inv]
coeff_3s_sig = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].std() for k in range(n_sta)])
coeff_3s_sig = coeff_3s_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
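#(posterior-mean aleatory standard deviations per record; computed for
# reference only and not written to the output summaries)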
#dataframe with flatfile info
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_2p_mu,
coeff_3s_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_2p_med,
coeff_3s_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
coeff_2p_sig,
coeff_3s_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','c_2p_mean','c_3s_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'c_2p_med', 'c_3s_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'c_2p_sig', 'c_3s_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + coeff_2p_mu*x_2 + coeff_3s_mu*x_3[sta_inv] + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
    print(stan_fit.summary(), file=f) #summary() returns the sampler summary statistics
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
#create figures directory if doesn't exit
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 19,904 | 47.667482 | 128 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model3_uncorr_cells_sparse_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load libraries
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
from scipy import sparse
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_2_erg=0, c_3_erg=0, c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, a spatially varying geometrical-spreading coefficient,
a spatially varying Vs30 scaling coefficient, and uncorrelated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
    Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_2_erg : double, optional
    Value of the ergodic geometrical-spreading coefficient. Used as mean of the
    spatially varying geometrical-spreading prior distribution. The default is 0.
c_3_erg : double, optional
    Value of the ergodic Vs30 scaling coefficient. Used as mean of the spatially
    varying Vs30 prior distribution. The default is 0.
c_a_erg : double, optional
    Value of ergodic anelastic attenuation coefficient. Used as mean of cell
    specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
    Number of burn-in (warm-up) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
    Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
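# Note: this variant matches the dense model3 regression but passes the
# cell-distance matrix to Stan in sparse CSR form (RC_val/RC_w/RC_u), intended
# to reduce memory use when the matrix is mostly zeros.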
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','x_3','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[3,4]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#geometrical spreading covariates
x_2 = df_flatfile['x_2'].values
#vs30 covariates
x_3 = df_flatfile['x_3'].values[sta_idx]
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
#find cells crossed by at least one path
i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells with non-zero total path length
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
celldist_valid_sp = sparse.csr_matrix(celldist_valid)
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
#print Rrup misfits
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'NCELL_SP': len(celldist_valid_sp.data),
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
'x_2': x_2,
'x_3': x_3,
'c_2_erg': c_2_erg,
'c_3_erg': c_3_erg,
'c_a_erg': c_a_erg,
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'RC_val': celldist_valid_sp.data,
'RC_w': celldist_valid_sp.indices+1,
'RC_u': celldist_valid_sp.indptr+1,
}
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
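    #note: with STAN_THREADS enabled the n_chains chains run in parallel and each
    #chain may use up to n_cpu_chain worker threads, so the total thread count is
    #roughly n_chains*n_cpu_chain; within-chain speedup also requires reduce_sum
    #or map_rect in the Stan program itself.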
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','mu_2p','mu_3s',
'ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'ell_2p', 'ell_3s', 'omega_2p', 'omega_3s',
'mu_cap', 'omega_cap',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_c_2p = ['c_2p.%i'%(k) for k in range(n_eq)]
col_names_c_3s = ['c_3s.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = (col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs +
col_names_c_2p + col_names_c_3s + col_names_cap + col_names_dB)
    #summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_2p')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_3s')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T],
                            ignore_index=True) #DataFrame.append was removed in pandas>=2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_c_2p, col_names_c_3s, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid_sp @ cells_ca_mu
cells_LcA_med = celldist_valid_sp @ cells_ca_med
cells_LcA_sig = np.sqrt(celldist_valid_sp.power(2) @ cells_ca_sig**2)
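    #note: this uncertainty propagation treats the cell coefficients as a
    #posteriori independent, i.e. Var[RC @ c] = RC^2 @ Var[c]; posterior
    #cross-correlations between cells are ignored. A full propagation would push
    #every posterior sample of c_cap through celldist_valid_sp and summarize.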
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
#spatially varying geometrical spreading coefficient
coeff_2p_mu = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].mean() for k in range(n_eq)])
coeff_2p_mu = coeff_2p_mu[eq_inv]
coeff_2p_med = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].median() for k in range(n_eq)])
coeff_2p_med = coeff_2p_med[eq_inv]
coeff_2p_sig = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].std() for k in range(n_eq)])
coeff_2p_sig = coeff_2p_sig[eq_inv]
#spatially varying Vs30 coefficient
coeff_3s_mu = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].mean() for k in range(n_sta)])
coeff_3s_mu = coeff_3s_mu[sta_inv]
coeff_3s_med = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].median() for k in range(n_sta)])
coeff_3s_med = coeff_3s_med[sta_inv]
coeff_3s_sig = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].std() for k in range(n_sta)])
coeff_3s_sig = coeff_3s_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#dataframe with flatfile info
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_2p_mu,
coeff_3s_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_2p_med,
coeff_3s_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
coeff_2p_sig,
coeff_3s_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','c_2p_mean','c_3s_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'c_2p_med', 'c_3s_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'c_2p_sig', 'c_3s_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + coeff_2p_mu*x_2 + coeff_3s_mu*x_3[sta_inv] + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
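    #note: the model is linear in its coefficients, so summing posterior-mean
    #terms yields the exact posterior-mean prediction, and the residual
    #decomposition res_tot = res_between + res_within holds by construction:
    assert np.allclose(res_tot, res_between + res_within)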
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 20,144 | 47.65942 | 128 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model2_corr_cells_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load variables
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and partially spatially correlated anelastic
attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
        Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
        Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
        Number of warm-up (burn-in) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
    #find cells crossed by at least one source-to-site path
    i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
    #print Rrup misfit (check that cell path distances sum to the rupture distance)
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC': celldist_valid.to_numpy(),
'c_a_erg': c_a_erg,
'Y': y_data,
}
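    #note: unlike the sparse variants of these regression scripts, this model
    #passes the full dense cell-distance matrix RC (n_data x n_cell_valid); for
    #large datasets the sparse RC_val/RC_w/RC_u encoding is the cheaper option.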
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'ell_ca1p', 'omega_ca1p', 'omega_ca2p',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T],
                            ignore_index=True) #DataFrame.append was removed in pandas>=2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
#dataframe with flatfile info
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 17,672 | 46.764865 | 120 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model3_corr_cells_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load variables
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_2_erg=0, c_3_erg=0, c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and partially spatially correlated anelastic
attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
        Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_2_erg : double, optional
Value of ergodic geometrical spreading coefficient. The default is 0.
c_3_erg : double, optional
Value of ergodic Vs30 coefficient. The default is 0.
c_a_erg : double, optional
        Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
        Number of warm-up (burn-in) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','x_3','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[3,4]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#geometrical spreading covariates
x_2 = df_flatfile['x_2'].values
#vs30 covariates
x_3 = df_flatfile['x_3'].values[sta_idx]
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
    #find cells crossed by at least one source-to-site path
    i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
    #print Rrup misfit (check that cell path distances sum to the rupture distance)
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
'x_2': x_2,
'x_3': x_3,
'c_2_erg': c_2_erg,
'c_3_erg': c_3_erg,
'c_a_erg': c_a_erg,
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'RC': celldist_valid.to_numpy(),
}
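    #note: model 3 additionally passes the record-level covariate x_2, the
    #station-level covariate x_3, and the ergodic coefficients c_2_erg, c_3_erg,
    #and c_a_erg, which presumably serve as prior means for the spatially varying
    #c_2p, c_3s, and cell-specific c_cap terms in the Stan program.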
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','mu_2p','mu_3s',
'ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'ell_2p', 'ell_3s', 'omega_2p', 'omega_3s',
'mu_cap', 'ell_ca1p', 'omega_ca1p', 'omega_ca2p',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_c_2p = ['c_2p.%i'%(k) for k in range(n_eq)]
col_names_c_3s = ['c_3s.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = (col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs +
col_names_c_2p + col_names_c_3s + col_names_cap + col_names_dB)
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_2p')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_3s')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T],
                            ignore_index=True) #DataFrame.append was removed in pandas>=2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
    df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_c_2p, col_names_c_3s, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid.values @ cells_ca_mu
cells_LcA_med = celldist_valid.values @ cells_ca_med
cells_LcA_sig = np.sqrt(np.square(celldist_valid.values) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
#spatially varying geometrical spreading coefficient
coeff_2p_mu = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].mean() for k in range(n_eq)])
coeff_2p_mu = coeff_2p_mu[eq_inv]
coeff_2p_med = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].median() for k in range(n_eq)])
coeff_2p_med = coeff_2p_med[eq_inv]
coeff_2p_sig = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].std() for k in range(n_eq)])
coeff_2p_sig = coeff_2p_sig[eq_inv]
#spatially varying Vs30 coefficient
coeff_3s_mu = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].mean() for k in range(n_sta)])
coeff_3s_mu = coeff_3s_mu[sta_inv]
coeff_3s_med = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].median() for k in range(n_sta)])
coeff_3s_med = coeff_3s_med[sta_inv]
coeff_3s_sig = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].std() for k in range(n_sta)])
coeff_3s_sig = coeff_3s_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
    #initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_2p_mu,
coeff_3s_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_2p_med,
coeff_3s_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
coeff_2p_sig,
coeff_3s_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','c_2p_mean','c_3s_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'c_2p_med', 'c_3s_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'c_2p_sig', 'c_3s_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + coeff_2p_mu*x_2 + coeff_3s_mu*x_3[sta_inv] + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 20,112 | 47.699758 | 128 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model2_uncorr_cells_sparse_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load variables
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
from scipy import sparse
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
varying earthquake constant, a spatially varying site constant, a spatially
independent site constant, and uncorrelated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
        Dataframe with cell path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_a_erg : double, optional
        Value of ergodic anelastic attenuation coefficient. Used as mean of cell
specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
        Number of warm-up (burn-in) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
    stan_parallel : bool, optional
        Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
    #find cells crossed by at least one source-to-site path
    i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid].to_numpy() #cell-distance with only non-zero cells
celldist_valid_sp = sparse.csr_matrix(celldist_valid)
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
    #print Rrup misfit (check that cell path distances sum to the rupture distance)
print('max R_rup misfit', np.abs(df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'NCELL_SP': len(celldist_valid_sp.data),
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'rec_mu': np.zeros(y_data.shape),
'RC_val': celldist_valid_sp.data,
'RC_w': celldist_valid_sp.indices+1,
'RC_u': celldist_valid_sp.indptr+1,
'c_a_erg': c_a_erg,
'Y': y_data,
}
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'mu_cap', 'omega_cap',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs + col_names_cap + col_names_dB
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T],
                            ignore_index=True) #DataFrame.append was removed in pandas>=2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
#detailed posterior percentiles of posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid_sp @ cells_ca_mu
cells_LcA_med = celldist_valid_sp @ cells_ca_med
cells_LcA_sig = np.sqrt(celldist_valid_sp.power(2) @ cells_ca_sig**2)
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent station constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
    #initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
| 18,013 | 46.782493 | 120 | py |
ngmm_tools | ngmm_tools-master/Analyses/Python_lib/regression/cmdstan/regression_cmdstan_model3_corr_cells_sparse_unbounded_hyp.py | """
Created on Wed Dec 29 15:13:49 2021
@author: glavrent
"""
#load variables
import pathlib
from joblib import cpu_count
#arithmetic libraries
import numpy as np
from scipy import sparse
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
from matplotlib.ticker import AutoLocator as plt_autotick
import arviz as az
mpl.use('agg')
#stan library
import cmdstanpy
def RunStan(df_flatfile, df_cellinfo, df_celldist, stan_model_fname,
out_fname, out_dir, res_name='res', c_2_erg=0, c_3_erg=0, c_a_erg=0,
n_iter_warmup=300, n_iter_sampling=300, n_chains=4,
adapt_delta=0.8, max_treedepth=10,
stan_parallel=False):
'''
    Run full Bayesian regression in Stan. Non-ergodic model includes: a spatially
    varying earthquake constant, a spatially varying site constant, a spatially
    independent site constant, a spatially varying geometrical-spreading
    coefficient, a spatially varying Vs30 coefficient, and partially spatially
    correlated anelastic attenuation.
Parameters
----------
df_flatfile : pd.DataFrame
Input data frame containing total residuals, eq and site coordinates.
df_cellinfo : pd.DataFrame
Dataframe with coordinates of anelastic attenuation cells.
df_celldist : pd.DataFrame
        Dataframe with cell-path distances of all records in df_flatfile.
stan_model_fname : string
File name for stan model.
out_fname : string
File name for output files.
out_dir : string
Output directory.
res_name : string, optional
Column name for total residuals. The default is 'res'.
c_2_erg : double, optional
Value of ergodic geometrical spreading coefficient. The default is 0.
c_3_erg : double, optional
Value of ergodic Vs30 coefficient. The default is 0.
c_a_erg : double, optional
        Value of ergodic anelastic attenuation coefficient. Used as mean of the
        cell-specific anelastic attenuation prior distribution. The default is 0.
n_iter_warmup : integer, optional
        Number of warm-up (burn-in) MCMC samples. The default is 300.
n_iter_sampling : integer, optional
Number of MCMC samples for computing the posterior distributions. The default is 300.
n_chains : integer, optional
Number of MCMC chains. The default is 4.
adapt_delta : double, optional
Target average proposal acceptance probability for adaptation. The default is 0.8.
max_treedepth : integer, optional
Maximum number of evaluations for each iteration (2^max_treedepth). The default is 10.
stan_parallel : bool, optional
Flag for using multithreaded option in STAN. The default is False.
Returns
-------
None.
'''
## Preprocess Input Data
#set rsn column as dataframe index, skip if rsn already the index
if not df_flatfile.index.name == 'rsn':
df_flatfile.set_index('rsn', drop=True, inplace=True)
if not df_celldist.index.name == 'rsn':
df_celldist.set_index('rsn', drop=True, inplace=True)
#set cellid column as dataframe index, skip if cellid already the index
if not df_cellinfo.index.name == 'cellid':
df_cellinfo.set_index('cellid', drop=True, inplace=True)
#number of data
n_data = len(df_flatfile)
#earthquake data
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_inverse=True, return_index=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
#create earthquake ids for all records (1 to n_eq)
eq_id = eq_inv + 1
n_eq = len(data_eq)
#station data
data_sta_all = df_flatfile[['ssn','Vs30','x_3','staX','staY']].values
_, sta_idx, sta_inv = np.unique( df_flatfile[['ssn']].values, axis = 0, return_inverse=True, return_index=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[3,4]] #station coordinates
#create station indices for all records (1 to n_sta)
sta_id = sta_inv + 1
n_sta = len(data_sta)
#geometrical spreading covariates
x_2 = df_flatfile['x_2'].values
#vs30 covariates
x_3 = df_flatfile['x_3'].values[sta_idx]
#ground-motion observations
y_data = df_flatfile[res_name].to_numpy().copy()
#cell data
#reorder and only keep records included in the flatfile
df_celldist = df_celldist.reindex(df_flatfile.index)
#cell info
cell_ids_all = df_cellinfo.index
cell_names_all = df_cellinfo.cellname
#cell distance matrix
celldist_all = df_celldist[cell_names_all] #cell-distance matrix with all cells
    #find cells crossed by at least one path
    i_cells_valid = np.where(celldist_all.sum(axis=0) > 0)[0] #valid cells crossed by at least one path
cell_ids_valid = cell_ids_all[i_cells_valid]
cell_names_valid = cell_names_all[i_cells_valid]
celldist_valid = celldist_all.loc[:,cell_names_valid] #cell-distance with only non-zero cells
celldist_valid_sp = sparse.csr_matrix(celldist_valid)
#number of cells
n_cell = celldist_all.shape[1]
n_cell_valid = celldist_valid.shape[1]
#cell coordinates
X_cells_valid = df_cellinfo.loc[i_cells_valid,['mptX','mptY']].values
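    #note: .loc above selects by cellid label; this assumes the cellid labels
    #coincide with the positional indices returned by np.where (otherwise .iloc
    #would be needed)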
    #print Rrup misfits (check that cell-path distances sum to the rupture distance)
print('max R_rup misfit', (df_flatfile.Rrup.values - celldist_valid.sum(axis=1)).abs().max())
stan_data = {'N': n_data,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell_valid,
'NCELL_SP': len(celldist_valid_sp.data),
'eq': eq_id, #earthquake id
'stat': sta_id, #station id
'rec_mu': np.zeros(y_data.shape),
'Y': y_data,
'x_2': x_2,
'x_3': x_3,
'c_2_erg': c_2_erg,
'c_3_erg': c_3_erg,
'c_a_erg': c_a_erg,
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cells_valid,
'RC_val': celldist_valid_sp.data,
'RC_w': celldist_valid_sp.indices+1,
'RC_u': celldist_valid_sp.indptr+1,
}
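    #note on the sparse cell-distance matrix: scipy CSR storage (data, column
    #indices, row pointers) is passed to Stan, presumably for its compressed-row-
    #storage utilities (e.g. csr_matrix_times_vector); Stan indexing is 1-based,
    #hence the +1 on 'RC_w' and 'RC_u'.
    #e.g. for [[1., 0.], [0., 2.]]: data=[1., 2.], indices=[0, 1], indptr=[0, 1, 2]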
stan_data_fname = out_dir + out_fname + '_stan_data' + '.json'
#create output directory
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
#write as json file
cmdstanpy.utils.write_stan_json(stan_data_fname, stan_data)
## Run Stan, fit model
#number of cores
n_cpu = max(cpu_count() -1,1)
#run stan
if (not stan_parallel) or n_cpu<=n_chains:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname)
stan_model.compile(force=True)
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
else:
#compile stan model
stan_model = cmdstanpy.CmdStanModel(stan_file=stan_model_fname, cpp_options={"STAN_THREADS": True})
stan_model.compile(force=True)
#number of cores per chain
n_cpu_chain = int(np.floor(n_cpu/n_chains))
#run full MCMC sampler
stan_fit = stan_model.sample(data=stan_data_fname, chains=n_chains, threads_per_chain=n_cpu_chain,
iter_warmup=n_iter_warmup, iter_sampling=n_iter_sampling,
seed=1, max_treedepth=max_treedepth, adapt_delta=adapt_delta,
show_progress=True, output_dir=out_dir+'stan_fit/')
## Postprocessing Data
## Extract posterior samples
#hyper-parameters
col_names_hyp = ['dc_0','mu_2p','mu_3s',
'ell_1e', 'ell_1as', 'omega_1e', 'omega_1as', 'omega_1bs',
'ell_2p', 'ell_3s', 'omega_2p', 'omega_3s',
'mu_cap', 'ell_ca1p', 'omega_ca1p', 'omega_ca2p',
'phi_0','tau_0']
#non-ergodic terms
col_names_dc_1e = ['dc_1e.%i'%(k) for k in range(n_eq)]
col_names_dc_1as = ['dc_1as.%i'%(k) for k in range(n_sta)]
col_names_dc_1bs = ['dc_1bs.%i'%(k) for k in range(n_sta)]
col_names_c_2p = ['c_2p.%i'%(k) for k in range(n_eq)]
col_names_c_3s = ['c_3s.%i'%(k) for k in range(n_sta)]
col_names_dB = ['dB.%i'%(k) for k in range(n_eq)]
col_names_cap = ['c_cap.%i'%(c_id) for c_id in cell_ids_valid]
col_names_all = (col_names_hyp + col_names_dc_1e + col_names_dc_1as + col_names_dc_1bs +
col_names_c_2p + col_names_c_3s + col_names_cap + col_names_dB)
#summarize raw posterior distributions
stan_posterior = np.stack([stan_fit.stan_variable(c_n) for c_n in col_names_hyp], axis=1)
#adjustment terms
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1e')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1as')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dc_1bs')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_2p')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_3s')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('c_cap')), axis=1)
stan_posterior = np.concatenate((stan_posterior, stan_fit.stan_variable('dB')), axis=1)
#save raw-posterior distribution
df_stan_posterior_raw = pd.DataFrame(stan_posterior, columns = col_names_all)
df_stan_posterior_raw.to_csv(out_dir + out_fname + '_stan_posterior_raw' + '.csv', index=False)
## Summarize hyper-parameters
#summarize posterior distributions of hyper-parameters
perc_array = np.array([0.05,0.25,0.5,0.75,0.95])
df_stan_hyp = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
    df_stan_hyp = pd.concat([df_stan_hyp, df_stan_posterior_raw[col_names_hyp].mean(axis=0).to_frame().T], ignore_index=True) #DataFrame.append was removed in pandas>=2.0
df_stan_hyp.index = ['prc_%.2f'%(prc) for prc in perc_array]+['mean']
df_stan_hyp.to_csv(out_dir + out_fname + '_stan_hyperparameters' + '.csv', index=True)
    #detailed percentiles of the posterior distributions
perc_array = np.arange(0.01,0.99,0.01)
df_stan_posterior = df_stan_posterior_raw[col_names_hyp].quantile(perc_array)
df_stan_posterior.index.name = 'prc'
df_stan_posterior.to_csv(out_dir + out_fname + '_stan_hyperposterior' + '.csv', index=True)
del col_names_dc_1e, col_names_dc_1as, col_names_dc_1bs, col_names_c_2p, col_names_c_3s, col_names_dB
del stan_posterior, col_names_all
## Sample spatially varying coefficients and predictions at record locations
# earthquake and station location in database
X_eq_all = df_flatfile[['eqX', 'eqY']].values
X_sta_all = df_flatfile[['staX','staY']].values
# GMM anelastic attenuation
cells_ca_mu = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].mean() for k in cell_ids_valid])
cells_ca_med = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].median() for k in cell_ids_valid])
cells_ca_sig = np.array([df_stan_posterior_raw.loc[:,'c_cap.%i'%(k)].std() for k in cell_ids_valid])
#effect of anelastic attenuation in GM
cells_LcA_mu = celldist_valid_sp @ cells_ca_mu
cells_LcA_med = celldist_valid_sp @ cells_ca_med
cells_LcA_sig = np.sqrt(celldist_valid_sp.power(2) @ cells_ca_sig**2)
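    #note: the path-sum uncertainty above is propagated as
    #sig_LcA = sqrt( sum_j R_j^2 * sig_cj^2 ), which treats the posterior cell
    #coefficients as independent and ignores their posterior correlation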
#summary attenuation cells
catten_summary = np.vstack((np.tile(c_a_erg, n_cell_valid),
cells_ca_mu,
cells_ca_med,
cells_ca_sig)).T
columns_names = ['c_a_erg','c_cap_mean','c_cap_med','c_cap_sig']
df_catten_summary = pd.DataFrame(catten_summary, columns = columns_names, index=df_cellinfo.index[i_cells_valid])
#create dataframe with summary attenuation cells
df_catten_summary = pd.merge(df_cellinfo[['cellname','mptLat','mptLon','mptX','mptY','mptZ','UTMzone']],
df_catten_summary, how='right', left_index=True, right_index=True)
df_catten_summary.to_csv(out_dir + out_fname + '_stan_catten' + '.csv', index=True)
# GMM coefficients
#constant shift coefficient
coeff_0_mu = df_stan_posterior_raw.loc[:,'dc_0'].mean() * np.ones(n_data)
coeff_0_med = df_stan_posterior_raw.loc[:,'dc_0'].median() * np.ones(n_data)
coeff_0_sig = df_stan_posterior_raw.loc[:,'dc_0'].std() * np.ones(n_data)
#spatially varying earthquake constant coefficient
coeff_1e_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].mean() for k in range(n_eq)])
coeff_1e_mu = coeff_1e_mu[eq_inv]
coeff_1e_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].median() for k in range(n_eq)])
coeff_1e_med = coeff_1e_med[eq_inv]
coeff_1e_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1e.{k}'].std() for k in range(n_eq)])
coeff_1e_sig = coeff_1e_sig[eq_inv]
    #spatially varying site constant coefficient
coeff_1as_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].mean() for k in range(n_sta)])
coeff_1as_mu = coeff_1as_mu[sta_inv]
coeff_1as_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].median() for k in range(n_sta)])
coeff_1as_med = coeff_1as_med[sta_inv]
coeff_1as_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1as.{k}'].std() for k in range(n_sta)])
coeff_1as_sig = coeff_1as_sig[sta_inv]
    #spatially independent site constant coefficient
coeff_1bs_mu = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].mean() for k in range(n_sta)])
coeff_1bs_mu = coeff_1bs_mu[sta_inv]
coeff_1bs_med = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].median() for k in range(n_sta)])
coeff_1bs_med = coeff_1bs_med[sta_inv]
coeff_1bs_sig = np.array([df_stan_posterior_raw.loc[:,f'dc_1bs.{k}'].std() for k in range(n_sta)])
coeff_1bs_sig = coeff_1bs_sig[sta_inv]
#spatially varying geometrical spreading coefficient
coeff_2p_mu = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].mean() for k in range(n_eq)])
coeff_2p_mu = coeff_2p_mu[eq_inv]
coeff_2p_med = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].median() for k in range(n_eq)])
coeff_2p_med = coeff_2p_med[eq_inv]
coeff_2p_sig = np.array([df_stan_posterior_raw.loc[:,f'c_2p.{k}'].std() for k in range(n_eq)])
coeff_2p_sig = coeff_2p_sig[eq_inv]
#spatially varying Vs30 coefficient
coeff_3s_mu = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].mean() for k in range(n_sta)])
coeff_3s_mu = coeff_3s_mu[sta_inv]
coeff_3s_med = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].median() for k in range(n_sta)])
coeff_3s_med = coeff_3s_med[sta_inv]
coeff_3s_sig = np.array([df_stan_posterior_raw.loc[:,f'c_3s.{k}'].std() for k in range(n_sta)])
coeff_3s_sig = coeff_3s_sig[sta_inv]
# aleatory variability
phi_0_array = np.array([df_stan_posterior_raw.phi_0.mean()]*X_sta_all.shape[0])
tau_0_array = np.array([df_stan_posterior_raw.tau_0.mean()]*X_sta_all.shape[0])
    #initialize flatfile for summary of non-ergodic coefficients and residuals
df_flatinfo = df_flatfile[['eqid','ssn','eqLat','eqLon','staLat','staLon','eqX','eqY','staX','staY','UTMzone']]
#summary coefficients
coeffs_summary = np.vstack((coeff_0_mu,
coeff_1e_mu,
coeff_1as_mu,
coeff_1bs_mu,
coeff_2p_mu,
coeff_3s_mu,
cells_LcA_mu,
coeff_0_med,
coeff_1e_med,
coeff_1as_med,
coeff_1bs_med,
coeff_2p_med,
coeff_3s_med,
cells_LcA_med,
coeff_0_sig,
coeff_1e_sig,
coeff_1as_sig,
coeff_1bs_sig,
coeff_2p_sig,
coeff_3s_sig,
cells_LcA_sig)).T
columns_names = ['dc_0_mean','dc_1e_mean','dc_1as_mean','dc_1bs_mean','c_2p_mean','c_3s_mean','Lc_ca_mean',
'dc_0_med', 'dc_1e_med', 'dc_1as_med', 'dc_1bs_med', 'c_2p_med', 'c_3s_med', 'Lc_ca_med',
'dc_0_sig', 'dc_1e_sig', 'dc_1as_sig', 'dc_1bs_sig', 'c_2p_sig', 'c_3s_sig', 'Lc_ca_sig']
df_coeffs_summary = pd.DataFrame(coeffs_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with summary coefficients
df_coeffs_summary = pd.merge(df_flatinfo, df_coeffs_summary, how='right', left_index=True, right_index=True)
df_coeffs_summary[['eqid','ssn']] = df_coeffs_summary[['eqid','ssn']].astype(int)
df_coeffs_summary.to_csv(out_dir + out_fname + '_stan_coefficients' + '.csv', index=True)
# GMM prediction
#mean prediction
y_mu = (coeff_0_mu + coeff_1e_mu + coeff_1as_mu + coeff_1bs_mu + coeff_2p_mu*x_2 + coeff_3s_mu*x_3[sta_inv] + cells_LcA_mu)
#compute residuals
res_tot = y_data - y_mu
#residuals computed directly from stan regression
res_between = [df_stan_posterior_raw.loc[:,f'dB.{k}'].mean() for k in range(n_eq)]
res_between = np.array([res_between[k] for k in (eq_inv).astype(int)])
res_within = res_tot - res_between
#summary predictions and residuals
predict_summary = np.vstack((y_mu, res_tot, res_between, res_within)).T
columns_names = ['nerg_mu','res_tot','res_between','res_within']
df_predict_summary = pd.DataFrame(predict_summary, columns = columns_names, index=df_flatfile.index)
#create dataframe with predictions and residuals
df_predict_summary = pd.merge(df_flatinfo, df_predict_summary, how='right', left_index=True, right_index=True)
df_predict_summary[['eqid','ssn']] = df_predict_summary[['eqid','ssn']].astype(int)
df_predict_summary.to_csv(out_dir + out_fname + '_stan_residuals' + '.csv', index=True)
## Summary regression
#save summary statistics
stan_summary_fname = out_dir + out_fname + '_stan_summary' + '.txt'
with open(stan_summary_fname, 'w') as f:
print(stan_fit, file=f)
#create and save trace plots
fig_dir = out_dir + 'summary_figs/'
    #create figures directory if it doesn't exist
pathlib.Path(fig_dir).mkdir(parents=True, exist_ok=True)
#create stan trace plots
stan_az_fit = az.from_cmdstanpy(stan_fit)
# stan_az_fit = az.from_cmdstanpy(stan_fit, posterior_predictive='Y')
for c_name in col_names_hyp:
#create trace plot with arviz
ax = az.plot_trace(stan_az_fit, var_names=c_name, figsize=(10,5) ).ravel()
ax[0].yaxis.set_major_locator(plt_autotick())
ax[0].set_xlabel('sample value')
ax[0].set_ylabel('frequency')
ax[0].set_title('')
ax[0].grid(axis='both')
ax[1].set_xlabel('iteration')
ax[1].set_ylabel('sample value')
ax[1].grid(axis='both')
ax[1].set_title('')
fig = ax[0].figure
fig.suptitle(c_name)
fig.savefig(fig_dir + out_fname + '_stan_traceplot_' + c_name + '_arviz' + '.png')
return None
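#minimal usage sketch (for illustration only; the CSV file names and the Stan
#model path below are hypothetical placeholders, not files shipped with this module)
if __name__ == '__main__':
    df_flatfile = pd.read_csv('flatfile.csv')                     #total residuals, eq/site info
    df_cellinfo = pd.read_csv('cellinfo.csv')                     #cell coordinates
    df_celldist = pd.read_csv('distancematrix.csv', index_col=0)  #cell-path distances
    RunStan(df_flatfile, df_cellinfo, df_celldist,
            stan_model_fname='model3_corr_cells_sparse.stan',     #hypothetical path
            out_fname='example_run', out_dir='regression_out/', res_name='res')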
| 20,349 | 47.684211 | 128 | py |
ngmm_tools | ngmm_tools-master/Analyses/Prediction/create_scen_dataframe.py | """
Created on Sat Aug 20 17:26:17 2022
@author: glavrent
"""
#load libraries
import os
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#geographic libraries
import pyproj
import geopy.distance
#user libraries
sys.path.insert(0,'../Python_lib/ground_motions')
from pylib_gmm_eas import BA18
ba18 = BA18()
# Define Problem
#structural period
freq = 5.0119
#earthquake scenario
mag = 7.0
vs30 = 400
sof = 'SS'
dip = 90
z_tor = 0
#color bar limits
cbar_lim = [np.log(1e-8),np.log(.06)]
#earthquake coordinates
scen_eq_latlon = [34.2, -116.9]
#utm zone
utm_zone = '11S'
#grid
grid_X_dxdy = [10, 10]
#scenario filename
fname_scen_predict = '../../Data/Prediction/scen_predict.csv'
# UTM projection
# projection system
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone+", +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
#grid limits in UTM
grid_X_win = np.array([[-140, 3500], [780, 4700]])
#create coordinate grid
grid_x_edge = np.arange(grid_X_win[0,0],grid_X_win[1,0],grid_X_dxdy[0])
grid_y_edge = np.arange(grid_X_win[0,1],grid_X_win[1,1],grid_X_dxdy[0])
grid_x, grid_y = np.meshgrid(grid_x_edge, grid_y_edge)
#create coordinate array with all grid nodes
grid_X = np.vstack([grid_x.T.flatten(), grid_y.T.flatten()]).T
#compute lat/lon coordinate array
grid_latlon = np.fliplr(np.array([utmProj(g_x*1000, g_y*1000, inverse=True) for g_x, g_y in
zip(grid_X[:,0], grid_X[:,1])]))
n_gpt = len(grid_X)
#earthquake UTM coordinates
scen_eq_X = np.array(utmProj(scen_eq_latlon[1], scen_eq_latlon[0])) / 1000
#create earthquake and site ids
eqid_array = np.full(n_gpt, -1)
site_array = -1*(1+np.arange(n_gpt))
# Compute Ergodic Base Scaling
#compute distances
scen_dist_array = np.linalg.norm(grid_X - scen_eq_X, axis=1)
scen_dist_array = np.sqrt(scen_dist_array**2 + z_tor**2)
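#note: rupture distance is approximated as a point source,
#Rrup = sqrt(Repi^2 + z_tor^2), with Repi the epicentral distance on the UTM grid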
#scenarios of interest
scen_eas_nerg_scl = np.full(n_gpt, np.nan)
scen_eas_nerg_aleat = np.full(n_gpt, np.nan)
for k, d in enumerate(scen_dist_array):
fnorm = 1 if sof == 'SS' else 0
#median and aleatory
scen_eas_nerg_scl[k], _, scen_eas_nerg_aleat[k] = ba18.EasF(freq, mag, rrup=d, vs30=vs30, ztor=z_tor, fnorm=fnorm, flag_keep_b7 = False)
# Summarize Scenario Dataframe
df_scen_prdct = pd.DataFrame({'eqid':eqid_array, 'ssn':site_array,
                              'eqLat':np.full(n_gpt,scen_eq_latlon[0]), 'eqLon':np.full(n_gpt,scen_eq_latlon[1]),
'staLat':grid_latlon[:,0], 'staLon':grid_latlon[:,1],
'eqX':np.full(n_gpt,scen_eq_X[0]), 'eqY':np.full(n_gpt,scen_eq_X[1]), 'eqZ':np.full(n_gpt,-z_tor),
'staX':grid_X[:,0], 'staY':grid_X[:,1],
'erg_base':scen_eas_nerg_scl})
#save prediction scenarios
df_scen_prdct.to_csv(fname_scen_predict)
| 3,049 | 28.61165 | 140 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/synthetic_datasets/create_synthetic_ds1.py | """
Created on Thu Jul 1 21:25:34 2021
@author: glavrent
"""
# load libraries
import os
import pathlib
# arithmetic libraries
import numpy as np
# statistics libraries
import pandas as pd
# python interface to Stan for Bayesian inference
# for installation check https://pystan.readthedocs.io/en/latest/
import pystan
# set working directories
os.chdir(os.getcwd()) # change directory to current directory
# %% Define Input Data
# USER SETS THE INPUT FLATFILE NAMES AND PATH
# ++++++++++++++++++++++++++++++++++++++++
#input flatfile
# fname_flatfile = 'CatalogNGAWest3CA'
# fname_flatfile = 'CatalogNGAWest3CA_2013'
# fname_flatfile = 'CatalogNGAWest3NCA'
# fname_flatfile = 'CatalogNGAWest3SCA'
fname_flatfile = 'CatalogNGAWest3CALite'
dir_flatfile = '../../../Data/Validation/preprocessing/flatfiles/merged/'
# ++++++++++++++++++++++++++++++++++++++++
# USER SETS THE INPUT FLATFILE NAMES AND PATH
# ++++++++++++++++++++++++++++++++++++++++
fname_stan_model = 'create_synthetic_ds1.stan'
# ++++++++++++++++++++++++++++++++++++++++
# USER SETS THE OUTPUT FILE PATH AND NAME
# ++++++++++++++++++++++++++++++++++++++++
# output filename suffix
# synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
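# NOTE: uncomment one synds_suffix option above before running; dir_out below
# and the output file names depend on it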
# output directories
dir_out = f'../../../Data/Validation/synthetic_datasets/ds1{synds_suffix}/'
# ++++++++++++++++++++++++++++++++++++++++
# user defines hyper parameters
# number of synthetic data-sets
n_ds = 5
# number of chains and seed number in stan model
n_chains = 1
n_seed = 1
# define hyper-parameters
# omega_0: standard deviation for constant offset
# omega_1e: standard deviation for spatially varying earthquake constant
# omega_1as: standard deviation for spatially varying site constant
# omega_1bs: standard deviation for independent site constant
# ell_1e: correlation length for spatially varying earthquake constant
# ell_1as: correlation length for spatially varying site constant
# phi_0: within-event standard deviation
# tau_0: between-event standard deviation
# USER NEEDS TO SPECIFY HYPERPARAMETERS
# ++++++++++++++++++++++++++++++++++++++++
# # small correlation lengths
# hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
# 'ell_1e':60, 'ell_1as':30, 'phi_0':0.4, 'tau_0':0.3 }
# #large correlation lengths
# hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
# 'ell_1e':100, 'ell_1as':70, 'phi_0':0.4, 'tau_0':0.3 }
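# NOTE: uncomment one of the two hyperparameter sets above; 'hyp' must be
# defined before the Stan data block below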
# ++++++++++++++++++++++++++++++++++++++++
# %% Load Data
#load flatfile
fullname_flatfile = dir_flatfile + fname_flatfile + '.csv'
df_flatfile = pd.read_csv(fullname_flatfile)
# %% Processing
# read earthquake and station data from the flatfile
n_rec = len(df_flatfile)
# read earthquake data
# earthquake IDs (eqid), magnitudes (mag), and coordinates (eqX,eqY)
# user may change these IDs based on the headers of the flatfile
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_index=True, return_inverse=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] #earthquake coordinates
# create earthquake ids for all recordings
eq_id = eq_inv + 1
n_eq = len(data_eq)
# read station data
# station IDs (ssn), Vs30, and coordinates (staX,staY)
# user may change these IDs based on the headers of the flatfile
data_stat_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique(df_flatfile[['ssn']].values, axis = 0, return_index=True, return_inverse=True)
data_stat = data_stat_all[sta_idx,:]
X_stat = data_stat[:,[2,3]] #station coordinates
# create station ids for all recordings
sta_id = sta_inv + 1
n_stat = len(data_stat)
# %% Stan
## Stan Data
stan_data = {'N': n_rec,
'NEQ': n_eq,
'NSTAT': n_stat,
'X_e': X_eq, #earthquake coordinates
'X_s': X_stat, #station coordinates
'eq': eq_id, #earthquake index
'stat': sta_id, #station index
'mu_gmm': np.zeros(n_rec),
#hyper-parameters of generated data-set
'omega_0': hyp['omega_0'],
'omega_1e': hyp['omega_1e'],
'omega_1as': hyp['omega_1as'],
'omega_1bs': hyp['omega_1bs'],
'ell_1e': hyp['ell_1e'],
'ell_1as': hyp['ell_1as'],
#aleatory terms
'phi_0': hyp['phi_0'],
'tau_0': hyp['tau_0']
}
## Compile and Run Stan model
# compile model
sm = pystan.StanModel(file=fname_stan_model)
# generate samples
fit = sm.sampling(data=stan_data, algorithm="Fixed_param", iter=n_ds, chains=n_chains, seed=n_seed)
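# note: with algorithm="Fixed_param", Stan skips MCMC adaptation/sampling and only
# evaluates the generated-quantities block, so each iteration is one independent
# realization of the synthetic dataset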
# extract generated data-sets
Y_nerg_med = fit['Y_nerg_med']
Y_aleat = fit['Y_aleat']
Y_tot = fit['Y_tot']
# %% Output
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# save generated data-sets
for k, (Y_nm, Y_t) in enumerate(zip(Y_nerg_med, Y_tot)):
#copy catalog info to synthetic data-set
df_synthetic_data = df_flatfile.copy()
#add residuals columns
df_synthetic_data.loc[:,'nerg_gm'] = Y_nm
df_synthetic_data.loc[:,'tot'] = Y_t
#add columns with sampled coefficients
df_synthetic_data.loc[:,'dc_0'] = fit['dc_0'][k]
df_synthetic_data.loc[:,'dc_1e'] = fit['dc_1e'][k][eq_inv]
df_synthetic_data.loc[:,'dc_1as'] = fit['dc_1as'][k][sta_inv]
df_synthetic_data.loc[:,'dc_1bs'] = fit['dc_1bs'][k][sta_inv]
#add columns aleatory terms
df_synthetic_data.loc[:,'dW'] = fit['dW'][k]
df_synthetic_data.loc[:,'dB'] = fit['dB'][k][eq_inv]
#create data-frame with synthetic dataset
fname_synthetic_data = dir_out + f'{fname_flatfile}_synthetic_data{synds_suffix}_Y{k+1}.csv'
df_synthetic_data.to_csv(fname_synthetic_data, index=False)
| 6,252 | 34.528409 | 110 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/synthetic_datasets/create_synthetic_ds3.py | """
Created on Sun Dec 26 15:47:17 2021
@author: glavrent
"""
# load libraries
import os
import pathlib
# arithmetic libraries
import numpy as np
# statistics libraries
import pandas as pd
# python interface to Stan for Bayesian inference
# for installation check https://pystan.readthedocs.io/en/latest/
import pystan
# %% set working directories
os.chdir(os.getcwd()) # change directory to current directory
# %% Define Input Data
# USER SETS THE INPUT FLATFILE NAMES AND PATH
# ++++++++++++++++++++++++++++++++++++++++
#input flatfile
# fname_flatfile = 'CatalogNGAWest3CA'
# fname_flatfile = 'CatalogNGAWest3CA_2013'
# fname_flatfile = 'CatalogNGAWest3NCA'
# fname_flatfile = 'CatalogNGAWest3SCA'
fname_flatfile = 'CatalogNGAWest3CALite'
dir_flatfile = '../../../Data/Validation/preprocessing/flatfiles/merged/'
# cell data
# fname_cellinfo = 'CatalogNGAWest3CA_cellinfo.csv'
# fname_celldist = 'CatalogNGAWest3CA_distancematrix.csv'
# fname_cellinfo = 'CatalogNGAWest3CA_2013_cellinfo.csv'
# fname_celldist = 'CatalogNGAWest3CA_2013_distancematrix.csv'
fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo.csv'
fname_celldist = 'CatalogNGAWest3CALite_distancematrix.csv'
fname_celldist_sp = 'CatalogNGAWest3CALite_distancematrix_sparce.csv'
dir_celldata = '../../../Data/Validation/preprocessing/cell_distances/'
# ++++++++++++++++++++++++++++++++++++++++
# USER SETS THE INPUT FLATFILE NAMES AND PATH
# ++++++++++++++++++++++++++++++++++++++++
fname_stan_model = 'create_synthetic_ds3.stan'
# ++++++++++++++++++++++++++++++++++++++++
# USER SETS THE OUTPUT FILE PATH AND NAME
# ++++++++++++++++++++++++++++++++++++++++
# output filename suffix
# synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
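# NOTE: uncomment one synds_suffix option above, and one of the hyperparameter
# sets ('hyp') further down, before running this script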
# output directories
dir_out = f'../../../Data/Validation/synthetic_datasets/ds3{synds_suffix}/'
# ++++++++++++++++++++++++++++++++++++++++
# number of synthetic data-sets
n_dataset = 5
n_attempts = 500
# number of chains and seed number in stan model
n_chains = 1
n_seed = 1
# define hyper-parameters
# omega_0: standard deviation for constant offset
# omega_1e: standard deviation for spatially varying earthquake constant
# omega_1as: standard deviation for spatially varying site constant
# omega_1bs: standard deviation for independent site constant
# ell_1e: correlation length for spatially varying earthquake constant
# ell_1as: correlation length for spatially varying site constant
# c_2_erg: ergodic geometrical-spreading coefficient
# omega_2: standard deviation for shift in average geometrical-spreading
# omega_2p: standard deviation for spatially varying geometrical-spreading coefficient
# ell_2p: correlation length for spatially varying geometrical-spreading coefficient
# c_3_erg: ergodic Vs30 scaling coefficient
# omega_3: standard deviation for shift in average Vs30 scaling
# omega_3s: standard deviation for spatially varying Vs30 scaling
# ell_3s: correlation length for spatially varying Vs30 scaling
# c_cap_erg: ergodic cell-specific anelastic attenuation
# omega_cap_mu: standard deviation for constant offset of cell-specific anelastic attenuation
# omega_ca1p: standard deviation for spatially varying component of cell-specific anelastic attenuation
# omega_ca2p: standard deviation for spatially independent component of cell-specific anelastic attenuation
# ell_ca1p: correlation length for spatially varying component of cell-specific anelastic attenuation
# phi_0: within-event standard deviation
# tau_0: between-event standard deviation
# USER NEEDS TO SPECIFY HYPERPARAMETERS
# ++++++++++++++++++++++++++++++++++++++++
# # small correlation lengths
# hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
# 'ell_1e':60, 'ell_1as':30,
# 'c_2_erg': -2.0,
# 'omega_2': 0.2,
# 'omega_2p': 0.15, 'ell_2p': 80,
# 'c_3_erg':-0.6,
# 'omega_3': 0.15,
# 'omega_3s': 0.15, 'ell_3s': 130,
# 'c_cap_erg': -0.011,
# 'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
# 'ell_ca1p': 75,
# 'phi_0':0.3, 'tau_0':0.25 }
# # large correlation lengths
# hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
# 'ell_1e':100, 'ell_1as':70,
# 'c_2_erg': -2.0,
# 'omega_2': 0.2,
# 'omega_2p': 0.15, 'ell_2p': 140,
# 'c_3_erg':-0.6,
# 'omega_3': 0.15,
# 'omega_3s': 0.15, 'ell_3s': 180,
# 'c_cap_erg': -0.02,
# 'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
# 'ell_ca1p': 120,
# 'phi_0':0.3, 'tau_0':0.25}
# ++++++++++++++++++++++++++++++++++++++++
#pseudo-depth term for magnitude saturation
h_M = 4
# %% Load Data
# load flatfile
fullname_flatfile = dir_flatfile + fname_flatfile + '.csv'
df_flatfile = pd.read_csv(fullname_flatfile)
# load celldata
df_cell_dist = pd.read_csv(dir_celldata + fname_celldist, index_col=0)
df_cell_dist_sp = pd.read_csv(dir_celldata + fname_celldist_sp)
df_cell_info = pd.read_csv(dir_celldata + fname_cellinfo)
# %% Processing
# read earthquake and station data from the flatfile
n_rec = len(df_flatfile)
# read earthquake data
# earthquake IDs (eqid), magnitudes (mag), and coordinates (eqX,eqY)
# user may change these IDs based on the headers of the flatfile
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_index=True, return_inverse=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] # earthquake coordinates
# create earthquake ids for all recordings
eq_id = eq_inv + 1
n_eq = len(data_eq)
# read station data
# station IDs (ssn), Vs30, and coordinates (staX,staY)
# user may change these IDs based on the headers of the flatfile
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique(df_flatfile[['ssn']].values, axis = 0, return_index=True, return_inverse=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] # station coordinates
# create station ids for all recordings
sta_id = sta_inv + 1
n_sta = len(data_sta)
# geometrical spreading covariate
x_2 = np.log(np.sqrt(df_flatfile.Rrup.values**2 + h_M**2))
#vs30 covariate
x_3 = np.log(np.minimum(data_sta[:,1], 1000)/1000)
assert not np.isnan(x_3).any(), 'Error. Invalid Vs30 values'
# read cell data
n_cell = len(df_cell_info)
df_cell_dist = df_cell_dist.reindex(df_flatfile.rsn) #cell distance matrix for records in the synthetic data-set
# cell names
cells_names = df_cell_info.cellname.values
cells_id = df_cell_info.cellid.values
# cell distance matrix
cell_dmatrix = df_cell_dist.loc[:,cells_names].values
# cell coordinates
X_cell = df_cell_info[['mptX','mptY']].values
# valid cells
i_val_cells = cell_dmatrix.sum(axis=0) > 0
# %% Stan
## Stan Data
stan_data = {'N': n_rec,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell,
'eq': eq_id, #earthquake index
'stat': sta_id, #station index
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cell, #cell coordinates
'RC': cell_dmatrix, #cell distances
'mu_gmm': np.zeros(n_rec),
#covariates
'x_2': x_2, #geometrical spreading
'x_3': x_3, #Vs30 scaling
#hyper-parameters of generated data-set
'omega_0': hyp['omega_0'],
'omega_1e': hyp['omega_1e'],
'omega_1as': hyp['omega_1as'],
'omega_1bs': hyp['omega_1bs'],
'ell_1e': hyp['ell_1e'],
'ell_1as': hyp['ell_1as'],
'c_2_erg': hyp['c_2_erg'],
'omega_2': hyp['omega_2'],
'omega_2p': hyp['omega_2p'],
'ell_2p': hyp['ell_2p'],
'c_3_erg': hyp['c_3_erg'],
'omega_3': hyp['omega_3'],
'omega_3s': hyp['omega_3s'],
'ell_3s': hyp['ell_3s'],
#anelastic attenuation
'c_cap_erg': hyp['c_cap_erg'],
'omega_cap_mu': hyp['omega_cap_mu'],
'omega_ca1p': hyp['omega_ca1p'],
'omega_ca2p': hyp['omega_ca2p'],
'ell_ca1p': hyp['ell_ca1p'],
#aleatory terms
'phi_0': hyp['phi_0'],
'tau_0': hyp['tau_0']
}
## Compile and Run Stan model
# compile model
sm = pystan.StanModel(file=fname_stan_model)
# generate samples
fit = sm.sampling(data=stan_data, algorithm="Fixed_param", iter=n_attempts, chains=n_chains, seed=n_seed)
# select only data-sets with non-positive geometrical-spreading and anelastic attenuation coefficients
valid_dataset = np.array( n_attempts * [False] )
for k, (c_2p, c_cap) in enumerate(zip(fit['c_2p'], fit['c_cap'])):
valid_dataset[k] = np.all(c_2p <= 0 ) & np.all(c_cap <= 0 )
valid_dataset = np.where(valid_dataset)[0] #valid data-set ids
valid_dataset = valid_dataset[:min(n_dataset,len(valid_dataset))]
# keep valid datasets
Y_nerg_med = fit['Y_nerg_med'][valid_dataset]
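# note: 'Y_var_ceoff' below follows the (misspelled) variable name defined in the Stan model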
Y_var_coeff = fit['Y_var_ceoff'][valid_dataset]
Y_inattent = fit['Y_inattent'][valid_dataset]
Y_aleat = fit['Y_aleat'][valid_dataset]
Y_tot = fit['Y_tot'][valid_dataset]
c_cap = fit['c_cap'][valid_dataset]
# %% Output
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# save generated data-sets
for k, (k_vds, Y_nm, Y_vc, Y_iatt, Y_t) in enumerate(zip(valid_dataset, Y_nerg_med, Y_var_coeff, Y_inattent, Y_tot)):
#copy catalog info to synthetic data-set
df_synthetic_data = df_flatfile.copy()
#add covariates
df_synthetic_data.loc[:,'x_2'] = x_2
df_synthetic_data.loc[:,'x_3'] = x_3[sta_inv]
#add residuals columns
df_synthetic_data.loc[:,'nerg_gm'] = Y_nm
df_synthetic_data.loc[:,'vcoeff'] = Y_vc
df_synthetic_data.loc[:,'inatten'] = Y_iatt
df_synthetic_data.loc[:,'tot'] = Y_t
#add columns with sampled coefficients
df_synthetic_data.loc[:,'dc_0'] = fit['dc_0'][k_vds]
df_synthetic_data.loc[:,'dc_1e'] = fit['dc_1e'][k_vds][eq_inv]
df_synthetic_data.loc[:,'dc_1as'] = fit['dc_1as'][k_vds][sta_inv]
df_synthetic_data.loc[:,'dc_1bs'] = fit['dc_1bs'][k_vds][sta_inv]
df_synthetic_data.loc[:,'c_2'] = fit['c_2_mu'][k_vds]
df_synthetic_data.loc[:,'c_2p'] = fit['c_2p'][k_vds][eq_inv]
df_synthetic_data.loc[:,'c_3'] = fit['c_3_mu'][k_vds]
df_synthetic_data.loc[:,'c_3s'] = fit['c_3s'][k_vds][sta_inv]
#add columns aleatory terms
df_synthetic_data.loc[:,'dW'] = fit['dW'][k_vds]
df_synthetic_data.loc[:,'dB'] = fit['dB'][k_vds][eq_inv]
#create data-frame with synthetic dataset
fname_synthetic_data = dir_out + f'{fname_flatfile}_synthetic_data{synds_suffix}_Y{k+1}.csv'
df_synthetic_data.to_csv(fname_synthetic_data, index=False)
# save coefficients
for k, (k_vds, c_ca) in enumerate(zip(valid_dataset, c_cap)):
#create synthetic cell dataset
df_synthetic_cell = df_cell_info.copy()
#cell specific anelastic attenuation
df_synthetic_cell.loc[:,'c_cap_mu'] = fit['c_cap_mu'][k_vds]
df_synthetic_cell.loc[:,'c_cap'] = c_ca
#create data-frame with cell specific dataset
fname_synthetic_atten = dir_out + f'{fname_flatfile}_synthetic_atten{synds_suffix}_Y{k+1}.csv'
df_synthetic_cell.to_csv(fname_synthetic_atten, index=False)
# save cell data
fname_cell_info = dir_out + f'{fname_flatfile}_cellinfo.csv'
fname_cell_dist = dir_out + f'{fname_flatfile}_distancematrix.csv'
fname_cell_dist_sp = dir_out + f'{fname_flatfile}_distancematrix_sparse.csv'
df_cell_info.to_csv(fname_cell_info, index=False)
df_cell_dist.to_csv(fname_cell_dist)
df_cell_dist_sp.to_csv(fname_cell_dist_sp, index=False)
| 12,361 | 40.206667 | 117 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/synthetic_datasets/create_synthetic_ds2.py | """
Created on Thu Jul 1 21:25:34 2021
@author: glavrent
"""
# load libraries
import os
import pathlib
# arithmetic libraries
import numpy as np
# statistics libraries
import pandas as pd
# python interface to Stan for Bayesian inference
# for installation check https://pystan.readthedocs.io/en/latest/
import pystan
# %% set working directories
os.chdir(os.getcwd()) # change directory to current directory
# %% Define Input Data
# USER SETS THE INPUT FLATFILE NAMES AND PATH
# ++++++++++++++++++++++++++++++++++++++++
#input flatfile
# fname_flatfile = 'CatalogNGAWest3CA'
# fname_flatfile = 'CatalogNGAWest3CA_2013'
# fname_flatfile = 'CatalogNGAWest3NCA'
# fname_flatfile = 'CatalogNGAWest3SCA'
fname_flatfile = 'CatalogNGAWest3CALite'
dir_flatfile = '../../../Data/Validation/preprocessing/flatfiles/merged/'
# cell data
# fname_cellinfo = 'CatalogNGAWest3CA_cellinfo.csv'
# fname_celldist = 'CatalogNGAWest3CA_distancematrix.csv'
# fname_cellinfo = 'CatalogNGAWest3CA_2013_cellinfo.csv'
# fname_celldist = 'CatalogNGAWest3CA_2013_distancematrix.csv'
fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo.csv'
fname_celldist = 'CatalogNGAWest3CALite_distancematrix.csv'
fname_celldist_sp = 'CatalogNGAWest3CALite_distancematrix_sparce.csv'
dir_celldata = '../../../Data/Validation/preprocessing/cell_distances/'
# ++++++++++++++++++++++++++++++++++++++++
# USER SETS THE INPUT FLATFILE NAMES AND PATH
# ++++++++++++++++++++++++++++++++++++++++
fname_stan_model = 'create_synthetic_ds2.stan'
# ++++++++++++++++++++++++++++++++++++++++
# USER SETS THE OUTPUT FILE PATH AND NAME
# ++++++++++++++++++++++++++++++++++++++++
# output filename suffix
# synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
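# NOTE: as in the other dataset scripts, uncomment one synds_suffix option above
# and one hyperparameter set ('hyp') below before running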
# output directories
dir_out = f'../../../Data/Validation/synthetic_datasets/ds2{synds_suffix}/'
# ++++++++++++++++++++++++++++++++++++++++
# number of synthetic data-sets
n_dataset = 5
n_attempts = 500
# number of chains and seed number in stan model
n_chains = 1
n_seed = 1
# define hyper-parameters
# omega_0: standard deviation for constant offset
# omega_1e: standard deviation for spatially varying earthquake constant
# omega_1as: standard deviation for spatially varying site constant
# omega_1bs: standard deviation for independent site constant
# ell_1e: correlation length for spatially varying earthquake constant
# ell_1as: correlation length for spatially varying site constant
# c_cap_erg: ergodic cell-specific anelastic attenuation
# omega_cap_mu: standard deviation for constant offset of cell-specific anelastic attenuation
# omega_ca1p: standard deviation for spatially varying component of cell-specific anelastic attenuation
# omega_ca2p: standard deviation for spatially independent component of cell-specific anelastic attenuation
# ell_ca1p: correlation length for spatially varying component of cell-specific anelastic attenuation
# phi_0: within-event standard deviation
# tau_0: between-event standard deviation
# USER NEEDS TO SPECIFY HYPERPARAMETERS
# ++++++++++++++++++++++++++++++++++++++++
# # small correlation lengths
# hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
# 'ell_1e':60, 'ell_1as':30,
# 'c_cap_erg': -0.011,
# 'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
# 'ell_ca1p': 75,
# 'phi_0':0.4, 'tau_0':0.3 }
# # large correlation lengths
# hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
# 'ell_1e':100, 'ell_1as':70,
# 'c_cap_erg': -0.02,
# 'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
# 'ell_ca1p': 120,
# 'phi_0':0.4, 'tau_0':0.3}
# ++++++++++++++++++++++++++++++++++++++++
# %% Load Data
# load flatfile
fullname_flatfile = dir_flatfile + fname_flatfile + '.csv'
df_flatfile = pd.read_csv(fullname_flatfile)
# load celldata
df_cell_dist = pd.read_csv(dir_celldata + fname_celldist, index_col=0)
df_cell_dist_sp = pd.read_csv(dir_celldata + fname_celldist_sp)
df_cell_info = pd.read_csv(dir_celldata + fname_cellinfo)
# %% Processing
# read earthquake and station data from the flatfile
n_rec = len(df_flatfile)
# read earthquake data
# earthquake IDs (eqid), magnitudes (mag), and coordinates (eqX,eqY)
# user may change these IDs based on the headers of the flatfile
data_eq_all = df_flatfile[['eqid','mag','eqX', 'eqY']].values
_, eq_idx, eq_inv = np.unique(df_flatfile[['eqid']], axis=0, return_index=True, return_inverse=True)
data_eq = data_eq_all[eq_idx,:]
X_eq = data_eq[:,[2,3]] # earthquake coordinates
# create earthquake ids for all recordings
eq_id = eq_inv + 1
n_eq = len(data_eq)
# read station data
# station IDs (ssn), Vs30, and coordinates (staX,staY)
# user may change these IDs based on the headers of the flatfile
data_sta_all = df_flatfile[['ssn','Vs30','staX','staY']].values
_, sta_idx, sta_inv = np.unique(df_flatfile[['ssn']].values, axis = 0, return_index=True, return_inverse=True)
data_sta = data_sta_all[sta_idx,:]
X_sta = data_sta[:,[2,3]] # station coordinates
# create station ids for all recordings
sta_id = sta_inv + 1
n_sta = len(data_sta)
# read cell data
n_cell = len(df_cell_info)
df_cell_dist = df_cell_dist.reindex(df_flatfile.rsn) #cell distance matrix for records in the synthetic data-set
# cell names
cells_names = df_cell_info.cellname.values
cells_id = df_cell_info.cellid.values
# cell distance matrix
cell_dmatrix = df_cell_dist.loc[:,cells_names].values
# cell coordinates
X_cell = df_cell_info[['mptX','mptY']].values
# valid cells
i_val_cells = cell_dmatrix.sum(axis=0) > 0
# %% Stan
## Stan Data
stan_data = {'N': n_rec,
'NEQ': n_eq,
'NSTAT': n_sta,
'NCELL': n_cell,
'eq': eq_id, #earthquake index
'stat': sta_id, #station index
'X_e': X_eq, #earthquake coordinates
'X_s': X_sta, #station coordinates
'X_c': X_cell, #cell coordinates
'RC': cell_dmatrix, #cell distances
'mu_gmm': np.zeros(n_rec),
#hyper-parameters of generated data-set
'omega_0': hyp['omega_0'],
'omega_1e': hyp['omega_1e'],
'omega_1as': hyp['omega_1as'],
'omega_1bs': hyp['omega_1bs'],
'ell_1e': hyp['ell_1e'],
'ell_1as': hyp['ell_1as'],
#anelastic attenuation
'c_cap_erg': hyp['c_cap_erg'],
'omega_cap_mu': hyp['omega_cap_mu'],
'omega_ca1p': hyp['omega_ca1p'],
'omega_ca2p': hyp['omega_ca2p'],
'ell_ca1p': hyp['ell_ca1p'],
#aleatory terms
'phi_0': hyp['phi_0'],
'tau_0': hyp['tau_0']
}
## Compile and Run Stan model
# compile model
sm = pystan.StanModel(file=fname_stan_model)
# generate samples
fit = sm.sampling(data=stan_data, algorithm="Fixed_param", iter=n_attempts, chains=n_chains, seed=n_seed)
# select only data-sets with non-positive anelastic attenuation coefficients
valid_dataset = np.array( n_attempts * [False] )
for k, c_cap in enumerate(fit['c_cap']):
valid_dataset[k] = np.all(c_cap <= 0 )
valid_dataset = np.where(valid_dataset)[0] #valid data-set ids
valid_dataset = valid_dataset[:min(n_dataset,len(valid_dataset))]
# keep valid datasets
Y_nerg_med = fit['Y_nerg_med'][valid_dataset]
Y_var_coeff = fit['Y_var_ceoff'][valid_dataset]
Y_inattent = fit['Y_inattent'][valid_dataset]
Y_aleat = fit['Y_aleat'][valid_dataset]
Y_tot = fit['Y_tot'][valid_dataset]
c_cap = fit['c_cap'][valid_dataset]
# %% Output
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# save generated data-sets
for k, (k_vds, Y_nm, Y_vc, Y_iatt, Y_t) in enumerate(zip(valid_dataset, Y_nerg_med, Y_var_coeff, Y_inattent, Y_tot)):
#copy catalog info to synthetic data-set
df_synthetic_data = df_flatfile.copy()
#add residuals columns
df_synthetic_data.loc[:,'nerg_gm'] = Y_nm
df_synthetic_data.loc[:,'vcoeff'] = Y_vc
df_synthetic_data.loc[:,'inatten'] = Y_iatt
df_synthetic_data.loc[:,'tot'] = Y_t
#add columns with sampled coefficients
df_synthetic_data.loc[:,'dc_0'] = fit['dc_0'][k_vds]
df_synthetic_data.loc[:,'dc_1e'] = fit['dc_1e'][k_vds][eq_inv]
df_synthetic_data.loc[:,'dc_1as'] = fit['dc_1as'][k_vds][sta_inv]
df_synthetic_data.loc[:,'dc_1bs'] = fit['dc_1bs'][k_vds][sta_inv]
#add columns aleatory terms
df_synthetic_data.loc[:,'dW'] = fit['dW'][k_vds]
df_synthetic_data.loc[:,'dB'] = fit['dB'][k_vds][eq_inv]
#create data-frame with synthetic dataset
fname_synthetic_data = dir_out + f'{fname_flatfile}_synthetic_data{synds_suffix}_Y{k+1}.csv'
df_synthetic_data.to_csv(fname_synthetic_data, index=False)
# save coefficients
for k, (k_vds, c_ca) in enumerate(zip(valid_dataset, c_cap)):
#create synthetic cell dataset
df_synthetic_cell = df_cell_info.copy()
#cell specific anelastic attenuation
df_synthetic_cell.loc[:,'c_cap_mu'] = fit['c_cap_mu'][k_vds]
df_synthetic_cell.loc[:,'c_cap'] = c_ca
#create data-frame with cell specific dataset
fname_synthetic_atten = dir_out + f'{fname_flatfile}_synthetic_atten{synds_suffix}_Y{k+1}.csv'
df_synthetic_cell.to_csv(fname_synthetic_atten, index=False)
# save cell data
fname_cell_info = dir_out + f'{fname_flatfile}_cellinfo.csv'
fname_cell_dist = dir_out + f'{fname_flatfile}_distancematrix.csv'
fname_cell_dist_sp = dir_out + f'{fname_flatfile}_distancematrix_sparse.csv'
df_cell_info.to_csv(fname_cell_info, index=False)
df_cell_dist.to_csv(fname_cell_dist)
df_cell_dist_sp.to_csv(fname_cell_dist_sp, index=False)
| 10,211 | 38.890625 | 117 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/main_pystan_model1_NGAWest2CANorth.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model1_unbounded_hyp import RunStan
# Define variables
#filename suffix
# synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
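# NOTE: uncomment one synds_suffix option above, and one sm_fname and one
# out_dir_sub option below, before running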
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds1'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
# ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#stan model
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds1/'
#output sub-directory
#pystan2
# out_dir_sub = 'PYSTAN_NGAWest2CANorth'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_chol'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_chol_eff2'
#pystan3
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_chol'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_chol_eff'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_chol_eff2'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with correlation-length suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
# Run stan regression
#create datafame with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, sm_fname, out_fname, out_dir, res_name,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,687 | 30.793103 | 90 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_stan_model1.py | """
Created on Thu Aug 12 20:52:09 2021
@author: glavrent
"""
# Working directory and Packages
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
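#note: CalcRMS presumably computes sqrt(mean((x_true - x_est)**2)) and
#CalcLKDivergece an estimate of the Kullback-Leibler divergence between the
#empirical distributions of true and estimated terms (see pylib_stats)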
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset
# name_dataset = 'NGAWest2CANorth'
name_dataset = 'NGAWest2CA'
# name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#package
# 1: Pystan v2
# 2: Pystan v3
# 3: stancmd
pkg_id = 3
#approximation type
# 1: multivariate normal
# 2: cholesky
# 3: cholesky efficient
# 4: cholesky efficient v2
aprox_id = 3
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds1_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds1_large_corr_len'
#directories (regression results)
if pkg_id == 1:
    dir_results = '../../../../Data/Verification/regression/ds1/PYSTAN_%s'%name_dataset
elif pkg_id == 2:
    dir_results = '../../../../Data/Verification/regression/ds1/PYSTAN3_%s'%name_dataset
elif pkg_id == 3:
    dir_results = '../../../../Data/Verification/regression/ds1/CMDSTAN_%s'%name_dataset
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#regression results filename prefix
prfx_results = '%s_syndata'%name_dataset
#output filename suffix (synthetic dataset)
if corr_id == 1: synds_suffix = '_small_corr_len'
elif corr_id == 2: synds_suffix = '_large_corr_len'
#output filename suffix (regression results)
if aprox_id == 1: synds_suffix_stan = synds_suffix
elif aprox_id == 2: synds_suffix_stan = '_chol' + synds_suffix
elif aprox_id == 3: synds_suffix_stan = '_chol_eff' + synds_suffix
elif aprox_id == 4: synds_suffix_stan = '_chol_eff2' + synds_suffix
# dataset info
ds_id = np.arange(1,6)
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30, 'phi_0':0.4, 'tau_0':0.3 }
elif corr_id == 2:
#large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70, 'phi_0':0.4, 'tau_0':0.3 }
# ++++++++++++++++++++++++++++++++++++++++
#plotting options
flag_report = True
# Compare coefficients
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion, index_col=0)
df_reg_coeff = pd.read_csv(fname_reg_coeff, index_col=0)
# Processing
#keep only common records from synthetic dataset
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
    #find unique earthquakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results,synds_suffix_stan,d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_tot_res_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#median
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=35)
ax.set_ylabel('Estimated', fontsize=35)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,e}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-.4,.4])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,e}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,e}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,synds_suffix_stan)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
#RMS misfit
fname_fig = 'misfit_score'
#plot RMS misfit
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,e}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,s}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,s}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,e}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,s}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,s}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
#regression hyperparamters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
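#NOTE (assumed layout): the hyperparameter csv files are indexed by summary
#statistic with one column per hyperparameter, so that e.g.
# df_reg_hyp[0].loc['mean','omega_1e']     #posterior mean of omega_1e
# df_reg_hyp[0].loc['prc_0.50','omega_1e'] #posterior median of omega_1e
#recover the values drawn as vertical lines below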
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 40
ymax_mean = 40
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mean, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,e}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 30
ymax_mean = 30
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,s}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,s}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1b,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,e}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,s}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 100
ymax_mean = 100
#plot posterior dist
ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-parameter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel('$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 29,388 | 37.977454 | 153 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_inla_model1_time.py | """
Created on Tue Mar 15 22:38:50 2022
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
# Define variables
#mesh info
mesh_info = ['coarse', 'medium', 'fine']
#dataset name
dataset_name = ['NGAWest2CANorth', 'NGAWest2CA', 'NGAWest3CA']
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#correlation name
if corr_id == 1:
synds_name = 'small corr len'
synds_suffix = '_small_corr_len'
elif corr_id == 2:
synds_name = 'large corr len'
synds_suffix = '_large_corr_len'
#directories regressions
dir_reg = '../../../../Data/Verification/regression/ds1/'
#directory output
dir_out = '../../../../Data/Verification/regression/ds1/comparisons/'
# Load Data
#initialize dataframe
df_runinfo_all = {};
#iterate over different analyses
for j1, m_i in enumerate(mesh_info):
for j2, d_n in enumerate(dataset_name):
key_runinfo = '%s_%s'%(m_i, d_n)
fname_runinfo = '%s/INLA_%s_%s%s/run_info.csv'%(dir_reg, d_n, m_i, synds_suffix)
#store calc time
df_runinfo_all[key_runinfo] = pd.read_csv(fname_runinfo)
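#NOTE (assumed layout): each run_info.csv has one row per synthetic dataset,
#with at least the columns 'ds_id' and 'run_time' (in minutes), e.g.:
# df_runinfo_all['coarse_NGAWest2CANorth'].run_time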
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
#line style (iterate with mesh info)
line_style = [':','--','-']
#color map (iterate with dataset)
c_map = plt.get_cmap('Dark2')
#run time figure
fig_fname = 'run_time_inla'
#create figure axes
fig, ax = plt.subplots(figsize = (20,10))
#iterate over different analyses
for j2, d_n in enumerate(dataset_name):
for j1, (m_i, l_s) in enumerate(zip(mesh_info, line_style)):
key_runinfo = '%s_%s'%(m_i, d_n)
#
ds_id = df_runinfo_all[key_runinfo].ds_id
ds_name = ['Y%i'%d_i for d_i in ds_id]
#
run_time = df_runinfo_all[key_runinfo].run_time
ax.plot(ds_id, run_time, linestyle=l_s, marker='o', linewidth=2, markersize=10, color=c_map(j2), label='%s - %s'%(d_n, m_i))
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=30)
ax.set_ylabel('Run Time (min)', fontsize=30)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=ds_name)
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
#legend
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_out + fig_fname + '.png' )
| 2,861 | 25.747664 | 132 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_stan_inla_model1_misfit.py | """
Created on Fri Jun 10 15:40:29 2022
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
#user functions
def PlotRSMCmp(df_list, names_list, c_name, width, fig_fname):
    #create figure axes
    fig, ax = plt.subplots(figsize = (10,10))
    #bar offsets so the different regressions are plotted side by side
    x_offset = ( np.arange(len(df_list)) - (len(df_list)-1)/2. ) * width
    #plot rms value
    for x_o, nm, df_l in zip(x_offset, names_list, df_list):
        ax.bar(np.arange(len(df_l))+x_o, df_l[c_name+'_rms'].values, width=width, label=nm)
    #figure properties
    ax.set_ylim([0, 0.2])
    ax.set_xticks(np.arange(len(df_l)))
    ax.set_xticklabels(labels=df_l.ds_name)
    ax.set_xlabel('Dataset', fontsize=35)
    ax.set_ylabel('RMSE', fontsize=35)
    ax.grid(which='both')
    ax.tick_params(axis='x', labelsize=32)
    ax.tick_params(axis='y', labelsize=32)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    #legend
    ax.legend(loc='upper left', fontsize=32)
    #save figure
    fig.tight_layout()
    fig.savefig( fig_fname + '.png' )
# Define variables
# COMPARISONS
# STAN vs INLA, medium mesh
cmp_name = 'STAN_INLA_medium'
reg_title = [f'STAN - NGAW2 CA, North', f'STAN - NGAW2 CA', f'STAN - NGAW3* CA',
f'INLA - NGAW2 CA, North', f'INLA - NGAW2 CA', f'INLA - NGAW3* CA' ]
reg_fname = ['CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len', 'CMDSTAN_NGAWest2CA_chol_eff_small_corr_len', 'CMDSTAN_NGAWest3CA_chol_eff_small_corr_len',
'INLA_NGAWest2CANorth_medium_small_corr_len', 'INLA_NGAWest2CA_medium_small_corr_len', 'INLA_NGAWest3CA_medium_small_corr_len']
ds_name = ['NGAW2\nCA, North','NGAW2\nCA', 'NGAW3\nCA',
'NGAW2\nCA, North','NGAW2\nCA', 'NGAW3*\nCA']
ds_id = np.array([1,2,3,1,2,3])
sftwr_name = 3*['STAN'] + 3*['INLA']
sftwr_id = np.array(3*[1]+3*[2])
#directories regressions
reg_dir = [f'../../../../Data/Verification/regression/ds1/%s/'%r_f for r_f in reg_fname]
#directory output
dir_out = '../../../../Data/Verification/regression/ds1/comparisons/'
# Load Data
#intialize main regression summary dataframe
df_sum_reg = pd.DataFrame({'ds_id':ds_id, 'ds_name':ds_name, 'sftwr_id':sftwr_id, 'sftwr_name':sftwr_name})
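#NOTE: the summary dataframe is populated below with the mean total-residual
#RMSE ('nerg_tot_rms') and the mean run time ('run_time') of every regression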
#initialize misfit dataframe
df_sum_misfit_all = {};
#read misfit info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename misfit info
fname_sum = r_d + 'summary/misfit_summary.csv'
#read KL score for coefficients
df_sum_misfit_all[r_t] = pd.read_csv(fname_sum, index_col=0)
#summarize regression rms
df_sum_reg.loc[k,'nerg_tot_rms'] = df_sum_misfit_all[r_t].nerg_tot_rms.mean()
#initialize run time dataframe
df_runinfo_all = {};
#read run time info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename run time
fname_runinfo = r_d + '/run_info.csv'
#store calc time
df_runinfo_all[r_t] = pd.read_csv(fname_runinfo)
#summarize regression rms
df_sum_reg.loc[k,'run_time'] = df_runinfo_all[r_t].run_time.mean()
#print mean run time
print(f'%s: %.1f min'%( r_t, df_runinfo_all[r_t].run_time.mean() ))
#separate STAN and INLA runs
df_sum_reg_stan = df_sum_reg.loc[df_sum_reg.sftwr_id==1,:]
df_sum_reg_inla = df_sum_reg.loc[df_sum_reg.sftwr_id==2,:]
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# RMSE
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
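#plot rms values with the helper defined above; assumed to reproduce the
#intent of the commented-out inline version kept below for reference
PlotRSMCmp([df_sum_reg_stan, df_sum_reg_inla], ['STAN', 'INLA'],
           c_name=c_name, width=0.6, fig_fname=fig_fname)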
# #create figure axes
# fig, ax = plt.subplots(figsize = (10,10))
# #plot rms value
# ax.bar(np.array([1,3,5])-0.3, df_sum_reg_stan.nerg_tot_rms.values, width=0.6, label='STAN')
# ax.bar(np.array([1,3,5])+0.3, df_sum_reg_inla.nerg_tot_rms.values, width=0.6, label='INLA')
# #figure properties
# ax.set_ylim([0, 0.2])
# ax.set_xticks([1,3,5], df_sum_reg_stan.ds_name)
# ax.set_xlabel('Dataset', fontsize=35)
# ax.set_ylabel('RMSE', fontsize=35)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=32)
# ax.tick_params(axis='y', labelsize=32)
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# #legend
# ax.legend(loc='upper left', fontsize=32)
# #save figure
# fig.tight_layout()
# fig.savefig( fig_fname + '.png' )
# Run Time
#run time figure
fig_fname = '%s/%s_run_time'%(dir_out, cmp_name)
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
#plot rms value
ax.bar(np.array([1,3,5])-0.3, df_sum_reg_stan.run_time.values, width=0.6, label='STAN')
ax.bar(np.array([1,3,5])+0.3, df_sum_reg_inla.run_time.values, width=0.6, label='INLA')
#figure properties
# ax.set_ylim([0, 0.2])
ax.set_xticks([1,3,5], df_sum_reg_stan.ds_name)
ax.set_xlabel('Dataset', fontsize=35)
ax.set_ylabel('Run Time (min)', fontsize=35)
ax.grid(which='both')
ax.set_yscale('log')
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
# ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
| 5,403 | 31.95122 | 158 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_model1_misfit.py | """
Created on Tue Mar 15 14:50:27 2022
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
#user functions
def PlotRSMCmp(df_rms_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_rms_all:
df_rms = df_rms_all[k]
ds_id = np.array(range(len(df_rms)))
ax.plot(ds_id, df_rms.loc[:,c_name+'_rms'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('RMSE', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_rms.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
def PlotKLCmp(df_KL_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_KL_all:
df_KL = df_KL_all[k]
ds_id = np.array(range(len(df_KL)))
ax.plot(ds_id, df_KL.loc[:,c_name+'_KL'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('KL divergence', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_KL.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
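#example usage (hypothetical names): each dictionary entry is drawn as one
#curve across the synthetic datasets, e.g.
# PlotRSMCmp({'run A': df_a, 'run B': df_b}, 'nerg_tot', '/path/to/fig')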
# Define variables
# COMPARISONS
# Different Packages
cmp_name = 'STAN_pckg_cmp_NGAWest2CANorth'
reg_title = ['PYSTAN2', 'PYSTAN3', 'CMDSTANPY']
reg_fname = ['PYSTAN_NGAWest2CANorth_chol_eff_small_corr_len','PYSTAN3_NGAWest2CANorth_chol_eff_small_corr_len','CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len']
ylim_time = [0, 700]
# # Different Implementations
# cmp_name = 'STAN_impl_cmp_NGAWest2CANorth'
# reg_title = ['CMDSTANPY Chol.', 'CMDSTANPY Chol. Eff.']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_chol_small_corr_len','CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len']
# ylim_time = [0, 700]
# # Different Software
# cmp_name = 'STAN_vs_INLA_cmp_NGAWest2CANorth'
# reg_title = ['STAN','INLA']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len','INLA_NGAWest2CANorth_coarse_small_corr_len']
# ylim_time = [0, 700]
# Different Mesh Sizes
# # NGAWest2CANorth
# cmp_name = 'INLA_mesh_cmp_NGAWest2CANorth'
# reg_title = ['INLA coarse mesh', 'INLA medium mesh', 'INLA fine mesh']
# reg_fname = ['INLA_NGAWest2CANorth_coarse_small_corr_len','INLA_NGAWest2CANorth_medium_small_corr_len','INLA_NGAWest2CANorth_fine_small_corr_len']
# ylim_time = [0, 20]
# # NGAWest2CANorth
# cmp_name = 'INLA_mesh_cmp_NGAWest3CA'
# reg_title = ['INLA coarse mesh', 'INLA medium mesh', 'INLA fine mesh']
# reg_fname = ['INLA_NGAWest3CA_coarse_small_corr_len','INLA_NGAWest3CA_medium_small_corr_len','INLA_NGAWest3CA_fine_small_corr_len']
# ylim_time = [0, 100]
#directories regressions
reg_dir = [f'../../../../Data/Verification/regression/ds1/%s/'%r_f for r_f in reg_fname]
#directory output
dir_out = '../../../../Data/Verification/regression/ds1/comparisons/'
# Load Data
#initialize misfit dataframe
df_sum_misfit_all = {};
#read misfit info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename misfit info
fname_sum = r_d + 'summary/misfit_summary.csv'
#read KL score for coefficients
df_sum_misfit_all[r_t] = pd.read_csv(fname_sum, index_col=0)
#initialize run time dataframe
df_runinfo_all = {};
#read run time info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename run time
fname_runinfo = r_d + '/run_info.csv'
#store calc time
df_runinfo_all[r_t] = pd.read_csv(fname_runinfo)
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# RMSE divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
# KL divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
# Run Time
#run time figure
fig_fname = '%s/%s_run_time'%(dir_out, cmp_name)
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
#iterate over different analyses
for j, k in enumerate(df_runinfo_all):
ds_id = df_runinfo_all[k].ds_id
ds_name = ['Y%i'%d_i for d_i in ds_id]
run_time = df_runinfo_all[k].run_time
ax.plot(ds_id, run_time, marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim(ylim_time)
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('Run Time (min)', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=ds_name)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='lower left', fontsize=32)
# ax.legend(loc='upper left', fontsize=32)
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
| 7,051 | 29.79476 | 162 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_inla_model1.py | """
Created on Thu Aug 12 20:52:09 2021
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
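#NOTE (assumption): minimal stand-ins consistent with how CalcRMS and
#CalcLKDivergece are used below, kept commented out so that the versions
#imported from pylib_stats are the ones actually run:
# def CalcRMS(samp_q, samp_p):
#     #root-mean-square difference between two sample sets
#     return np.sqrt(np.mean((np.asarray(samp_q) - np.asarray(samp_p))**2))
# def CalcLKDivergece(samp_q, samp_p, n_bins=50):
#     #KL divergence between binned empirical distributions of two samples
#     h_q, b_e = np.histogram(samp_q, bins=n_bins, density=True)
#     h_p, _   = np.histogram(samp_p, bins=b_e,    density=True)
#     msk = np.logical_and(h_q > 0, h_p > 0)
#     return np.sum(h_q[msk] * np.log(h_q[msk]/h_p[msk]) * np.diff(b_e)[msk])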
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset (uncomment one; first option selected by default)
name_dataset = 'NGAWest2CANorth'
# name_dataset = 'NGAWest2CA'
# name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#kernel function
# 1: Matern kernel (alpha=2)
# 2: Negative Exp (alpha=3/2)
ker_id = 1
#mesh type
# 1: Fine Mesh
# 2: Medium Mesh
# 3: Coarse Mesh
mesh_id = 3
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds1_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds1_large_corr_len'
#directories (regression results)
if mesh_id == 1:
dir_results = f'../../../../Data/Verification/regression/ds1/INLA_%s_fine'%name_dataset
elif mesh_id == 2:
dir_results = f'../../../../Data/Verification/regression/ds1/INLA_%s_medium'%name_dataset
elif mesh_id == 3:
dir_results = f'../../../../Data/Verification/regression/ds1/INLA_%s_coarse'%name_dataset
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
#regression results filename prefix
prfx_results = f'%s_syndata'%name_dataset
# dataset info
ds_id = np.arange(1,6)
# ++++++++++++++++++++++++++++++++++++++++
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30, 'phi_0':0.4, 'tau_0':0.3 }
elif corr_id == 2:
#large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70, 'phi_0':0.4, 'tau_0':0.3 }
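#NOTE: these values must match the hyperparameters used to generate the
#synthetic datasets; in this script they are only used to draw the
#'True value' lines in the posterior-distribution figures below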
# ++++++++++++++++++++++++++++++++++++++++
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#output filename sufix
if corr_id == 1:
synds_suffix = '_small_corr_len'
elif corr_id == 2:
synds_suffix = '_large_corr_len'
#kernel info
if ker_id == 1:
ker_suffix = ''
elif ker_id == 2:
ker_suffix = '_nexp'
# ++++++++++++++++++++++++++++++++++++++++
#plotting options
flag_report = True
# Compare coefficients
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion, index_col=0)
df_reg_coeff = pd.read_csv(fname_reg_coeff, index_col=0)
# Processing
#keep only common records from synthetic dataset
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
#find unique earthquakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
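#NOTE: eq_idx/sta_idx point to the first record of each unique earthquake/
#station, so indexing record-level coefficient columns with them (e.g.
#df_sdata_gmotion['dc_1e'].values[eq_idx]) yields one value per event or
#station; eq_nrec/sta_nrec are the corresponding record counts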
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results,ker_suffix+synds_suffix,d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_tot_res_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#median
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,e}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-.4,.4])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,e}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,e}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,s}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,ker_suffix+synds_suffix)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
#RMS misfit
fname_fig = 'misfit_score'
#plot RMS misfit
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,e}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,s}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,s}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,e}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,s}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,s}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
#regression hyperparamters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results,synds_suffix, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results,synds_suffix, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
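#NOTE (assumed layout): the INLA hyperparameter csv files are indexed by
#hyperparameter name with summary-statistic columns ('mean', 'mode',
#'0.5quant', ...), e.g.
# df_reg_hyp[0].loc['omega_1e','mean'] #posterior mean of omega_1e
#while the hyperposterior files tabulate each marginal pdf on a grid
#(columns 'omega_1e', 'omega_1e_pdf', ...) used for the curves below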
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mode = 40
ymax_mean = 40
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,e}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mode = 30
ymax_mean = 30
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,s}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,s}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1b,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'0.5quant'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Median')
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,e}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,s}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mode = 100
ymax_mean = 100
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-parameter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel('$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 30,341 | 38.71466 | 155 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/main_cmdstan_model1_NGAWest3CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
from regression_cmdstan_model1_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds1'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
# ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
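#five synthetic realizations (Y1 through Y5)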
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds1/'
#output sub-directory
out_dir_sub = 'CMDSTAN_NGAWest3CA'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_chol'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_chol_eff2'
#stan parameters
res_name='tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#parallel options
stan_parallel=False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
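# e.g. '<ds_dir>/CatalogNGAWest3CALite_synthetic_data_small_corr_len_Y1.csv'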
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
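#RunStan is expected to write the regression outputs (e.g. *_stan_residuals.csv,
#*_stan_coefficients.csv) under out_dir; inferred from the comparison scripts that read them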
RunStan(df_flatfile, sm_fname,
out_fname, out_dir, res_name,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=stan_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,227 | 29.742857 | 92 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_inla_model1_misfit_mesh.py | """
Created on Tue Mar 15 14:50:27 2022
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
#user functions
def PlotRSMCmp(df_rms_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
ltype_array = ['-','--',':']
for j, k in enumerate(df_rms_all):
df_rms = df_rms_all[k]
ds_id = np.array(range(len(df_rms)))
#plot info
lcol = mpl.cm.get_cmap('tab10')( np.floor_divide(j,3) )
ltype = ltype_array[ np.mod(j,3) ]
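#curves are grouped in threes: one tab10 color per group (j//3), linestyle cycling within the group (j%3)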
ax.plot(ds_id, df_rms.loc[:,c_name+'_rms'], marker='o', linewidth=2, markersize=10, label=k, linestyle=ltype, color=lcol)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('RMSE', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_rms.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
# ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
def PlotKLCmp(df_KL_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_KL_all:
df_KL = df_KL_all[k]
ds_id = np.array(range(len(df_KL)))
ax.plot(ds_id, df_KL.loc[:,c_name+'_KL'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('KL divergence', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_KL.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
# ax.legend(loc='upper left', fontsize=32)
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
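# Example usage (hypothetical inputs; both helpers share this interface):
# df_rms_all = {'fine mesh': pd.DataFrame({'nerg_tot_rms': [0.05, 0.07]}, index=['Y1', 'Y2'])}
# PlotRSMCmp(df_rms_all, 'nerg_tot', '/tmp/example_rmse')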
# Define variables
# COMPARISONS
# Different Mesh sizes
cmp_name = 'INLA_mesh'
reg_title = [f'NGAW3* CA\nfine', f'NGAW3* CA \nmedium', f'NGAW3 CA \ncoarse',
f'NGAW2 CA \nfine', f'NGAW2 CA \nmedium', f'NGAW2 CA \ncoarse',
f'NGAW2 CA, North\nfine', f'NGAW2 CA, North\nmedium', f'NGAW2 CA, North\ncoarse']
reg_fname = ['INLA_NGAWest3CA_fine_small_corr_len', 'INLA_NGAWest3CA_medium_small_corr_len', 'INLA_NGAWest3CA_coarse_small_corr_len',
'INLA_NGAWest2CA_fine_small_corr_len', 'INLA_NGAWest2CA_medium_small_corr_len', 'INLA_NGAWest2CA_coarse_small_corr_len',
'INLA_NGAWest2CANorth_fine_small_corr_len','INLA_NGAWest2CANorth_medium_small_corr_len','INLA_NGAWest2CANorth_coarse_small_corr_len']
ylim_time = [0, 50]
# # Different Implementations
# cmp_name = 'STAN_impl_cmp_NGAWest2CANorth'
# reg_title = ['CMDSTANPY Chol.', 'CMDSTANPY Chol. Eff.']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_chol_small_corr_len','CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len']
# ylim_time = [0, 700]
# # Different Software
# cmp_name = 'STAN_vs_INLA_cmp_NGAWest2CANorth'
# reg_title = ['STAN','INLA']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len','INLA_NGAWest2CANorth_coarse_small_corr_len']
# ylim_time = [0, 700]
# Alternative per-dataset mesh comparisons
# # NGAWest2CANorth
# cmp_name = 'INLA_mesh_cmp_NGAWest2CANorth'
# reg_title = ['INLA coarse mesh', 'INLA medium mesh', 'INLA fine mesh']
# reg_fname = ['INLA_NGAWest2CANorth_coarse_small_corr_len','INLA_NGAWest2CANorth_medium_small_corr_len','INLA_NGAWest2CANorth_fine_small_corr_len']
# ylim_time = [0, 20]
# # NGAWest2CANorth
# cmp_name = 'INLA_mesh_cmp_NGAWest3CA'
# reg_title = ['INLA coarse mesh', 'INLA medium mesh', 'INLA fine mesh']
# reg_fname = ['INLA_NGAWest3CA_coarse_small_corr_len','INLA_NGAWest3CA_medium_small_corr_len','INLA_NGAWest3CA_fine_small_corr_len']
# ylim_time = [0, 100]
#directories regressions
reg_dir = [f'../../../../Data/Verification/regression/ds1/%s/'%r_f for r_f in reg_fname]
#directory output
dir_out = '../../../../Data/Verification/regression/ds1/comparisons/'
# Load Data
#initialize misfit dataframe
df_sum_misfit_all = {};
#read misfit info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename misfit info
fname_sum = r_d + 'summary/misfit_summary.csv'
#read KL score for coefficients
df_sum_misfit_all[r_t] = pd.read_csv(fname_sum, index_col=0)
#initialize run time dataframe
df_runinfo_all = {};
#read run time info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename run time
fname_runinfo = r_d + '/run_info.csv'
#store calc time
df_runinfo_all[r_t] = pd.read_csv(fname_runinfo)
#
print(f'%s: %.1f min'%( r_t, df_runinfo_all[r_t].run_time.mean() ))
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# RMSE divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
# KL divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
# Run Time
#run time figure
fig_fname = '%s/%s_run_time'%(dir_out, cmp_name)
#create figure axes
# fig, ax = plt.subplots(figsize = (10,10))
fig, ax = plt.subplots(figsize = (14,10))
ltype_array = ['-','--',':']
#iterate over different analyses
for j, k in enumerate(df_runinfo_all):
ds_id = df_runinfo_all[k].ds_id
ds_name = ['Y%i'%d_i for d_i in ds_id]
run_time = df_runinfo_all[k].run_time
lcol = mpl.cm.get_cmap('tab10')( np.floor_divide(j,3) )
ltype = ltype_array[ np.mod(j,3) ]
ax.plot(ds_id, run_time, marker='o', linewidth=2, markersize=10, label=k, linestyle=ltype, color=lcol)
#figure properties
ax.set_ylim(ylim_time)
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('Run Time (min)', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=ds_name)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
# ax.legend(loc='lower left', fontsize=32)
# ax.legend(loc='upper left', fontsize=32)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
| 8,074 | 32.094262 | 148 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_stan_model1_misfit.py | """
Created on Tue Mar 15 14:50:27 2022
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
#change working directory (machine-specific; uncomment and adjust if needed)
# os.chdir('/mnt/halcloud_nfs/glavrent/Research/Nonerg_GMM_methodology/Analyses/Code_Verification/regressions/ds1')
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
#user functions
def PlotRSMCmp(df_rms_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_rms_all:
df_rms = df_rms_all[k]
ds_id = np.array(range(len(df_rms)))
ax.plot(ds_id, df_rms.loc[:,c_name+'_rms'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('RMSE', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_rms.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
def PlotKLCmp(df_KL_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_KL_all:
df_KL = df_KL_all[k]
ds_id = np.array(range(len(df_KL)))
ax.plot(ds_id, df_KL.loc[:,c_name+'_KL'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('KL divergence', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_KL.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
# Define variables
# COMPARISONS
# # Different Packages
# cmp_name = 'STAN_pckg_cmp_NGAWest2CANorth'
# reg_title = ['PYSTAN2', 'PYSTAN3', 'CMDSTANPY']
# reg_fname = ['PYSTAN_NGAWest2CANorth_chol_eff_small_corr_len','PYSTAN3_NGAWest2CANorth_chol_eff_small_corr_len','CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len']
# ylim_time = [0, 700]
# Different Implementations
cmp_name = 'STAN_impl_cmp_NGAWest2CANorth'
reg_title = ['CMDSTANPY Chol.', 'CMDSTANPY Chol. Eff.']
reg_fname = ['CMDSTAN_NGAWest2CANorth_chol_small_corr_len','CMDSTAN_NGAWest2CANorth_chol_eff_small_corr_len']
# reg_fname = ['PYSTAN_NGAWest2CANorth_chol_small_corr_len','PYSTAN_NGAWest2CANorth_chol_eff_small_corr_len']
ylim_time = [0, 700]
#directories regressions
reg_dir = [f'../../../../Data/Verification/regression/ds1/%s/'%r_f for r_f in reg_fname]
#directory output
dir_out = '../../../../Data/Verification/regression/ds1/comparisons/'
# Load Data
#initialize misfit dataframe
df_sum_misfit_all = {};
#read misfit info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename misfit info
fname_sum = r_d + 'summary/misfit_summary.csv'
#read KL score for coefficients
df_sum_misfit_all[r_t] = pd.read_csv(fname_sum, index_col=0)
#initialize run time dataframe
df_runinfo_all = {};
#read run time info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename run time
fname_runinfo = r_d + '/run_info.csv'
#store calc time
df_runinfo_all[r_t] = pd.read_csv(fname_runinfo)
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# RMSE divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
# KL divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
# Run Time
#run time figure
fig_fname = '%s/%s_run_time'%(dir_out, cmp_name)
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
#iterate over different analyses
for j, k in enumerate(df_runinfo_all):
ds_id = df_runinfo_all[k].ds_id
ds_name = ['Y%i'%d_i for d_i in ds_id]
run_time = df_runinfo_all[k].run_time
ax.plot(ds_id, run_time, marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim(ylim_time)
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('Run Time (min)', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=ds_name)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='lower left', fontsize=32)
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' ) | 6,363 | 28.738318 | 164 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/main_pystan_model1_NGAWest2CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model1_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds1'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
# ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds1/'
#output sub-directory
#python 2
# out_dir_sub = 'PYSTAN_NGAWest2CA'
# out_dir_sub = 'PYSTAN_NGAWest2CA_chol'
# out_dir_sub = 'PYSTAN_NGAWest2CA_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest2CA_chol_eff2'
#python 3 (matches pystan_ver below)
out_dir_sub = 'PYSTAN3_NGAWest2CA'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_chol'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_chol_eff'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_chol_eff2'
#stan parameters
runstan_flag = True
#pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, sm_fname, out_fname, out_dir, res_name,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,544 | 29.826087 | 90 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/main_cmdstan_model1_NGAWest2CANorth.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
from regression_cmdstan_model1_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds1'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
# ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds1/'
#output sub-directory
out_dir_sub = 'CMDSTAN_NGAWest2CANorth'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_chol'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_chol_eff2'
#stan parameters
res_name='tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#parallel options
stan_parallel=False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, sm_fname,
out_fname, out_dir, res_name,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=stan_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,436 | 30.824074 | 92 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/main_cmdstan_model1_NGAWest2CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
from regression_cmdstan_model1_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds1'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
# ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds1/'
#output sub-directory
out_dir_sub = 'CMDSTAN_NGAWest2CA'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_chol'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_chol_eff2'
#stan parameters
res_name='tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#parallel options
stan_parallel=False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, sm_fname,
out_fname, out_dir, res_name,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=stan_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,316 | 30 | 92 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/main_pystan_model1_NGAWest3CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model1_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds1'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model1_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds1/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest3CA'
# out_dir_sub = 'PYSTAN_NGAWest3CA_chol'
# out_dir_sub = 'PYSTAN_NGAWest3CA_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest3CA_chol_eff2'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, sm_fname, out_fname, out_dir, res_name,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,244 | 29.327103 | 90 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds1/comparison_inla_model1_misfit.py | """
Created on Tue Mar 15 14:50:27 2022
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
#change working directory (machine-specific; uncomment and adjust if needed)
# os.chdir('/mnt/halcloud_nfs/glavrent/Research/Nonerg_GMM_methodology/Analyses/Code_Verification/regressions/ds1')
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
#user functions
def PlotRSMCmp(df_KL_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for m_i in df_KL_all:
df_KL = df_KL_all[m_i]
ds_id = np.array(range(len(df_KL)))
ax.plot(ds_id, df_KL.loc[:,c_name], linestyle='-', marker='o', linewidth=2, markersize=10, label=m_i)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=30)
ax.set_ylabel('RMSE', fontsize=30)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_KL.index)
ax.tick_params(axis='x', labelsize=30)
ax.tick_params(axis='y', labelsize=30)
#legend
ax.legend(loc='upper left', fontsize=30)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
def PlotKLCmp(df_KL_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for m_i in df_KL_all:
df_KL = df_KL_all[m_i]
ds_id = np.array(range(len(df_KL)))
ax.plot(ds_id, df_KL.loc[:,c_name], linestyle='-', marker='o', linewidth=2, markersize=10, label=m_i)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=30)
ax.set_ylabel('KL divergence', fontsize=30)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_KL.index)
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
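# NOTE: PlotRSMCmp and PlotKLCmp share the same interface and plotting logic;
# only the y-axis label differs (only PlotKLCmp is used below)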
# Define variables
#comparisons
name_reg = ['PYSTAN_NGAWest2CANorth_chol_eff_small_corr_len','PYSTAN3_NGAWest2CANorth_chol_eff_small_corr_len',]
#dataset info
ds_id = 1
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#packages comparison
packg_info = ['PYSTAN', 'PYSTAN3']
#correlation name
if corr_id == 1: synds_suffix = '_small_corr_len'
elif corr_id == 2: synds_suffix = '_large_corr_len'
#dataset name
if ds_id == 1: name_dataset = 'NGAWest2CANorth'
elif ds_id == 2: name_dataset = 'NGAWest2CA'
elif ds_id == 3: name_dataset = 'NGAWest3CA'
#directories regressions
dir_reg = [f'../../../../Data/Verification/regression/ds1/%s/'%(n_r) for n_r in name_reg]
#directory output
dir_out = '../../../../Data/Verification/regression/ds1/comparisons/'
# Load Data
#initialize dataframe
df_KL_all = {};
#read KL scores
for k, (d_r, m_i) in enumerate(zip(dir_reg, packg_info)):
#filename KL score
fname_KL = d_r + 'summary/coeffs_KL_divergence.csv'
#read KL score for coefficients
df_KL_all[m_i] = pd.read_csv(fname_KL, index_col=0)
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s%s_KLdiv_%s'%(dir_out, name_dataset, synds_suffix, c_name)
#plotting
PlotKLCmp(df_KL_all, c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s%s_KLdiv_%s'%(dir_out, name_dataset, synds_suffix, c_name)
#plotting
PlotKLCmp(df_KL_all, c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s%s_KLdiv_%s'%(dir_out, name_dataset, synds_suffix, c_name)
#plotting
PlotKLCmp(df_KL_all, c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s%s_KLdiv_%s'%(dir_out, name_dataset, synds_suffix, c_name)
#plotting
PlotKLCmp(df_KL_all, c_name, fig_fname);
| 4,303 | 26.767742 | 132 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_corr_cells_NGAWest2CANorth_sparse.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest2CANorth_corr_cells_chol_eff_sp'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
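#c_a_erg: ergodic anelastic-attenuation coefficient passed to the regression
#(assumed interpretation; cf. c_cap_erg in the synthetic-dataset hyperparameters)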
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
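#df_cellinfo: one row per attenuation cell; df_celldist: one row per record (rsn),
#one column per cell, entries being the path length through each cell
#(structure inferred from how the comparison scripts consume these files)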
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,573 | 29.547009 | 105 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_corr_cells_NGAWest2CANorth.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_corr_cells_unbounded_hyp import RunStan
# from regression_pystan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient2.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory
#pystan 2
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_corr_cells'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_corr_cells_chol'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_corr_cells_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_corr_cells_chol_eff2'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_corr_cells_chol_eff_sp'
#pystan 3 (matches pystan_ver below)
out_dir_sub = 'PYSTAN3_NGAWest2CANorth_corr_cells'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_corr_cells_chol'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_corr_cells_chol_eff'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_corr_cells_chol_eff2'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_corr_cells_chol_eff_sp'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,608 | 33.916667 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/comparison_stan_model2_corr_cells.py | """
Created on Thu Aug 12 10:26:06 2021
@author: glavrent
"""
# Working directory and Packages
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
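#for reference, the imported metrics are assumed to behave roughly as follows
#(minimal sketch; the actual implementations live in pylib_stats):
#  CalcRMS(x, y)         ~ np.sqrt(np.mean((x - y)**2))
#  CalcLKDivergece(x, y) ~ KL divergence between density estimates of x and y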
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset
# name_dataset = 'NGAWest2CANorth'
name_dataset = 'NGAWest2CA'
# name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#package
# 1: Pystan v2
# 2: Pystan v3
# 3: stancmd
pkg_id = 3
#approximation type
# 1: multivariate normal
# 2: cholesky
# 3: cholesky efficient
# 4: cholesky efficient v2
# 5: cholesky efficient, sparse cells
aprox_id = 5
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds2_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds2_large_corr_len'
#cell info
fname_cellinfo = dir_syndata + '/' + 'CatalogNGAWest3CALite_cellinfo.csv'
fname_distmat = dir_syndata + '/' + 'CatalogNGAWest3CALite_distancematrix.csv'
#directories (regression results)
if pkg_id == 1:
dir_results = f'../../../../Data/Verification/regression/ds2/PYSTAN_%s'%name_dataset
elif pkg_id == 2:
dir_results = f'../../../../Data/Verification/regression/ds2/PYSTAN3_%s'%name_dataset
elif pkg_id == 3:
dir_results = f'../../../../Data/Verification/regression/ds2/CMDSTAN_%s'%name_dataset
#directories (regression results, older runs; uncomment to use instead)
# if pkg_id == 1:
#     dir_results = f'../../../../Data/Verification/regression_old/ds2/PYSTAN_%s'%name_dataset
# elif pkg_id == 2:
#     dir_results = f'../../../../Data/Verification/regression_old/ds2/PYSTAN3_%s'%name_dataset
# elif pkg_id == 3:
#     dir_results = f'../../../../Data/Verification/regression_old/ds2/CMDSTAN_%s'%name_dataset
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
#regression results filename prefix
prfx_results = f'%s_syndata'%name_dataset
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#output filename sufix (synthetic dataset)
if corr_id == 1: synds_suffix = '_small_corr_len'
elif corr_id == 2: synds_suffix = '_large_corr_len'
#output filename sufix (regression results)
if aprox_id == 1: synds_suffix_stan = '_corr_cells' + synds_suffix
elif aprox_id == 2: synds_suffix_stan = '_corr_cells' + '_chol' + synds_suffix
elif aprox_id == 3: synds_suffix_stan = '_corr_cells' + '_chol_eff' + synds_suffix
elif aprox_id == 4: synds_suffix_stan = '_corr_cells' + '_chol_eff2' + synds_suffix
elif aprox_id == 5: synds_suffix_stan = '_corr_cells' + '_chol_eff_sp' + synds_suffix
# dataset info
ds_id = np.arange(1,6)
# ++++++++++++++++++++++++++++++++++++++++
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30,
'c_cap_erg': -0.011,
'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
'ell_ca1p': 75,
'phi_0':0.4, 'tau_0':0.3 }
elif corr_id == 2:
#large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70,
'c_cap_erg': -0.02,
'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
'ell_ca1p': 120,
'phi_0':0.4, 'tau_0':0.3}
# ++++++++++++++++++++++++++++++++++++++++
#ploting options
flag_report = True
# Compare results
#load cell data
df_cellinfo = pd.read_csv(fname_cellinfo).set_index('cellid')
df_distmat = pd.read_csv(fname_distmat).set_index('rsn')
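#df_distmat: one row per record (rsn), one column per cell; an entry > 0 is the
#path length of that record through that cell (used below to count paths per cell)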
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
fname_sdata_atten = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'atten', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
fname_reg_atten = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'catten') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
df_sdata_atten = pd.read_csv(fname_sdata_atten).set_index('cellid')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion, index_col=0)
df_reg_coeff = pd.read_csv(fname_reg_coeff, index_col=0)
df_reg_atten = pd.read_csv(fname_reg_atten, index_col=0)
# Processing
#keep only relevant columns from synthetic dataset
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
df_sdata_atten = df_sdata_atten.reindex(df_reg_atten.index)
#distance matrix for records of interest
df_dmat = df_distmat.reindex(df_sdata_gmotion.index)
#find unique earthqakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
#number of paths per cell
cell_npath = np.sum(df_dmat.loc[:,df_reg_atten.cellname] > 0, axis=0)
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_rms'] = CalcRMS(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_KL'] = CalcLKDivergece(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results,synds_suffix_stan,d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_scatter_tot_res'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#median
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=35)
ax.set_ylabel('Estimated', fontsize=35)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-10,2])
ax.set_ylim([-10,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-.4,.4])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_cap
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_cap_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-0.05,0.02])
ax.set_ylim([-0.05,0.02])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_atten['c_cap_sig'],
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.00,0.03])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_npath'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(cell_npath,
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of paths', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,5e4])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,synds_suffix_stan)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
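#df_misfit: rows are the synthetic datasets, columns the RMSE and KL-divergence scores per model term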
#RMS misfit
fname_fig = 'misfit_score'
#plot RMSE misfit
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_cap_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_cap_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
#regression hyperparamters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 40
ymax_mean = 40
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
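    #note: the 50th percentile (median) is plotted as a proxy for the posterior mode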
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1,E}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 30
ymax_mean = 30
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1a,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1b,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1,E}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1a,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 150
ymax_mean = 150
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1000
ymax_mean = 1000
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_ca1
#hyper-parameter name
name_hyp = 'omega_ca1p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1500
ymax_mean = 1500
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
    ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca1,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{ca1,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_ca2
#hyper-parameter name
name_hyp = 'omega_ca2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1500
ymax_mean = 1500
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp['omega_ca2p'], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca2,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{ca2,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_ca1p
#hyper-parameter name
name_hyp = 'ell_ca1p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{ca1,P}$', fontsize=30)
ax.set_xlabel(r'$\ell_{ca1,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-parameter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel('$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 37,471 | 38.320042 | 153 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_corr_cells_NGAWest2CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_corr_cells_unbounded_hyp import RunStan
# from regression_pystan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'    #default; use the suffix below for the large correlation-length datasets
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp.stan'    #default; alternative parameterizations below
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient2.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory
#python 2
out_dir_sub = 'PYSTAN_NGAWest2CA_corr_cells'
# out_dir_sub = 'PYSTAN_NGAWest2CA_corr_cells_chol'
# out_dir_sub = 'PYSTAN_NGAWest2CA_corr_cells_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest2CA_corr_cells_chol_eff2'
#python 3
# out_dir_sub = 'PYSTAN3_NGAWest2CA_corr_cells'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_corr_cells_chol'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_corr_cells_chol_eff'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_corr_cells_chol_eff2'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_corr_cells_chol_eff_sp'
#stan parameters
runstan_flag = True
pystan_ver = 2
# pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8 #0.9
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append suffix info to output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
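    #(dsid==0 is assumed to tag the NGAWest2 subset within the combined catalog)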
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,412 | 32.687023 | 108 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/comparison_model2_misfit_stan_sparse.py | """
Created on Tue Mar 15 14:50:27 2022
@author: glavrent
"""
# Working directory and Packages
#load variables
import os
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
#user functions
def PlotRSMCmp(df_rms_all, c_name, fig_fname):
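    """Plot the RMSE of coefficient `c_name` across the synthetic datasets for
    every analysis in `df_rms_all` (dict of misfit-summary dataframes keyed by
    analysis label) and save the figure to `fig_fname`.png."""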
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for j, k in enumerate(df_rms_all):
df_rms = df_rms_all[k]
ds_id = np.array(range(len(df_rms)))
lcol = mpl.cm.get_cmap('tab10')(0) if j in [0,2] else mpl.cm.get_cmap('tab10')(1)
ltype = '-' if j in [0,1] else '--'
ax.plot(ds_id, df_rms.loc[:,c_name+'_rms'], marker='o', linewidth=2, markersize=10, label=k, linestyle=ltype, color=lcol)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('RMSE', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_rms.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
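# minimal usage sketch (hypothetical paths; assumes misfit_summary.csv files indexed by dataset):
# df_rms_all = {'STAN': pd.read_csv('run_a/summary/misfit_summary.csv', index_col=0),
#               'STAN sparse': pd.read_csv('run_b/summary/misfit_summary.csv', index_col=0)}
# PlotRSMCmp(df_rms_all, 'nerg_tot', 'cmp_nerg_tot_RMSE')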
def PlotKLCmp(df_KL_all, c_name, fig_fname):
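    """Plot the KL divergence of coefficient `c_name` across the synthetic
    datasets for every analysis in `df_KL_all` (dict of misfit-summary
    dataframes keyed by analysis label) and save the figure to `fig_fname`.png."""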
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_KL_all:
df_KL = df_KL_all[k]
ds_id = np.array(range(len(df_KL)))
ax.plot(ds_id, df_KL.loc[:,c_name+'_KL'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('KL divergence', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_KL.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
# Define variables
# # Sparse Distance Matrix
# # NGAWest 2 CA North
# cmp_name = 'STAN_sparse_cmp_NGAWest2CA'
# reg_title = ['STAN','STAN w/ sp dist matrix']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len','CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_sp_small_corr_len']
# ylim_time = [0, 800]
# # NGAWest 2 CA
# cmp_name = 'STAN_sparse_cmp_NGAWest2CA'
# reg_title = ['STAN','STAN w/ sp dist matrix']
# reg_fname = ['CMDSTAN_NGAWest2CA_corr_cells_chol_eff_small_corr_len','CMDSTAN_NGAWest2CA_corr_cells_chol_eff_sp_small_corr_len']
# ylim_time = [0, 7000]
# NGAWest 2 CA & NGAWest 2 CA North
cmp_name = 'STAN_sparse_cmp_NGAWest2CA_'
reg_title = ['STAN - NGAW2 CA','STAN - NGAW2 CA North',
             'STAN - NGAW2 CA\nw/ sp dist matrix','STAN - NGAW2 CA North\nw/ sp dist matrix']
reg_fname = ['CMDSTAN_NGAWest2CA_corr_cells_chol_eff_small_corr_len', 'CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len',
             'CMDSTAN_NGAWest2CA_corr_cells_chol_eff_sp_small_corr_len', 'CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_sp_small_corr_len']
ylim_time = [0, 7000]
# # Different Software
# cmp_name = 'STAN_vs_INLA_cmp_NGAWest2CANorth'
# reg_title = ['STAN corr. cells','STAN uncorr. cells','INLA uncorr. cells']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len','CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len',
# 'INLA_NGAWest2CANorth_uncorr_cells_coarse_small_corr_len']
# reg_fname = ['PYSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len','PYSTAN_NGAWest2CANorth_uncorr_cells_chol_eff_small_corr_len',
# 'INLA_NGAWest2CANorth_uncorr_cells_coarse_small_corr_len']
# ylim_time = [0, 800]
#directories regressions
reg_dir = ['../../../../Data/Verification/regression/ds2/%s/'%r_f for r_f in reg_fname]
#directory output
dir_out = '../../../../Data/Verification/regression/ds2/comparisons/'
# Load Data
#initialize misfit dataframe
df_sum_misfit_all = {};
#read misfit info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename misfit info
fname_sum = r_d + 'summary/misfit_summary.csv'
#read KL score for coefficients
df_sum_misfit_all[r_t] = pd.read_csv(fname_sum, index_col=0)
#initialize run time dataframe
df_runinfo_all = {};
#read run time info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename run time
fname_runinfo = r_d + '/run_info.csv'
#store calc time
df_runinfo_all[r_t] = pd.read_csv(fname_runinfo)
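    #each run_info.csv holds one row per synthetic dataset with the regression wall-clock time in minutes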
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# RMSE divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
# KL divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
# Run Time
#run time figure
fig_fname = '%s/%s_run_time'%(dir_out, cmp_name)
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
#iterate over different analyses
for j, k in enumerate(df_runinfo_all):
ds_id = df_runinfo_all[k].ds_id
ds_name = ['Y%i'%d_i for d_i in ds_id]
run_time = df_runinfo_all[k].run_time
#
lcol = mpl.cm.get_cmap('tab10')(0) if j in [0,2] else mpl.cm.get_cmap('tab10')(1)
ltype = '-' if j in [0,1] else '--'
ax.plot(ds_id, run_time, marker='o', linewidth=2, markersize=10, label=k, linestyle=ltype, color=lcol)
#figure properties
ax.set_ylim(ylim_time)
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('Run Time (min)', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=ds_name)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
# ax.legend(loc='lower left', fontsize=32)
# ax.legend(loc='upper left', fontsize=32)
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
| 7,569 | 31.48927 | 140 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_uncorr_cells_NGAWest2CANorth.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_uncorr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'    #default; use the suffix below for the large correlation-length datasets
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp.stan'    #default; alternative parameterizations below
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory
#pystan2
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_uncorr_cells'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_uncorr_cells_chol'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_uncorr_cells_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest2CANorth_uncorr_cells_chol_eff2'
#pystan3
out_dir_sub = 'PYSTAN3_NGAWest2CANorth_uncorr_cells'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_uncorr_cells_chol'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_uncorr_cells_chol_eff'
# out_dir_sub = 'PYSTAN3_NGAWest2CANorth_uncorr_cells_chol_eff2'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append suffix info to output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
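    #(dsid==0 selects NGAWest2 records and sreg==1 the Northern CA subregion; assumed flatfile encoding)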
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,311 | 32.169231 | 103 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_corr_cells_NGAWest2CA_sparse.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix
synds_suffix = '_small_corr_len'    #default; use the suffix below for the large correlation-length datasets
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory
# out_dir_sub = 'PYSTAN_NGAWest2CA_corr_cells_chol_eff_sp'
out_dir_sub = 'PYSTAN3_NGAWest2CA_corr_cells_chol_eff_sp'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8 #0.9
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append suffix info to output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,545 | 29.307692 | 105 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/comparison_inla_model2_uncorr_cells.py | """
Created on Thu Aug 12 10:26:06 2021
@author: glavrent
"""
# Working directory and Packages
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
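#CalcRMS: root-mean-square error between true and estimated values;
#CalcLKDivergece computes the KL divergence between the two sets of values (per the function naming)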
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset
name_dataset = 'NGAWest2CANorth'    #default; switch to the datasets below as needed
# name_dataset = 'NGAWest2CA'
# name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#kernel function
# 1: Matérn kernel (alpha=2)
# 2: Negative Exp (alpha=3/2)
ker_id = 1
#mesh type
# 1: Fine Mesh
# 2: Medium Mesh
# 3: Coarse Mesh
mesh_id = 1
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds2_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds2_large_corr_len'
#directories (regression results)
if mesh_id == 1:
    dir_results = '../../../../Data/Verification/regression/ds2/INLA_%s_uncorr_cells_fine'%name_dataset
elif mesh_id == 2:
    dir_results = '../../../../Data/Verification/regression/ds2/INLA_%s_uncorr_cells_medium'%name_dataset
elif mesh_id == 3:
    dir_results = '../../../../Data/Verification/regression/ds2/INLA_%s_uncorr_cells_coarse'%name_dataset
#cell info
fname_cellinfo = dir_syndata + '/' + 'CatalogNGAWest3CALite_cellinfo.csv'
fname_distmat = dir_syndata + '/' + 'CatalogNGAWest3CALite_distancematrix.csv'
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
#regression results filename prefix
prfx_results = '%s_syndata'%name_dataset
# dataset info
ds_id = np.arange(1,6)
# ++++++++++++++++++++++++++++++++++++++++
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30,
'c_cap_erg': -0.011,
'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
'ell_ca1p': 75,
'phi_0':0.4, 'tau_0':0.3 }
elif corr_id == 2:
#large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70,
'c_cap_erg': -0.02,
'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
'ell_ca1p': 120,
'phi_0':0.4, 'tau_0':0.3}
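#the true hyperparameter values above are drawn as reference lines against the estimated posteriors below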
# ++++++++++++++++++++++++++++++++++++++++
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#output filename sufix
if corr_id == 1:
synds_suffix = '_small_corr_len'
elif corr_id == 2:
synds_suffix = '_large_corr_len'
#kenel info
if ker_id == 1:
ker_suffix = ''
elif ker_id == 2:
ker_suffix = '_nexp'
# ++++++++++++++++++++++++++++++++++++++++
#plotting options
flag_report = True
# Compare results
#load cell data
df_cellinfo = pd.read_csv(fname_cellinfo).set_index('cellid')
df_distmat = pd.read_csv(fname_distmat).set_index('rsn')
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
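#rows: synthetic datasets Y1-Y5; columns: RMSE and KL-divergence scores per model term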
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
fname_sdata_atten = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'atten', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
fname_reg_atten = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'catten') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
df_sdata_atten = pd.read_csv(fname_sdata_atten).set_index('cellid')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion).set_index('rsn')
df_reg_coeff = pd.read_csv(fname_reg_coeff).set_index('rsn')
df_reg_atten = pd.read_csv(fname_reg_atten).set_index('cellid')
# Processing
    #keep only the records/cells present in the regression results
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
df_sdata_atten = df_sdata_atten.reindex(df_reg_atten.index)
#distance matrix for records of interest
df_dmat = df_distmat.reindex(df_sdata_gmotion.index)
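    #(rows: records, columns: cells; entries assumed to be each path's length through each cell, 0 if the path misses it)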
    #find unique earthquakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
#number of paths per cell
cell_npath = np.sum(df_dmat.loc[:,df_reg_atten.cellname] > 0, axis=0)
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_rms'] = CalcRMS(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_KL'] = CalcLKDivergece(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results, ker_suffix+synds_suffix, d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_scatter_tot_res'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #mean estimate
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=35)
ax.set_ylabel('Estimated', fontsize=35)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-10,2])
ax.set_ylim([-10,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.5])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.5])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_cap
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_cap_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-0.05,0.02])
ax.set_ylim([-0.05,0.02])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_atten['c_cap_sig'],
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.00,0.03])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_npath'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(cell_npath,
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of paths', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,5e4])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,ker_suffix+synds_suffix)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
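#(misfit_summary.csv: one row per synthetic dataset Y1-Y5, with *_rms columns
# for the root-mean-square error and *_KL columns for the Kullback-Leibler
# divergence of each model term)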
#RMS misfit
fname_fig = 'misfit_score'
#plot RMS misfit
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_cap_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_cap_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
#regression hyperparamters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 40
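    #(fixed plotting height for the mean marker; the commented rule above is
    # the adaptive alternative based on the posterior mode)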
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
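    #(true value taken from the generating hyperparameters of the synthetic
    # datasets; it is identical across datasets, so the line is simply
    # redrawn in each loop pass)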
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 30
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 60
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1b,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 0.02
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 0.1
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 60
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 100
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_cap
#hyper-parameter name
name_hyp = 'omega_cap'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 1500
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(np.sqrt(hyp['omega_ca1p']**2+hyp['omega_ca2p']**2), ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
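    #(the synthetic dataset generates the cell attenuation from two
    # independent components, so their standard deviations combine in
    # quadrature to give the true omega_cap)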
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{ca,p}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-parameter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel(r'$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 35,819 | 39.022346 | 160 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_uncorr_cells_NGAWest3CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_uncorr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation length; small is used as the default here)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (the Cholesky-efficient variant is used as the default here)
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory (must match the stan model variant selected above)
# out_dir_sub = 'PYSTAN_NGAWest3CA_uncorr_cells'
# out_dir_sub = 'PYSTAN_NGAWest3CA_uncorr_cells_chol'
out_dir_sub = 'PYSTAN_NGAWest3CA_uncorr_cells_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest3CA_uncorr_cells_chol_eff2'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the dataset suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
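#(run_info.csv: one row per synthetic dataset with the host name, output
# sub-directory, dataset id, and run time in minutes)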
| 3,831 | 30.933333 | 103 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_cmdstan_model2_uncorr_cells_NGAWest2CANorth.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
# from regression_cmdstan_model2_uncorr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model2_uncorr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation length; small is used as the default here)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (defaulting to the sparse variant to match the imported sparse RunStan wrapper)
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient2.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory (must match the stan model variant selected above)
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_uncorr_cells'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_uncorr_cells_chol'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_uncorr_cells_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_uncorr_cells_chol_eff2'
out_dir_sub = 'CMDSTAN_NGAWest2CANorth_uncorr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the dataset suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
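    #(dsid==0 selects the NGAWest2 records; sreg==1 is assumed to flag the
    # Northern California subregion, consistent with the script name)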
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,298 | 33.669355 | 109 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_cmdstan_model2_uncorr_cells_NGAWest2CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
# from regression_cmdstan_model2_uncorr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model2_uncorr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation length; small is used as the default here)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (defaulting to the sparse variant to match the imported sparse RunStan wrapper)
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient2.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory (must match the stan model variant selected above)
# out_dir_sub = 'CMDSTAN_NGAWest2CA_uncorr_cells'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_uncorr_cells_chol'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_uncorr_cells_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_uncorr_cells_chol_eff2'
out_dir_sub = 'CMDSTAN_NGAWest2CA_uncorr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the dataset suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,177 | 32.96748 | 109 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_cmdstan_model2_corr_cells_NGAWest3CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
# from regression_cmdstan_model2_corr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation length; small is used as the default here)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (defaulting to the sparse variant to match the imported sparse RunStan wrapper)
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient2.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory (must match the stan model variant selected above)
# out_dir_sub = 'CMDSTAN_NGAWest3CA_corr_cells'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_corr_cells_chol'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_corr_cells_chol_efficient'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_corr_cells_chol_efficient2'
out_dir_sub = 'CMDSTAN_NGAWest3CA_corr_cells_chol_efficient_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the dataset suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create datafame with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i fo %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,078 | 32.710744 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_cmdstan_model2_corr_cells_NGAWest2CANorth.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
# from regression_cmdstan_model2_corr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation length; small is used as the default here)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (defaulting to the sparse variant to match the imported sparse RunStan wrapper)
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient2.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory (must match the stan model variant selected above)
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_corr_cells'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_corr_cells_chol'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff2'
out_dir_sub = 'CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the dataset suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create datafame with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i fo %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,274 | 33.475806 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_uncorr_cells_NGAWest2CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_uncorr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation length; small is used as the default here)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (the Cholesky-efficient variant is used as the default here)
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory (must match pystan_ver and the stan model variant selected above)
#pystan 2
# out_dir_sub = 'PYSTAN_NGAWest2CA_uncorr_cells'
# out_dir_sub = 'PYSTAN_NGAWest2CA_uncorr_cells_chol'
# out_dir_sub = 'PYSTAN_NGAWest2CA_uncorr_cells_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest2CA_uncorr_cells_chol_eff2'
#pystan 3
# out_dir_sub = 'PYSTAN3_NGAWest2CA_uncorr_cells'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_uncorr_cells_chol'
out_dir_sub = 'PYSTAN3_NGAWest2CA_uncorr_cells_chol_eff'
# out_dir_sub = 'PYSTAN3_NGAWest2CA_uncorr_cells_chol_eff2'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the dataset suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create datafame with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i fo %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,181 | 31.418605 | 103 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_cmdstan_model2_uncorr_cells_NGAWest3CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
# from regression_cmdstan_model2_uncorr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model2_uncorr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix
# synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_unbounded_hyp_chol_efficient2.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_uncorr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory
# out_dir_sub = 'CMDSTAN_NGAWest3CA_uncorr_cells'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_uncorr_cells_chol'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_uncorr_cells_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_uncorr_cells_chol_eff2'
# out_dir_sub = 'CMDSTAN_NGAWest3CA_uncorr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = f'%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create datafame with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i fo %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,084 | 32.760331 | 109 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_corr_cells_NGAWest3CA_sparse.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation length; small is used as the default here)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
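#(the "sparse" suffix is assumed to indicate a sparse representation of the
# cell-path distance matrix inside the stan model, which should speed up the
# likelihood evaluation for the large NGAWest3 cell matrix)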
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest3CA_corr_cells_chol_eff_sp'
#stan parameters
runstan_flag = True
pystan_ver = 2
# pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the dataset suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,379 | 28.911504 | 105 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/comparison_stan_model2_uncorr_cells.py | """
Created on Thu Aug 12 10:26:06 2021
@author: glavrent
"""
# Working directory and Packages
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
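#for reference, a minimal sketch of the two imported misfit metrics (assumed
#behavior only, not the verbatim pylib_stats implementation):
# CalcRMS(y, y_est) -> np.sqrt(np.mean((np.asarray(y) - np.asarray(y_est))**2))
# CalcLKDivergece(y, y_est) -> Kullback-Leibler divergence between the
# empirical (e.g. kernel-density) distributions of y and y_est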
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset
# name_dataset = 'NGAWest2CANorth'
name_dataset = 'NGAWest2CA'
# name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#package
# 1: Pystan v2
# 2: Pystan v3
# 3: stancmd
pkg_id = 1
#approximation type
# 1: multivariate normal
# 2: cholesky
# 3: cholesky efficient
# 4: cholesky efficient v2
# 5: cholesky efficient, sparse cells
aprox_id = 3
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds2_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds2_large_corr_len'
#cell info
fname_cellinfo = dir_syndata + '/' + 'CatalogNGAWest3CALite_cellinfo.csv'
fname_distmat = dir_syndata + '/' + 'CatalogNGAWest3CALite_distancematrix.csv'
#directories (regression results)
if pkg_id == 1:
    dir_results = '../../../../Data/Verification/regression/ds2/PYSTAN_%s'%name_dataset
elif pkg_id == 2:
    dir_results = '../../../../Data/Verification/regression/ds2/PYSTAN3_%s'%name_dataset
elif pkg_id == 3:
    dir_results = '../../../../Data/Verification/regression/ds2/CMDSTAN_%s'%name_dataset
# #directories (regression results)
# if pkg_id == 1:
# dir_results = f'../../../../Data/Verification/regression_old/ds2/PYSTAN_%s'%name_dataset
# elif pkg_id == 2:
# dir_results = f'../../../../Data/Verification/regression_old/ds2/PYSTAN3_%s'%name_dataset
# elif pkg_id == 3:
# dir_results = f'../../../../Data/Verification/regression_old/ds2/CMDSTAN_%s'%name_dataset
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
#regression results filename prefix
prfx_results = '%s_syndata'%name_dataset
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#output filename suffix (synthetic dataset)
if corr_id == 1: synds_suffix = '_small_corr_len'
elif corr_id == 2: synds_suffix = '_large_corr_len'
#output filename suffix (regression results)
if aprox_id == 1: synds_suffix_stan = '_uncorr_cells' + synds_suffix
elif aprox_id == 2: synds_suffix_stan = '_uncorr_cells' + '_chol' + synds_suffix
elif aprox_id == 3: synds_suffix_stan = '_uncorr_cells' + '_chol_eff' + synds_suffix
elif aprox_id == 4: synds_suffix_stan = '_uncorr_cells' + '_chol_eff2' + synds_suffix
elif aprox_id == 5: synds_suffix_stan = '_uncorr_cells' + '_chol_eff_sp' + synds_suffix
# dataset info
ds_id = np.arange(1,6)
# ++++++++++++++++++++++++++++++++++++++++
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30,
'c_cap_erg': -0.011,
'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
'ell_ca1p': 75,
'phi_0':0.4, 'tau_0':0.3 }
elif corr_id == 2:
#large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70,
'c_cap_erg': -0.02,
'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
'ell_ca1p': 120,
'phi_0':0.4, 'tau_0':0.3}
# ++++++++++++++++++++++++++++++++++++++++
#plotting options
flag_report = True
# Compare results
#load cell data
df_cellinfo = pd.read_csv(fname_cellinfo).set_index('cellid')
df_distmat = pd.read_csv(fname_distmat).set_index('rsn')
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
fname_sdata_atten = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'atten', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
fname_reg_atten = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'catten') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
df_sdata_atten = pd.read_csv(fname_sdata_atten).set_index('cellid')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion, index_col=0)
df_reg_coeff = pd.read_csv(fname_reg_coeff, index_col=0)
df_reg_atten = pd.read_csv(fname_reg_atten, index_col=0)
# Processing
#keep only relevant columns from synthetic dataset
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
df_sdata_atten = df_sdata_atten.reindex(df_reg_atten.index)
#distance matrix for records of interest
df_dmat = df_distmat.reindex(df_sdata_gmotion.index)
    #find unique earthquakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
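    #(eq_idx/sta_idx index the first record of each event/station, so the
    # event and station terms are compared once per group; eq_nrec/sta_nrec
    # hold the number of records per event/station)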
#number of paths per cell
cell_npath = np.sum(df_dmat.loc[:,df_reg_atten.cellname] > 0, axis=0)
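    #(each distance-matrix column holds the in-cell path lengths of all
    # records crossing that cell, so counting the positive entries gives the
    # number of ray paths sampling each cell)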
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_rms'] = CalcRMS(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_KL'] = CalcLKDivergece(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results,synds_suffix_stan,d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_scatter_tot_res'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#median
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=35)
ax.set_ylabel('Estimated', fontsize=35)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-10,2])
ax.set_ylim([-10,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-.4,.4])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_cap
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_cap_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-0.05,0.02])
ax.set_ylim([-0.05,0.02])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_atten['c_cap_sig'],
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.00,0.03])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_npath'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(cell_npath,
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of paths', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,5e4])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,synds_suffix_stan)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
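#note: the *_rms and *_KL columns pair an accuracy metric with a distributional one;
#assuming the standard definitions in pylib_stats, CalcRMS returns
#sqrt(mean((true - estimated)**2)) and CalcLKDivergece returns the Kullback-Leibler
#divergence between the synthetic and estimated samples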
#RMS misfit
fname_fig = 'misfit_score'
#plot RMSE
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_cap_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_cap_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
#regression hyperparamters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 40
ymax_mean = 40
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1,E}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 30
ymax_mean = 30
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1a,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1b,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1,E}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1a,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 150
ymax_mean = 150
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1000
ymax_mean = 1000
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_cap
#hyper-parameter name
name_hyp = 'omega_cap'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1500
ymax_mean = 1500
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(np.sqrt(hyp['omega_ca1p']**2+hyp['omega_ca2p']**2), ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
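#the true total cell-attenuation standard deviation is taken as the quadrature sum
#sqrt(omega_ca1p**2 + omega_ca2p**2), which assumes the two components are independent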
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{ca,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-paramter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel('$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 35,753 | 38.682575 | 153 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/comparison_model2_misfit.py | """
Created on Tue Mar 15 14:50:27 2022
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import pathlib
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
#user functions
def PlotRSMCmp(df_rms_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_rms_all:
df_rms = df_rms_all[k]
ds_id = np.array(range(len(df_rms)))
ax.plot(ds_id, df_rms.loc[:,c_name+'_rms'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('RMSE', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_rms.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
def PlotKLCmp(df_KL_all, c_name, fig_fname):
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
for k in df_KL_all:
df_KL = df_KL_all[k]
ds_id = np.array(range(len(df_KL)))
ax.plot(ds_id, df_KL.loc[:,c_name+'_KL'], linestyle='-', marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim([0, max(0.50, max(ax.get_ylim()))])
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('KL divergence', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_KL.index)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='upper left', fontsize=32)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' )
return fig, ax
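#minimal usage sketch for the two helpers above (hypothetical file names):
# df_misfit_all = {'STAN': pd.read_csv('stan_misfit_summary.csv', index_col=0),
#                  'INLA': pd.read_csv('inla_misfit_summary.csv', index_col=0)}
# PlotRSMCmp(df_misfit_all, 'nerg_tot', 'cmp_nerg_tot_RMSE')
# PlotKLCmp(df_misfit_all, 'nerg_tot', 'cmp_nerg_tot_KLdiv')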
# Define variables
# # Sparse Distance Matrix
# # NGAWest 2 CA North
# cmp_name = 'STAN_sparse_cmp_NGAWest2CA'
# reg_title = ['STAN','STAN w/ sp dist matrix']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len','CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_sp_small_corr_len']
# ylim_time = [0, 800]
# NGAWest 2 CA
cmp_name = 'STAN_sparse_cmp_NGAWest2CA'
reg_title = ['STAN','STAN w/ sp dist matrix']
reg_fname = ['CMDSTAN_NGAWest2CA_corr_cells_chol_eff_small_corr_len','CMDSTAN_NGAWest2CA_corr_cells_chol_eff_sp_small_corr_len']
ylim_time = [0, 7000]
# # Different Software
# cmp_name = 'STAN_vs_INLA_cmp_NGAWest2CANorth'
# reg_title = ['STAN corr. cells','STAN uncorr. cells','INLA uncorr. cells']
# reg_fname = ['CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len','CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len',
# 'INLA_NGAWest2CANorth_uncorr_cells_coarse_small_corr_len']
# reg_fname = ['PYSTAN_NGAWest2CANorth_corr_cells_chol_eff_small_corr_len','PYSTAN_NGAWest2CANorth_uncorr_cells_chol_eff_small_corr_len',
# 'INLA_NGAWest2CANorth_uncorr_cells_coarse_small_corr_len']
# ylim_time = [0, 800]
#directories regressions
# reg_dir = ['../../../../Data/Verification/regression/ds2/%s/'%r_f for r_f in reg_fname]
reg_dir = ['../../../../Data/Verification/regression_old/ds2/%s/'%r_f for r_f in reg_fname]
#directory output
# dir_out = '../../../../Data/Verification/regression/ds2/comparisons/'
dir_out = '../../../../Data/Verification/regression_old/ds2/comparisons/'
# Load Data
#initialize misfit dataframe
df_sum_misfit_all = {};
#read misfit info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename misfit info
fname_sum = r_d + 'summary/misfit_summary.csv'
#read KL score for coefficients
df_sum_misfit_all[r_t] = pd.read_csv(fname_sum, index_col=0)
#initialize run time dataframe
df_runinfo_all = {};
#read run time info
for k, (r_t, r_d) in enumerate(zip(reg_title, reg_dir)):
#filename run time
fname_runinfo = r_d + '/run_info.csv'
#store calc time
df_runinfo_all[r_t] = pd.read_csv(fname_runinfo)
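#run_info.csv is written by the main_* regression scripts; it holds one row per
#synthetic dataset with columns computer_name, out_name, ds_id, and run_time (minutes)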
# Comparison Figures
pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# RMSE divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_RMSE'%(dir_out, cmp_name, c_name)
#plotting
PlotRSMCmp(df_sum_misfit_all , c_name, fig_fname);
# KL divergence
#coefficient name
c_name = 'nerg_tot'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1e'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1as'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
#coefficient name
c_name = 'dc_1bs'
#figure name
fig_fname = '%s/%s_%s_KLdiv'%(dir_out, cmp_name, c_name)
#plotting
PlotKLCmp(df_sum_misfit_all , c_name, fig_fname);
# Run Time
#run time figure
fig_fname = '%s/%s_run_time'%(dir_out, cmp_name)
#create figure axes
fig, ax = plt.subplots(figsize = (10,10))
#iterate over different analyses
for j, k in enumerate(df_runinfo_all):
ds_id = df_runinfo_all[k].ds_id
ds_name = ['Y%i'%d_i for d_i in ds_id]
run_time = df_runinfo_all[k].run_time
ax.plot(ds_id, run_time, marker='o', linewidth=2, markersize=10, label=k)
#figure properties
ax.set_ylim(ylim_time)
ax.set_xlabel('synthetic dataset', fontsize=35)
ax.set_ylabel('Run Time (min)', fontsize=35)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=ds_name)
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#legend
ax.legend(loc='lower left', fontsize=32)
# ax.legend(loc='upper left', fontsize=32)
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( fig_fname + '.png' ) | 6,888 | 29.892377 | 140 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_cmdstan_model2_corr_cells_NGAWest2CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
# from regression_cmdstan_model2_corr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model2_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Validation/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (select one; the sparse file matches the RunStan import above)
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient2.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Validation/regression/ds2/'
#output sub-directory (select one; matches the sparse stan model above)
# out_dir_sub = 'CMDSTAN_NGAWest2CA_corr_cells'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_corr_cells_chol'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_corr_cells_chol_efficient'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_corr_cells_chol_efficient2'
out_dir_sub = 'CMDSTAN_NGAWest2CA_corr_cells_chol_efficient_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
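#adapt_delta and max_treedepth are passed through to Stan's NUTS sampler; raising
#them reduces divergent transitions at the cost of slower sampling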
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 4,163 | 32.853659 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds2/main_pystan_model2_corr_cells_NGAWest3CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model2_corr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds2'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (select one)
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol.stan'
sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model2_corr_cells_unbounded_hyp_chol_efficient2.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds2/'
#output sub-directory (select one; matches the stan model above)
# out_dir_sub = 'PYSTAN_NGAWest3CA_corr_cells'
# out_dir_sub = 'PYSTAN_NGAWest3CA_corr_cells_chol'
out_dir_sub = 'PYSTAN_NGAWest3CA_corr_cells_chol_eff'
# out_dir_sub = 'PYSTAN_NGAWest3CA_corr_cells_chol_eff2'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
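#pystan_ver is forwarded to RunStan to select between the PyStan 2 and PyStan 3 APIs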
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,811 | 30.766667 | 101 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_pystan_model3_uncorr_cells_NGAWest3CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model3_uncorr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Validation/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Validation/regression/ds3/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest3CA_uncorr_cells_chol_eff'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,445 | 28.20339 | 100 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_cmdstan_model3_corr_cells_NGAWest2CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
from regression_cmdstan_model3_corr_cells_unbounded_hyp import RunStan
# from regression_cmdstan_model3_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (select one; matches the non-sparse RunStan import above)
sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds3/'
#output sub-directory (select one)
out_dir_sub = 'CMDSTAN_NGAWest2CA_corr_cells_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CA_corr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg= 0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,778 | 30.491667 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_cmdstan_model3_corr_cells_NGAWest2CANorth.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
from regression_cmdstan_model3_corr_cells_unbounded_hyp import RunStan
# from regression_cmdstan_model3_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (select one; matches the non-sparse RunStan import above)
sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds3/'
#output sub-directory (select one)
out_dir_sub = 'CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_corr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg= 0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,888 | 31.140496 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_pystan_model3_corr_cells_NGAWest3CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model3_corr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Validation/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Validation/regression/ds3/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest3CA_corr_cells_chol_eff'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,441 | 28.169492 | 98 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_cmdstan_model3_uncorr_cells_NGAWest2CANorth.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
from regression_cmdstan_model3_uncorr_cells_unbounded_hyp import RunStan
# from regression_cmdstan_model3_uncorr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model (select one; matches the non-sparse RunStan import above)
sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_unbounded_hyp_chol_efficient.stan'
# sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds3/'
#output sub-directory (select one)
out_dir_sub = 'CMDSTAN_NGAWest2CANorth_uncorr_cells_chol_eff'
# out_dir_sub = 'CMDSTAN_NGAWest2CANorth_uncorr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg= 0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,899 | 31.231405 | 109 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/comparison_inla_model3_uncorr_cells.py | """
Created on Thu Aug 12 10:26:06 2021
@author: glavrent
"""
# Working directory and Packages
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
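#CalcRMS and CalcLKDivergece are project utilities from pylib_stats; assuming the
#standard definitions, they behave roughly like this sketch:
# CalcRMS(y1, y2) ~ np.sqrt(np.mean((np.asarray(y1) - np.asarray(y2))**2))
# CalcLKDivergece(y1, y2) ~ KL divergence between density estimates of the two samples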
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset (select one)
name_dataset = 'NGAWest2CANorth'
# name_dataset = 'NGAWest2CA'
# name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lenghts
corr_id = 1
#kernel function
# 1: Matérn kernel (alpha=2)
# 2: Negative Exp (alpha=3/2)
ker_id = 1
#mesh type
# 1: Fine Mesh
# 2: Medium Mesh
# 3: Coarse Mesh
mesh_id = 3
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds3_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds3_large_corr_len'
#directories (regression results)
if mesh_id == 1:
    dir_results = '../../../../Data/Verification/regression/ds3/INLA_%s_uncorr_cells_fine'%name_dataset
elif mesh_id == 2:
    dir_results = '../../../../Data/Verification/regression/ds3/INLA_%s_uncorr_cells_medium'%name_dataset
elif mesh_id == 3:
    dir_results = '../../../../Data/Verification/regression/ds3/INLA_%s_uncorr_cells_coarse'%name_dataset
#cell info
fname_cellinfo = dir_syndata + '/' + 'CatalogNGAWest3CALite_cellinfo.csv'
fname_distmat = dir_syndata + '/' + 'CatalogNGAWest3CALite_distancematrix.csv'
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
#regression results filename prefix
prfx_results = '%s_syndata'%name_dataset
# dataset info
ds_id = np.arange(1,6)
# ++++++++++++++++++++++++++++++++++++++++
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30,
'c_2_erg': -2.0,
'omega_2': 0.2,
'omega_2p': 0.15, 'ell_2p': 80,
'c_3_erg':-0.6,
'omega_3': 0.15,
'omega_3s': 0.15, 'ell_3s': 130,
'c_cap_erg': -0.011,
'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
'ell_ca1p': 75,
'phi_0':0.3, 'tau_0':0.25 }
elif corr_id == 2:
# large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70,
'c_2_erg': -2.0,
'omega_2': 0.2,
           'omega_2p': 0.15, 'ell_2p': 140,
'c_3_erg':-0.6,
'omega_3': 0.15,
'omega_3s': 0.15, 'ell_3s': 180,
'c_cap_erg': -0.02,
'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
'ell_ca1p': 120,
'phi_0':0.3, 'tau_0':0.25}
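#in both cases, the ell_* entries are correlation lengths in the distance units of
#the catalog (presumably km), and the omega/phi/tau entries are standard deviations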
# ++++++++++++++++++++++++++++++++++++++++
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#output filename sufix
if corr_id == 1:
synds_suffix = '_small_corr_len'
elif corr_id == 2:
synds_suffix = '_large_corr_len'
#kernel info
if ker_id == 1:
ker_suffix = ''
elif ker_id == 2:
ker_suffix = '_nexp'
# ++++++++++++++++++++++++++++++++++++++++
#ploting options
flag_report = True
# Compare results
#load cell data
df_cellinfo = pd.read_csv(fname_cellinfo).set_index('cellid')
df_distmat = pd.read_csv(fname_distmat).set_index('rsn')
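#df_distmat is the record-to-cell distance matrix: one row per record (rsn), one
#column per cell, with positive entries where the source-to-site path crosses a cell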
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
fname_sdata_atten = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'atten', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
fname_reg_atten = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id, prfx_results, synds_suffix, d_id, 'catten') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
df_sdata_atten = pd.read_csv(fname_sdata_atten).set_index('cellid')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion).set_index('rsn')
df_reg_coeff = pd.read_csv(fname_reg_coeff).set_index('rsn')
df_reg_atten = pd.read_csv(fname_reg_atten).set_index('cellid')
# Processing
#keep only relevant columns from synthetic dataset
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
df_sdata_atten = df_sdata_atten.reindex(df_reg_atten.index)
#distance matrix for records of interest
df_dmat = df_distmat.reindex(df_sdata_gmotion.index)
    #find unique earthquakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
#number of paths per cell
cell_npath = np.sum(df_dmat.loc[:,df_reg_atten.cellname] > 0, axis=0)
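    # cell_npath counts, for each anelastic-attenuation cell, how many source-to-site
    # paths cross it: a record contributes to a cell if its entry in the distance
    # matrix is positive. Cells crossed by few paths should be poorly constrained,
    # which is what the *_npath accuracy plots further down examine.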
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_2p_rms'] = CalcRMS(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'c_3s_rms'] = CalcRMS(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_rms'] = CalcRMS(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
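    # CalcRMS(y_true, y_est) is assumed to follow the usual definition sketched below;
    # the actual implementation lives in Python_lib/regression/pylib_stats:
    # def calc_rms_reference(y_true, y_est):
    #     return np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_est))**2))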
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_2p_KL'] = CalcLKDivergece(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'c_3s_KL'] = CalcLKDivergece(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_KL'] = CalcLKDivergece(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
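    # CalcLKDivergece is assumed to estimate the Kullback-Leibler divergence between
    # the empirical distributions of the synthetic (true) and estimated values,
    # i.e. KL(p||q) = sum_i p_i * log(p_i / q_i) over a common binning of the two
    # samples; the exact binning is defined in Python_lib/regression/pylib_stats.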
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results, ker_suffix+synds_suffix, d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_scatter_tot_res'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#median
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=35)
ax.set_ylabel('Estimated', fontsize=35)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=32)
ax.tick_params(axis='y', labelsize=32)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-10,2])
ax.set_ylim([-10,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.5])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.5])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-2,2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_2p
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_2p_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2.3,-1.6])
ax.set_ylim([-2.3,-1.6])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_2p_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['c_2p_sig'].values[eq_idx],
df_sdata_gmotion['c_2p'].values[eq_idx] - df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_2p_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['c_2p'].values[eq_idx] - df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_3s
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_3s_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.2,-.2])
ax.set_ylim([-1.2,-.2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_3s_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['c_3s_sig'].values[sta_idx],
df_sdata_gmotion['c_3s'].values[sta_idx] - df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_3s_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(sta_nrec,
df_sdata_gmotion['c_3s'].values[sta_idx] - df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_cap
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_cap_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-0.05,0.02])
ax.set_ylim([-0.05,0.02])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_atten['c_cap_sig'],
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.00,0.03])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_npath'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(cell_npath,
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of paths', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,5e4])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,ker_suffix+synds_suffix)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
#RMS misfit
fname_fig = 'misfit_score'
#plot RMSE misfit
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_2p_rms,   linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{2,P}$')
ax.plot(ds_id, df_misfit.c_3s_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{3,S}$')
ax.plot(ds_id, df_misfit.c_cap_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_2p_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{2,P}$')
ax.plot(ds_id, df_misfit.c_3s_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{3,S}$')
ax.plot(ds_id, df_misfit.c_cap_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
    #regression hyperparameters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_inla_%s'%(dir_results, ker_suffix+synds_suffix, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
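# Based on how they are indexed below, df_reg_hyp is assumed to hold per-dataset
# summary statistics with one row per hyper-parameter and columns such as 'mean',
# 'mode' and 'prc_0.50', while df_reg_hyp_post holds the discretized posterior:
# for each hyper-parameter 'h', a column 'h' with the evaluation grid and a
# column 'h_pdf' with the corresponding posterior density.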
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 40
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,E}$', fontsize=30)
    ax.set_xlabel(r'$\omega_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 30
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,S}$', fontsize=30)
    ax.set_xlabel(r'$\omega_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 60
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,S}$', fontsize=30)
    ax.set_xlabel(r'$\omega_{1b,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_2p
#hyper-parameter name
name_hyp = 'omega_2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
    #plot posterior summary (mean and median; no posterior pdf is available for this hyper-parameter)
    pl_hyp = ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
    ax.vlines(df_r_h.loc[name_hyp,'prc_0.50'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{2,P}$', fontsize=30)
    ax.set_xlabel(r'$\omega_{2,p}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_3s
#hyper-parameter name
name_hyp = 'omega_3s'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
    #plot posterior summary (mean and median; no posterior pdf is available for this hyper-parameter)
    pl_hyp = ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
    ax.vlines(df_r_h.loc[name_hyp,'prc_0.50'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{3,S}$', fontsize=30)
    ax.set_xlabel(r'$\omega_{3,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 0.02
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,E}$', fontsize=30)
    ax.set_xlabel(r'$\ell_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 0.1
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,S}$', fontsize=30)
    ax.set_xlabel(r'$\ell_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_2p
#hyper-parameter name
name_hyp = 'ell_2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
    #plot posterior summary (mean and median; no posterior pdf is available for this hyper-parameter)
    pl_hyp = ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
    ax.vlines(df_r_h.loc[name_hyp,'prc_0.50'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{2,P}$', fontsize=30)
    ax.set_xlabel(r'$\ell_{2,p}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_3s
#hyper-parameter name
name_hyp = 'ell_3s'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
    #plot posterior summary (mean and median; no posterior pdf is available for this hyper-parameter)
    pl_hyp = ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
    ax.vlines(df_r_h.loc[name_hyp,'prc_0.50'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{3,S}$', fontsize=30)
    ax.set_xlabel(r'$\ell_{3,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 60
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 100
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
    ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_ca
#hyper-parameter name
name_hyp = 'omega_cap'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
ymax_mean = 1500
#plot posterior dist
pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(np.sqrt(hyp['omega_ca1p']**2+hyp['omega_ca2p']**2), ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
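    # the synthetic dataset splits the cell-specific attenuation std into two
    # components, so the true aggregate value is their combination in quadrature:
    # omega_cap = sqrt(omega_ca1p**2 + omega_ca2p**2)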
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca,P}$', fontsize=30)
    ax.set_xlabel(r'$\omega_{ca,p}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-paramter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel('$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 47,785 | 39.088926 | 160 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_pystan_model3_uncorr_cells_NGAWest2CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model3_uncorr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation-length variant)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Validation/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Validation/regression/ds3/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest2CA_uncorr_cells_chol_eff'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
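# n_iter and n_chains control the MCMC sampling effort; adapt_delta and
# max_treedepth are the standard Stan NUTS sampler controls (pushing adapt_delta
# towards 1 and max_treedepth above 10 trades run time for fewer divergences).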
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-directory with correlation-length suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
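# df_cellinfo is assumed to describe the anelastic-attenuation cells (cell ids and
# names, plus geometry) and df_celldist the record-by-cell matrix of in-cell path
# lengths consumed by the cell-specific attenuation term; the column roles are
# inferred from their use in the companion comparison scripts.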
# Run stan regression
#create dataframe with computation times
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
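    # RunStan fits the Stan model and, judging by the companion comparison scripts,
    # writes '<out_fname>_stan_residuals.csv', '..._coefficients.csv' and
    # '..._catten.csv' (plus hyper-parameter summaries) under out_dir.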
#run time end
run_t_end = time.time()
    #compute run time (minutes)
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,534 | 28.458333 | 100 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_pystan_model3_corr_cells_NGAWest2CA.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model3_corr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the synthetic-dataset correlation-length variant)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Validation/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Validation/regression/ds3/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest2CA_corr_cells_chol_eff'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-directory with correlation-length suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation times
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
    #compute run time (minutes)
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,530 | 28.425 | 98 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/comparison_stan_model3_corr_cells.py | """
Created on Thu Aug 12 10:26:06 2021
@author: glavrent
"""
# Working directory and Packages
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset (select one)
name_dataset = 'NGAWest2CANorth'
# name_dataset = 'NGAWest2CA'
# name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#package
# 1: Pystan v2
# 2: Pystan v3
# 3: stancmd
pkg_id = 3
#approximation type
# 1: multivariate normal
# 2: cholesky
# 3: cholesky efficient
# 4: cholesky efficient v2
# 5: cholesky efficient, sparse cells
aprox_id = 3
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds3_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds3_large_corr_len'
#cell info
fname_cellinfo = dir_syndata + '/' + 'CatalogNGAWest3CALite_cellinfo.csv'
fname_distmat = dir_syndata + '/' + 'CatalogNGAWest3CALite_distancematrix.csv'
#directories (regression results)
if pkg_id == 1:
    dir_results = '../../../../Data/Verification/regression/ds3/PYSTAN_%s'%name_dataset
elif pkg_id == 2:
    dir_results = '../../../../Data/Verification/regression/ds3/PYSTAN3_%s'%name_dataset
elif pkg_id == 3:
    dir_results = '../../../../Data/Verification/regression/ds3/CMDSTAN_%s'%name_dataset
#directories (regression results - old results; uncomment to compare against the archived runs instead)
# if pkg_id == 1:
#     dir_results = '../../../../Data/Verification/regression_old/ds3/PYSTAN_%s'%name_dataset
# elif pkg_id == 2:
#     dir_results = '../../../../Data/Verification/regression_old/ds3/PYSTAN3_%s'%name_dataset
# elif pkg_id == 3:
#     dir_results = '../../../../Data/Verification/regression_old/ds3/CMDSTAN_%s'%name_dataset
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
#regression results filename prefix
prfx_results = '%s_syndata'%name_dataset
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#output filename suffix (synthetic dataset)
if corr_id == 1: synds_suffix = '_small_corr_len'
elif corr_id == 2: synds_suffix = '_large_corr_len'
#output filename suffix (regression results)
if aprox_id == 1: synds_suffix_stan = '_corr_cells' + synds_suffix
elif aprox_id == 2: synds_suffix_stan = '_corr_cells' + '_chol' + synds_suffix
elif aprox_id == 3: synds_suffix_stan = '_corr_cells' + '_chol_eff' + synds_suffix
elif aprox_id == 4: synds_suffix_stan = '_corr_cells' + '_chol_eff2' + synds_suffix
elif aprox_id == 5: synds_suffix_stan = '_corr_cells' + '_chol_eff_sp' + synds_suffix
# dataset info
# ds_id = np.arange(1,6)
ds_id = np.arange(1,2)
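# note: only the first synthetic realization (Y1) is compared here; restore
# np.arange(1,6) to sweep all five datasets as in the INLA comparison script.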
# ++++++++++++++++++++++++++++++++++++++++
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30,
'c_2_erg': -2.0,
'omega_2': 0.2,
'omega_2p': 0.15, 'ell_2p': 80,
'c_3_erg':-0.6,
'omega_3': 0.15,
'omega_3s': 0.15, 'ell_3s': 130,
'c_cap_erg': -0.011,
'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
'ell_ca1p': 75,
'phi_0':0.3, 'tau_0':0.25 }
elif corr_id == 2:
# large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70,
'c_2_erg': -2.0,
'omega_2': 0.2,
           'omega_2p': 0.15, 'ell_2p': 140,
'c_3_erg':-0.6,
'omega_3': 0.15,
'omega_3s': 0.15, 'ell_3s': 180,
'c_cap_erg': -0.02,
'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
'ell_ca1p': 120,
'phi_0':0.3, 'tau_0':0.25}
# ++++++++++++++++++++++++++++++++++++++++
#ploting options
flag_report = True
# Compare results
#load cell data
df_cellinfo = pd.read_csv(fname_cellinfo).set_index('cellid')
df_distmat = pd.read_csv(fname_distmat).set_index('rsn')
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
fname_sdata_atten = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'atten', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
fname_reg_atten = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'catten') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
df_sdata_atten = pd.read_csv(fname_sdata_atten).set_index('cellid')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion, index_col=0)
df_reg_coeff = pd.read_csv(fname_reg_coeff, index_col=0)
df_reg_atten = pd.read_csv(fname_reg_atten, index_col=0)
# Processing
#keep only relevant columns from synthetic dataset
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
df_sdata_atten = df_sdata_atten.reindex(df_reg_atten.index)
#distance matrix for records of interest
df_dmat = df_distmat.reindex(df_sdata_gmotion.index)
    #find unique earthquakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
#number of paths per cell
cell_npath = np.sum(df_dmat.loc[:,df_reg_atten.cellname] > 0, axis=0)
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_2p_rms'] = CalcRMS(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'c_3s_rms'] = CalcRMS(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_rms'] = CalcRMS(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_2p_KL'] = CalcLKDivergece(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'c_3s_KL'] = CalcLKDivergece(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_KL'] = CalcLKDivergece(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results,synds_suffix_stan,d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_scatter_tot_res'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#median
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-10,2])
ax.set_ylim([-10,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-.4,.4])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_2p
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_2p_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2.3,-1.6])
ax.set_ylim([-2.3,-1.6])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_2p_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['c_2p_sig'].values[eq_idx],
df_sdata_gmotion['c_2p'].values[eq_idx] - df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_2p_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['c_2p'].values[eq_idx] - df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_3s
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_3s_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.2,-.2])
ax.set_ylim([-1.2,-.2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_3s_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['c_3s_sig'].values[sta_idx],
df_sdata_gmotion['c_3s'].values[sta_idx] - df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_3s_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(sta_nrec,
df_sdata_gmotion['c_3s'].values[sta_idx] - df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_cap
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_cap_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-0.05,0.02])
ax.set_ylim([-0.05,0.02])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_atten['c_cap_sig'],
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.00,0.03])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_npath'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(cell_npath,
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of paths', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,5e4])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
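#NOTE: the scatter/accuracy/nrec blocks above repeat one figure recipe per
#coefficient; a helper along the lines of the sketch below (hypothetical, not
#called anywhere in this script) could replace each block with a single call
def plot_cmp_scatter(x, y, fpath, xlabel, ylabel, xlim, ylim,
                     slope=1., log_x=False, title=None):
    '''Scatter comparison figure with a dashed reference line (slope=1 for
    synthetic-vs-estimated plots, slope=0 for accuracy plots).'''
    fig, ax = plt.subplots(figsize = (10,10))
    ax.scatter(x, y)
    ax.axline((0,0), slope=slope, color="black", linestyle="--")
    if title: ax.set_title(title, fontsize=30)
    ax.set_xlabel(xlabel, fontsize=25)
    ax.set_ylabel(ylabel, fontsize=25)
    ax.grid(which='both')
    if log_x: ax.set_xscale('log')
    ax.tick_params(axis='x', labelsize=22)
    ax.tick_params(axis='y', labelsize=22)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    fig.tight_layout()
    fig.savefig(fpath + '.png')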
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,synds_suffix_stan)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
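#(one row per synthetic dataset; columns hold the <coefficient>_rms and <coefficient>_KL metrics filled in above)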
#RMS misfit
fname_fig = 'misfit_score'
#plot RMSE misfit
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_2p_rms,   linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{2,P}$')
ax.plot(ds_id, df_misfit.c_3s_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{3,S}$')
ax.plot(ds_id, df_misfit.c_cap_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_2p_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{2,P}$')
ax.plot(ds_id, df_misfit.c_3s_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{3,S}$')
ax.plot(ds_id, df_misfit.c_cap_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
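#for reference, a histogram-based sample estimate of the KL divergence can be
#written as below; this is an assumption about what pylib_stats.CalcLKDivergece
#computes, and the library implementation may differ
def kl_divergence_hist(samp_p, samp_q, n_bins=50, eps=1e-10):
    '''Approximate KL(P||Q) from two sample sets using a shared binning.'''
    bins = np.histogram_bin_edges(np.concatenate([samp_p, samp_q]), bins=n_bins)
    p = np.histogram(samp_p, bins=bins)[0].astype(float)
    q = np.histogram(samp_q, bins=bins)[0].astype(float)
    #normalize to probability mass and guard empty bins before taking the log
    p = p/p.sum() + eps
    q = q/q.sum() + eps
    return np.sum(p * np.log(p/q))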
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
#regression hyperparamters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
#figure directory
dir_fig = '%s%s/figures_cmp_hyp/'%(dir_results,synds_suffix_stan)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 40
ymax_mean = 40
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1,E}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 30
ymax_mean = 30
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1a,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1b,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_2p
#hyper-parameter name
name_hyp = 'omega_2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{2,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{2,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_3s
#hyper-parameter name
name_hyp = 'omega_3s'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{3,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{3,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1,E}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1a,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_2p
#hyper-parameter name
name_hyp = 'ell_2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{2,P}$', fontsize=30)
ax.set_xlabel(r'$\ell_{2,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_3s
#hyper-parameter name
name_hyp = 'ell_3s'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{3,S}$', fontsize=30)
ax.set_xlabel(r'$\ell_{3,S}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 150
ymax_mean = 150
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1000
ymax_mean = 1000
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_ca1p
#hyper-parameter name
name_hyp = 'ell_ca1p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{ca1,P}$', fontsize=30)
ax.set_xlabel(r'$\ell_{ca1,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_ca1
#hyper-parameter name
name_hyp = 'omega_ca1p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1500
ymax_mean = 1500
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
    ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca1,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{ca1,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_ca2
#hyper-parameter name
name_hyp = 'omega_ca2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1500
ymax_mean = 1500
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Mode')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp['omega_ca2p'], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca2,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{ca2,P}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-parameter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel('$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 49,612 | 38.658673 | 153 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_cmdstan_model3_uncorr_cells_NGAWest3CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
#regression function (sparse-cell variant, matching the Stan model and output directory selected below)
# from regression_cmdstan_model3_uncorr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model3_uncorr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the correlation-length case used to generate the synthetic datasets)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
# sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_unbounded_hyp_chol_efficient.stan'
sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds3/'
#output sub-directory (matches the sparse Stan model selected above)
# out_dir_sub = 'CMDSTAN_NGAWest3CA_uncorr_cells_chol_eff'
out_dir_sub = 'CMDSTAN_NGAWest3CA_uncorr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
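#note: iteration counts are per chain (4 chains x 500 post-warmup draws give
#2000 posterior samples); adapt_delta is the target acceptance rate and
#max_treedepth caps the NUTS tree depth; raising either can resolve divergent
#transitions at the cost of longer runtimes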
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg= 0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
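#cellinfo lists the anelastic-attenuation cells; the distance matrix gives, for
#each record, the path length traversed in every cell (zero when the
#source-to-site path misses the cell)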
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,698 | 30.347458 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_cmdstan_model3_uncorr_cells_NGAWest2CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
#regression function (sparse-cell variant, matching the Stan model and output directory selected below)
# from regression_cmdstan_model3_uncorr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model3_uncorr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the correlation-length case used to generate the synthetic datasets)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
# sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_unbounded_hyp_chol_efficient.stan'
sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds3/'
#output sub-directory
# out_dir_sub = 'CMDSTAN_NGAWest2CA_uncorr_cells_chol_eff'
out_dir_sub = 'CMDSTAN_NGAWest2CA_uncorr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg= 0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only NGAWest2 records
df_flatfile = df_flatfile.loc[df_flatfile.dsid==0,:]
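    #(dsid==0 is assumed to flag the NGAWest2 records within the combined NGAWest3CALite catalog)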
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,789 | 30.583333 | 109 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_cmdstan_model3_corr_cells_NGAWest3CA.py | """
Created on Wed Dec 29 15:16:15 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/cmdstan/')
#regression function (sparse-cell variant, matching the Stan model and output directory selected below)
# from regression_cmdstan_model3_corr_cells_unbounded_hyp import RunStan
from regression_cmdstan_model3_corr_cells_sparse_unbounded_hyp import RunStan
# Define variables
#filename suffix (select the correlation-length case used to generate the synthetic datasets)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Verification/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
# sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_unbounded_hyp_chol_efficient.stan'
sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_sparse_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest3CA_syndata'
#main output directory
out_dir_main = '../../../../Data/Verification/regression/ds3/'
#output sub-directory
# out_dir_sub = 'CMDSTAN_NGAWest3CA_corr_cells_chol_eff'
out_dir_sub = 'CMDSTAN_NGAWest3CA_corr_cells_chol_eff_sp'
#stan parameters
res_name = 'tot'
n_iter_warmup = 500
n_iter_sampling = 500
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg= 0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#output sub-dir with corr with suffix info
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
    print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
    #use the full NGAWest3CA catalog; the dsid==0 filter (NGAWest2 records only)
    #belongs to the NGAWest2CA variant of this script
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
n_iter_warmup=n_iter_warmup, n_iter_sampling=n_iter_sampling, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
stan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,777 | 30.747899 | 107 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/comparison_stan_model3_uncorr_cells.py | """
Created on Thu Aug 12 10:26:06 2021
@author: glavrent
"""
# Working directory and Packages
#load packages
import os
import sys
import pathlib
import glob
import re #regular expression package
import pickle
#arithmetic libraries
import numpy as np
#statistics libraries
import pandas as pd
#plot libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator as plt_autotick
#user functions
sys.path.insert(0,'../../../Python_lib/regression/')
from pylib_stats import CalcRMS
from pylib_stats import CalcLKDivergece
# Define variables
# USER SETS DIRECTORIES AND FILE INFO OF SYNTHETIC DS AND REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#processed dataset
# name_dataset = 'NGAWest2CANorth'
# name_dataset = 'NGAWest2CA'
name_dataset = 'NGAWest3CA'
#correlation info
# 1: Small Correlation Lengths
# 2: Large Correlation Lengths
corr_id = 1
#package
# 1: Pystan v2
# 2: Pystan v3
# 3: stancmd
pkg_id = 3
#approximation type
# 1: multivariate normal
# 2: cholesky
# 3: cholesky efficient
# 4: cholesky efficient v2
# 5: cholesky efficient, sparse cells
aprox_id = 3
#directories (synthetic dataset)
if corr_id == 1:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds3_small_corr_len'
elif corr_id == 2:
dir_syndata = '../../../../Data/Verification/synthetic_datasets/ds3_large_corr_len'
#cell info
fname_cellinfo = dir_syndata + '/' + 'CatalogNGAWest3CALite_cellinfo.csv'
fname_distmat = dir_syndata + '/' + 'CatalogNGAWest3CALite_distancematrix.csv'
#directories (regression results)
if pkg_id == 1:
    dir_results = '../../../../Data/Verification/regression/ds3/PYSTAN_%s'%name_dataset
elif pkg_id == 2:
    dir_results = '../../../../Data/Verification/regression/ds3/PYSTAN3_%s'%name_dataset
elif pkg_id == 3:
    dir_results = '../../../../Data/Verification/regression/ds3/CMDSTAN_%s'%name_dataset
#directories (regression results - old results; uncomment this block to compare against archived runs instead)
# if pkg_id == 1:
#     dir_results = '../../../../Data/Verification/regression_old/ds3/PYSTAN_%s'%name_dataset
# elif pkg_id == 2:
#     dir_results = '../../../../Data/Verification/regression_old/ds3/PYSTAN3_%s'%name_dataset
# elif pkg_id == 3:
#     dir_results = '../../../../Data/Verification/regression_old/ds3/CMDSTAN_%s'%name_dataset
#prefix for synthetic data and results
prfx_syndata = 'CatalogNGAWest3CALite_synthetic'
#regression results filename prefix
prfx_results = '%s_syndata'%name_dataset
# FILE INFO FOR REGRESSION RESULTS
# ++++++++++++++++++++++++++++++++++++++++
#output filename sufix (synthetic dataset)
if corr_id == 1: synds_suffix = '_small_corr_len'
elif corr_id == 2: synds_suffix = '_large_corr_len'
#output filename sufix (regression results)
if aprox_id == 1: synds_suffix_stan = '_uncorr_cells' + synds_suffix
elif aprox_id == 2: synds_suffix_stan = '_uncorr_cells' + '_chol' + synds_suffix
elif aprox_id == 3: synds_suffix_stan = '_uncorr_cells' + '_chol_eff' + synds_suffix
elif aprox_id == 4: synds_suffix_stan = '_uncorr_cells' + '_chol_eff2' + synds_suffix
elif aprox_id == 5: synds_suffix_stan = '_uncorr_cells' + '_chol_eff_sp' + synds_suffix
# dataset info
ds_id = np.arange(1,2)
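#np.arange(1,2) covers only synthetic dataset Y1; set np.arange(1,6) to compare all five realizations generated by the regression scripts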
# ++++++++++++++++++++++++++++++++++++++++
# USER NEEDS TO SPECIFY HYPERPARAMETERS OF SYNTHETIC DATASET
# ++++++++++++++++++++++++++++++++++++++++
# hyper-parameters
if corr_id == 1:
# small correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.1, 'omega_1as': 0.35, 'omega_1bs': 0.25,
'ell_1e':60, 'ell_1as':30,
'c_2_erg': -2.0,
'omega_2': 0.2,
'omega_2p': 0.15, 'ell_2p': 80,
'c_3_erg':-0.6,
'omega_3': 0.15,
'omega_3s': 0.15, 'ell_3s': 130,
'c_cap_erg': -0.011,
'omega_cap_mu': 0.005, 'omega_ca1p':0.004, 'omega_ca2p':0.002,
'ell_ca1p': 75,
'phi_0':0.3, 'tau_0':0.25 }
elif corr_id == 2:
# large correlation lengths
hyp = {'omega_0': 0.1, 'omega_1e':0.2, 'omega_1as': 0.4, 'omega_1bs': 0.3,
'ell_1e':100, 'ell_1as':70,
'c_2_erg': -2.0,
'omega_2': 0.2,
           'omega_2p': 0.15, 'ell_2p': 140,
'c_3_erg':-0.6,
'omega_3': 0.15,
'omega_3s': 0.15, 'ell_3s': 180,
'c_cap_erg': -0.02,
'omega_cap_mu': 0.008, 'omega_ca1p':0.005, 'omega_ca2p':0.003,
'ell_ca1p': 120,
'phi_0':0.3, 'tau_0':0.25}
# ++++++++++++++++++++++++++++++++++++++++
#plotting options
flag_report = True
# Compare results
#load cell data
df_cellinfo = pd.read_csv(fname_cellinfo).set_index('cellid')
df_distmat = pd.read_csv(fname_distmat).set_index('rsn')
#initialize misfit metrics dataframe
df_misfit = pd.DataFrame(index=['Y%i'%d_id for d_id in ds_id])
#iterate over different datasets
for d_id in ds_id:
# Load Data
#file names
#synthetic data
fname_sdata_gmotion = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'data', synds_suffix, d_id) + '.csv'
fname_sdata_atten = '%s/%s_%s%s_Y%i'%(dir_syndata, prfx_syndata, 'atten', synds_suffix, d_id) + '.csv'
#regression results
fname_reg_gmotion = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'residuals') + '.csv'
fname_reg_coeff = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'coefficients') + '.csv'
fname_reg_atten = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results, synds_suffix_stan, d_id, prfx_results, synds_suffix, d_id, 'catten') + '.csv'
#load synthetic results
df_sdata_gmotion = pd.read_csv(fname_sdata_gmotion).set_index('rsn')
df_sdata_atten = pd.read_csv(fname_sdata_atten).set_index('cellid')
#load regression results
df_reg_gmotion = pd.read_csv(fname_reg_gmotion, index_col=0)
df_reg_coeff = pd.read_csv(fname_reg_coeff, index_col=0)
df_reg_atten = pd.read_csv(fname_reg_atten, index_col=0)
# Processing
#keep only relevant columns from synthetic dataset
df_sdata_gmotion = df_sdata_gmotion.reindex(df_reg_gmotion.index)
df_sdata_atten = df_sdata_atten.reindex(df_reg_atten.index)
#distance matrix for records of interest
df_dmat = df_distmat.reindex(df_sdata_gmotion.index)
    #find unique earthquakes and stations
eq_id, eq_idx, eq_nrec = np.unique(df_sdata_gmotion.eqid, return_index=True, return_counts=True)
sta_id, sta_idx, sta_nrec = np.unique(df_sdata_gmotion.ssn, return_index=True, return_counts=True)
#number of paths per cell
cell_npath = np.sum(df_dmat.loc[:,df_reg_atten.cellname] > 0, axis=0)
# Compute Root Mean Square Error
df_misfit.loc['Y%i'%d_id,'nerg_tot_rms'] = CalcRMS(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_rms'] = CalcRMS(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_rms'] = CalcRMS(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_rms'] = CalcRMS(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_2p_rms'] = CalcRMS(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'c_3s_rms'] = CalcRMS(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_rms'] = CalcRMS(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Compute Divergence
df_misfit.loc['Y%i'%d_id,'nerg_tot_KL'] = CalcLKDivergece(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
df_misfit.loc['Y%i'%d_id,'dc_1e_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'dc_1as_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'dc_1bs_KL'] = CalcLKDivergece(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_2p_KL'] = CalcLKDivergece(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
df_misfit.loc['Y%i'%d_id,'c_3s_KL'] = CalcLKDivergece(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
df_misfit.loc['Y%i'%d_id,'c_cap_KL'] = CalcLKDivergece(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
# Output
#figure directory
dir_fig = '%s%s/Y%i/figures_cmp/'%(dir_results,synds_suffix_stan,d_id)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#compare ground motion predictions
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_scatter_tot_res'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#median
ax.scatter(df_sdata_gmotion.nerg_gm.values, df_reg_gmotion.nerg_mu.values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title('Comparison total residuals, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-10,2])
ax.set_ylim([-10,2])
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1e
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1e_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1e'].values[eq_idx], df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-.4,.4])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['dc_1e_sig'].values[eq_idx],
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1e_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['dc_1e'].values[eq_idx] - df_reg_coeff['dc_1e_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1,E}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1as
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1as_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1as'].values[sta_idx], df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
    #accuracy
ax.scatter(df_reg_coeff['dc_1as_sig'].values[sta_idx],
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1as_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy scatter
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1as'].values[sta_idx] - df_reg_coeff['dc_1as_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1a,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare dc_1bs
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_dc_1bs_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['dc_1bs'].values[sta_idx], df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.5,1.5])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy scatter
ax.scatter(df_reg_coeff['dc_1bs_sig'].values[sta_idx],
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.4])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_dc_1bs_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#accuracy scatter
ax.scatter(sta_nrec,
df_sdata_gmotion['dc_1bs'].values[sta_idx] - df_reg_coeff['dc_1bs_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $\delta c_{1b,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,1000])
ax.set_ylim([-1.5,1.5])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_2p
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_2p_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['c_2p'].values[eq_idx], df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-2.3,-1.6])
ax.set_ylim([-2.3,-1.6])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_2p_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['c_2p_sig'].values[eq_idx],
df_sdata_gmotion['c_2p'].values[eq_idx] - df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.15])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_2p_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(eq_nrec,
df_sdata_gmotion['c_2p'].values[eq_idx] - df_reg_coeff['c_2p_mean'].values[eq_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{2,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_3s
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_3s_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_gmotion['c_3s'].values[sta_idx], df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-1.2,-.2])
ax.set_ylim([-1.2,-.2])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_3s_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_coeff['c_3s_sig'].values[sta_idx],
df_sdata_gmotion['c_3s'].values[sta_idx] - df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0,.3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_3s_nrec'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(sta_nrec,
df_sdata_gmotion['c_3s'].values[sta_idx] - df_reg_coeff['c_3s_mean'].values[sta_idx])
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{3,S}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of records', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.9,1e3])
ax.set_ylim([-.4,.4])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#compare c_cap
#... ... ... ... ... ...
#figure title
fname_fig = 'Y%i_c_cap_scatter'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_sdata_atten['c_cap'].values, df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=1, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Synthetic dataset', fontsize=25)
ax.set_ylabel('Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# plt_lim = np.array([ax.get_xlim(), ax.get_ylim()])
# plt_lim = (plt_lim[:,0].min(), plt_lim[:,1].max())
# ax.set_xlim(plt_lim)
# ax.set_ylim(plt_lim)
ax.set_xlim([-0.05,0.02])
ax.set_ylim([-0.05,0.02])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_accuracy'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(df_reg_atten['c_cap_sig'],
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Standard Deviation', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([0.00,0.03])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#figure title
fname_fig = 'Y%i_c_cap_npath'%d_id
#create figure
fig, ax = plt.subplots(figsize = (10,10))
#coefficient scatter
ax.scatter(cell_npath,
df_sdata_atten['c_cap'].values - df_reg_atten['c_cap_mean'].values)
ax.axline((0,0), slope=0, color="black", linestyle="--")
#edit figure
if not flag_report: ax.set_title(r'Comparison $c_{ca,P}$, Y: %i'%d_id, fontsize=30)
ax.set_xlabel('Number of paths', fontsize=25)
ax.set_ylabel('Actual - Estimated', fontsize=25)
ax.grid(which='both')
ax.set_xscale('log')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
# ax.set_ylim(np.abs(ax.get_ylim()).max()*np.array([-1,1]))
ax.set_xlim([.9,5e4])
ax.set_ylim([-0.04,0.04])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Compare Misfit Metrics
#summary directory
dir_sum = '%s%s/summary/'%(dir_results,synds_suffix_stan)
pathlib.Path(dir_sum).mkdir(parents=True, exist_ok=True)
#figure directory
dir_fig = '%s/figures/'%(dir_sum)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save
df_misfit.to_csv(dir_sum + 'misfit_summary.csv')
#RMS misfit
fname_fig = 'misfit_score'
#plot RMS misfit
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_2p_rms,      linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{2,P}$')
ax.plot(ds_id, df_misfit.c_3s_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{3,S}$')
ax.plot(ds_id, df_misfit.c_cap_rms, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('RMSE', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
#KL divergence
fname_fig = 'KLdiv_score'
#plot KL divergence
fig, ax = plt.subplots(figsize = (10,10))
ax.plot(ds_id, df_misfit.nerg_tot_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label= 'tot nerg')
ax.plot(ds_id, df_misfit.dc_1e_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1,E}$')
ax.plot(ds_id, df_misfit.dc_1as_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1a,S}$')
ax.plot(ds_id, df_misfit.dc_1bs_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$\delta c_{1b,S}$')
ax.plot(ds_id, df_misfit.c_2p_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{2,P}$')
ax.plot(ds_id, df_misfit.c_3s_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{3,S}$')
ax.plot(ds_id, df_misfit.c_cap_KL, linestyle='-', marker='o', linewidth=2, markersize=10, label=r'$c_{ca,P}$')
#figure properties
ax.set_ylim([0,0.50])
ax.set_xlabel('synthetic dataset', fontsize=25)
ax.set_ylabel('KL divergence', fontsize=25)
ax.grid(which='both')
ax.set_xticks(ds_id)
ax.set_xticklabels(labels=df_misfit.index)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#legend
ax.legend(loc='upper left', fontsize=25)
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
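#NOTE (illustration): the misfit columns plotted above (e.g. dc_1e_rms, dc_1e_KL)
#are computed earlier in this script; the helpers below are only a minimal sketch
#of such metrics, assuming arrays of true (synthetic) and estimated coefficients
#and Gaussian approximations of the posteriors. They are not called in this script.
def _rms_misfit(c_true, c_est):
    #root-mean-square misfit between true and estimated coefficients
    c_true, c_est = np.asarray(c_true), np.asarray(c_est)
    return np.sqrt(np.mean((c_true - c_est)**2))

def _kl_div_gaussian(mu_p, sig_p, mu_q, sig_q):
    #KL divergence KL(p||q) between two univariate Gaussian distributions
    return np.log(sig_q/sig_p) + (sig_p**2 + (mu_p - mu_q)**2)/(2*sig_q**2) - 0.5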
# Compare hyper-parameters
#iterate over different datasets
df_reg_hyp = list()
df_reg_hyp_post = list()
for d_id in ds_id:
# Load Data
#regression hyperparamters results
fname_reg_hyp = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperparameters') + '.csv'
fname_reg_hyp_post = '%s%s/Y%i/%s%s_Y%i_stan_%s'%(dir_results,synds_suffix_stan, d_id,prfx_results, synds_suffix, d_id, 'hyperposterior') + '.csv'
#load regression results
df_reg_hyp.append( pd.read_csv(fname_reg_hyp, index_col=0) )
df_reg_hyp_post.append( pd.read_csv(fname_reg_hyp_post, index_col=0) )
#figure directory
dir_fig = '%s%s/figures_cmp_hyp/'%(dir_results,synds_suffix_stan)
pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
# Omega_1e
#hyper-parameter name
name_hyp = 'omega_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 40
ymax_mean = 40
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.25])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1as
#hyper-parameter name
name_hyp = 'omega_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 30
ymax_mean = 30
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_1bs
#hyper-parameter name
name_hyp = 'omega_1bs'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{1b,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{1b,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_2p
#hyper-parameter name
name_hyp = 'omega_2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{2,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{2,p}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_3s
#hyper-parameter name
name_hyp = 'omega_3s'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 60
ymax_mean = 60
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{3,S}$', fontsize=30)
ax.set_xlabel(r'$\omega_{3,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1e
#hyper-parameter name
name_hyp = 'ell_1e'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.02
ymax_mean = 0.02
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1,E}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1,e}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,500])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_1as
#hyper-parameter name
name_hyp = 'ell_1as'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{1a,S}$', fontsize=30)
ax.set_xlabel(r'$\ell_{1a,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_2p
#hyper-parameter name
name_hyp = 'ell_2p'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{2,P}$', fontsize=30)
ax.set_xlabel(r'$\ell_{2,p}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Ell_3s
#hyper-parameter name
name_hyp = 'ell_3s'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 0.1
ymax_mean = 0.1
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\ell_{3,S}$', fontsize=30)
ax.set_xlabel(r'$\ell_{3,s}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,150])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Tau_0
#hyper-parameter name
name_hyp = 'tau_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 150
ymax_mean = 150
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\tau_{0}$', fontsize=30)
ax.set_xlabel(r'$\tau_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.5])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Phi_0
#hyper-parameter name
name_hyp = 'phi_0'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1000
ymax_mean = 1000
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\phi_{0}$', fontsize=30)
ax.set_xlabel(r'$\phi_{0}$', fontsize=25)
ax.set_ylabel(r'probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.6])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Omega_ca
#hyper-parameter name
name_hyp = 'omega_cap'
#figure title
fname_fig = 'post_dist_' + name_hyp
#create figure
fig, ax = plt.subplots(figsize = (10,10))
for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
#estimate vertical line height for mean and mode
ymax_mode = 1500
ymax_mean = 1500
#plot posterior dist
pl_hyp = ax.vlines(df_r_h.loc['mean',name_hyp], ymin=0, ymax=ymax_mean, linestyle='-', label='Mean')
ax.vlines(df_r_h.loc['prc_0.50',name_hyp], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_hyp.get_color(), label='Median')
#plot true value
ymax_hyp = ymax_mean
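#NOTE: the true omega_ca,P value combines the two cell-attenuation components in quadrature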
ax.vlines(np.sqrt(hyp['omega_ca1p']**2+hyp['omega_ca2p']**2), ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
#edit figure
if not flag_report: ax.set_title(r'Comparison $\omega_{ca,P}$', fontsize=30)
ax.set_xlabel(r'$\omega_{ca,p}$', fontsize=25)
ax.set_ylabel('probability density function ', fontsize=25)
ax.grid(which='both')
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
#plot limits
ax.set_xlim([0,0.05])
ax.set_ylim([0,ymax_hyp])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# # Delta c_0
# #hyper-paramter name
# name_hyp = 'dc_0'
# #figure title
# fname_fig = 'post_dist_' + name_hyp
# #create figure
# fig, ax = plt.subplots(figsize = (10,10))
# for d_id, df_r_h, df_r_h_p in zip(ds_id, df_reg_hyp, df_reg_hyp_post):
# #estimate vertical line height for mean and mode
# ymax_mode = df_r_h_p.loc[:,name_hyp+'_pdf'].max()
# ymax_mean = 1.5*np.ceil(ymax_mode/10)*10
# ymax_mean = 15
# #plot posterior dist
# pl_pdf = ax.plot(df_r_h_p.loc[:,name_hyp], df_r_h_p.loc[:,name_hyp+'_pdf'])
# ax.vlines(df_r_h.loc[name_hyp,'mean'], ymin=0, ymax=ymax_mean, linestyle='-', color=pl_pdf[0].get_color(), label='Mean')
# ax.vlines(df_r_h.loc[name_hyp,'mode'], ymin=0, ymax=ymax_mode, linestyle='--', color=pl_pdf[0].get_color(), label='Mode')
# #plot true value
# ymax_hyp = ymax_mean
# # ax.vlines(hyp[name_hyp], ymin=0, ymax=ymax_hyp, linestyle='-', linewidth=4, color='black', label='True value')
# #edit figure
# ax.set_title(r'Comparison $\delta c_{0}$', fontsize=30)
# ax.set_xlabel('$\delta c_{0}$', fontsize=25)
# ax.set_ylabel('probability density function ', fontsize=25)
# ax.grid(which='both')
# ax.tick_params(axis='x', labelsize=22)
# ax.tick_params(axis='y', labelsize=22)
# #plot limits
# ax.set_xlim([-1,1])
# ax.set_ylim([0,ymax_hyp])
# #save figure
# fig.tight_layout()
# # fig.savefig( dir_fig + fname_fig + '.png' )
| 47,186 | 38.686291 | 153 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_pystan_model3_corr_cells_NGAWest2CANorth.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model3_corr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Validation/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model3_corr_cells_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Validation/regression/ds3/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest2CANorth_corr_cells_chol_eff'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the correlation-length suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
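#NOTE (illustration): for cell-specific anelastic attenuation, each record's
#attenuation term is the dot product of its path lengths through the cells (a row
#of the cell-distance matrix) with the per-cell coefficients. A minimal sketch,
#assuming dL is an (n_rec x n_cell) array and c_cap an (n_cell,) vector; it is
#not called in this script:
def _cell_atten(dL, c_cap):
    #per-record anelastic attenuation from cell path lengths and cell coefficients
    return np.asarray(dL) @ np.asarray(c_cap)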
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,635 | 29.049587 | 98 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/regression/ds3/main_pystan_model3_uncorr_cells_NGAWest2CANorth.py | """
Created on Wed Jul 14 14:17:52 2021
@author: glavrent
"""
# Working directory and Packages
#load libraries
import os
import sys
import numpy as np
import pandas as pd
import time
#user functions
sys.path.insert(0,'../../../Python_lib/regression/pystan/')
from regression_pystan_model3_uncorr_cells_unbounded_hyp import RunStan
# Define variables
#filename suffix (select one)
synds_suffix = '_small_corr_len'
# synds_suffix = '_large_corr_len'
#synthetic datasets directory
ds_dir = '../../../../Data/Validation/synthetic_datasets/ds3'
ds_dir = r'%s%s/'%(ds_dir, synds_suffix)
# dataset info
#ds_fname_main = 'CatalogNGAWest3CA_synthetic_data'
ds_fname_main = 'CatalogNGAWest3CALite_synthetic_data'
ds_id = np.arange(1,6)
#cell specific anelastic attenuation
ds_fname_cellinfo = 'CatalogNGAWest3CALite_cellinfo'
ds_fname_celldist = 'CatalogNGAWest3CALite_distancematrix'
#stan model
sm_fname = '../../../Stan_lib/regression_stan_model3_uncorr_cells_unbounded_hyp_chol_efficient.stan'
#output info
#main output filename
out_fname_main = 'NGAWest2CANorth_syndata'
#main output directory
out_dir_main = '../../../../Data/Validation/regression/ds3/'
#output sub-directory
out_dir_sub = 'PYSTAN_NGAWest2CANorth_uncorr_cells_chol_eff'
#stan parameters
runstan_flag = True
# pystan_ver = 2
pystan_ver = 3
res_name = 'tot'
n_iter = 1000
n_chains = 4
adapt_delta = 0.8
max_treedepth = 10
#ergodic coefficients
c_2_erg=-2.0
c_3_erg=-0.6
c_a_erg=0.0
#parallel options
# flag_parallel = True
flag_parallel = False
#append the correlation-length suffix to the output sub-directory
out_dir_sub = '%s%s'%(out_dir_sub, synds_suffix)
#load cell dataframes
cellinfo_fname = '%s%s.csv'%(ds_dir, ds_fname_cellinfo)
celldist_fname = '%s%s.csv'%(ds_dir, ds_fname_celldist)
df_cellinfo = pd.read_csv(cellinfo_fname)
df_celldist = pd.read_csv(celldist_fname)
# Run stan regression
#create dataframe with computation time
df_run_info = list()
#iterate over all synthetic datasets
for d_id in ds_id:
print('Synthetic dataset %i of %i'%(d_id, len(ds_id)))
#run time start
run_t_strt = time.time()
#input flatfile
ds_fname = '%s%s%s_Y%i.csv'%(ds_dir, ds_fname_main, synds_suffix, d_id)
#load flatfile
df_flatfile = pd.read_csv(ds_fname)
#keep only North records of NGAWest2
df_flatfile = df_flatfile.loc[np.logical_and(df_flatfile.dsid==0,
df_flatfile.sreg==1),:]
#output file name and directory
out_fname = '%s%s_Y%i'%(out_fname_main, synds_suffix, d_id)
out_dir = '%s/%s/Y%i/'%(out_dir_main, out_dir_sub, d_id)
#run stan model
RunStan(df_flatfile, df_cellinfo, df_celldist, sm_fname,
out_fname, out_dir, res_name,
c_2_erg=c_2_erg, c_3_erg=c_3_erg, c_a_erg=c_a_erg,
runstan_flag=runstan_flag, n_iter=n_iter, n_chains=n_chains,
adapt_delta=adapt_delta, max_treedepth=max_treedepth,
pystan_ver=pystan_ver, pystan_parallel=flag_parallel)
#run time end
run_t_end = time.time()
#compute run time
run_tm = (run_t_end - run_t_strt)/60
#log run time
df_run_info.append(pd.DataFrame({'computer_name':os.uname()[1],'out_name':out_dir_sub,
'ds_id':d_id,'run_time':run_tm}, index=[d_id]))
#write out run info
out_fname = '%s%s/run_info.csv'%(out_dir_main, out_dir_sub)
pd.concat(df_run_info).reset_index(drop=True).to_csv(out_fname, index=False)
| 3,639 | 29.082645 | 100 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/CreateCatalogNGAWest2CA.py | """
Created on Sun Jun 27 22:58:16 2021
@author: glavrent
"""
# %% Required Packages
#load libraries
import os
import sys
import pathlib
import glob
import re #regular expression package
#arithmetic libraries
import numpy as np
import pandas as pd
#geographic coordinates
import pyproj
#plotting libraries
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
#user-defined functions
sys.path.insert(0,'../../Python_lib/catalog')
sys.path.insert(0,'../../Python_lib/plotting')
import pylib_catalog as pylib_catalog
import pylib_contour_plots as pylib_cplt
# %% Define Input Data
#thresholds
thres_dist = 0.01 #distance threshold for collocated stations [km]
# projection system
utm_zone = '11S'
# region id
region_id = 1
#input file names
fname_flatfile_NGA2ASK14 = '../../../Raw_files/nga_w2_resid/resid_T0.200.out2.txt'
fname_flatfile_NGA2coor = '../../../Raw_files/nga_w2/Updated_NGA_West2_Flatfile_coordinates.csv'
#flatfile file
fname_flatfile = 'CatalogNGAWest2CA_ASK14'
#output directory
dir_out = '../../../Data/Verification/preprocessing/flatfiles/NGAWest2_CA/'
dir_fig = dir_out + 'figures/'
# %% Load Data
#NGAWest2
df_flatfile_NGA2ASK14 = pd.read_csv(fname_flatfile_NGA2ASK14, delim_whitespace=True)
df_flatfile_NGA2coor = pd.read_csv(fname_flatfile_NGA2coor)
df_flatfile_NGA2 = pd.merge(df_flatfile_NGA2ASK14, df_flatfile_NGA2coor, left_on='recID', right_on='Record Sequence Number')
# %% Cleaning files
# NGA2
#keep only CA for NGA2
df_flatfile_NGA2 = df_flatfile_NGA2[ df_flatfile_NGA2.region == region_id ]
#reset indices
df_flatfile_NGA2.reset_index(inplace=True)
# %% Process Data
#coordinates and projection system
# projection system
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone+", +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
#earthquake and station ids
eq_id_NGA2 = df_flatfile_NGA2['eqid'].values.astype(int)
sta_id_NGA2 = df_flatfile_NGA2['Station Sequence Number'].values.astype(int)
#unique earthquake and station ids
eq_id_NGA2_unq, eq_idx_NGA2 = np.unique(eq_id_NGA2, return_index=True)
sta_id_NGA2_unq, sta_idx_NGA2 = np.unique(sta_id_NGA2, return_index=True)
#number of earthquake and stations
neq_NGA2 = len(eq_id_NGA2_unq)
nsta_NGA2 = len(sta_id_NGA2_unq)
#earthquake and station coordinates
eq_latlon_NGA2_all = df_flatfile_NGA2[['Hypocenter Latitude (deg)','Hypocenter Longitude (deg)']].values
sta_latlon_NGA2_all = df_flatfile_NGA2[['Station Latitude','Station Longitude']].values
#utm coordinates
eq_X_NGA2_all = np.array([utmProj(e_lon, e_lat) for e_lat, e_lon in zip(eq_latlon_NGA2_all[:,0], eq_latlon_NGA2_all[:,1])]) / 1000
eq_z_NGA2_all = -1*df_flatfile_NGA2['Hypocenter Depth (km)'].values
sta_X_NGA2_all = np.array([utmProj(s_lon, s_lat) for s_lat, s_lon in zip(sta_latlon_NGA2_all[:,0], sta_latlon_NGA2_all[:,1])]) / 1000
mpt_X_NGA2_all = (eq_X_NGA2_all + sta_X_NGA2_all) / 2
#mid point coordinates
mpt_latlon_NGA2_all = np.flip( np.array([utmProj(pt_x, pt_y, inverse=True) for pt_x, pt_y in
zip(mpt_X_NGA2_all[:,0], mpt_X_NGA2_all[:,1]) ]), axis=1)
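#NOTE (illustration): pyproj projections accept array input, so the per-record
#comprehensions above could be vectorized; a minimal sketch of an equivalent
#helper (not used below):
def _latlon2utm_km(latlon, proj=utmProj):
    #convert [lat, lon] rows to UTM coordinates in km
    x, y = proj(latlon[:,1], latlon[:,0])
    return np.column_stack([x, y]) / 1000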
#ground motion parameters
mag_NGA2 = df_flatfile_NGA2['mag'].values
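#hypocentral (point-source) distance used as a proxy for rupture distance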
rup_NGA2 = np.sqrt(np.linalg.norm(eq_X_NGA2_all-sta_X_NGA2_all, axis=1)**2+eq_z_NGA2_all**2)
vs30_NGA2 = df_flatfile_NGA2['VS30'].values
# %% Process Data to save
i_data2keep = np.full(len(df_flatfile_NGA2), True)
#records' info
rsn_array = df_flatfile_NGA2['recID'].values[i_data2keep].astype(int)
eqid_array = eq_id_NGA2[i_data2keep]
ssn_array = sta_id_NGA2[i_data2keep]
year_array = df_flatfile_NGA2['YEAR'].values[i_data2keep]
#records' parameters
mag_array = mag_NGA2[i_data2keep]
rrup_array = rup_NGA2[i_data2keep]
vs30_array = vs30_NGA2[i_data2keep]
#earthquake, station, mid-point latlon coordinates
eq_latlon = eq_latlon_NGA2_all[i_data2keep,:]
sta_latlon = sta_latlon_NGA2_all[i_data2keep,:]
mpt_latlon = mpt_latlon_NGA2_all[i_data2keep,:]
#earthquake, station, mid-point UTM coordinates
eq_utm = eq_X_NGA2_all[i_data2keep,:]
sta_utm = sta_X_NGA2_all[i_data2keep,:]
mpt_utm = mpt_X_NGA2_all[i_data2keep,:]
#earthquake source depth
eq_z = eq_z_NGA2_all[i_data2keep]
#indices for unique earthquakes and stations
_, eq_idx, eq_inv = np.unique(eqid_array, return_index=True, return_inverse=True)
_, sta_idx, sta_inv = np.unique(ssn_array, return_index=True, return_inverse=True)
n_eq_orig = len(eq_idx)
n_sta_orig = len(sta_idx)
# NGAWest2 dataframe
data_full = {'rsn':rsn_array, 'eqid':eqid_array, 'ssn':ssn_array,
'mag':mag_array, 'Rrup':rrup_array, 'Vs30': vs30_array, 'year': year_array,
'eqLat':eq_latlon[:,0], 'eqLon':eq_latlon[:,1], 'staLat':sta_latlon[:,0], 'staLon':sta_latlon[:,1], 'mptLat':mpt_latlon[:,0], 'mptLon':mpt_latlon[:,1],
'UTMzone':utm_zone,
'eqX':eq_utm[:,0], 'eqY':eq_utm[:,1], 'eqZ':eq_z, 'staX':sta_utm[:,0], 'staY':sta_utm[:,1], 'mptX':mpt_utm[:,0], 'mptY':mpt_utm[:,1]}
df_flatfile_full = pd.DataFrame(data_full)
# colocate stations
#update ssn for collocated stations
df_flatfile_full = pylib_catalog.ColocatePt(df_flatfile_full, 'ssn', ['staX','staY'], thres_dist=thres_dist)
#keep a single record per event-station pair after collocation
i_unq_eq_sta = np.unique(df_flatfile_full[['eqid','ssn']].values, return_index=True, axis=0)[1]
df_flatfile_full = df_flatfile_full.iloc[i_unq_eq_sta, :].sort_index()
_, eq_idx, eq_inv = np.unique(df_flatfile_full.loc[:,'eqid'], axis=0, return_index=True, return_inverse=True)
_, sta_idx, sta_inv = np.unique(df_flatfile_full.loc[:,'ssn'], axis=0, return_index=True, return_inverse=True)
n_eq = len(eq_idx)
n_sta = len(sta_idx)
# average gm parameters
df_flatfile_full = pylib_catalog.IndexAvgColumns(df_flatfile_full, 'eqid', ['mag','eqLat','eqLon','eqX','eqY','eqZ'])
df_flatfile_full = pylib_catalog.IndexAvgColumns(df_flatfile_full, 'ssn', ['Vs30','staLat','staLon','staX','staY'])
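#NOTE (illustration): IndexAvgColumns is a user-defined helper; it presumably
#replaces each listed column by its average over records sharing the same id.
#A minimal pandas sketch of the assumed behavior (not called here):
def _avg_by_index(df, id_col, cols):
    df = df.copy()
    df[cols] = df.groupby(id_col)[cols].transform('mean')
    return df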
# create event and station dataframes
#event dataframe
df_flatfile_event = df_flatfile_full.iloc[eq_idx,:][['eqid','mag','year','eqLat','eqLon','UTMzone','eqX','eqY','eqZ']].reset_index(drop=True)
#station dataframe
df_flatfile_station = df_flatfile_full.iloc[sta_idx,:][['ssn','Vs30','staLat','staLon','UTMzone','staX','staY']].reset_index(drop=True)
# %% Save data
# create output directories
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(dir_fig): pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save processed dataframes
fname_flatfile_full= '%s%s'%(dir_out, fname_flatfile)
df_flatfile_full.to_csv(fname_flatfile_full + '.csv', index=False)
df_flatfile_event.to_csv(fname_flatfile_full + '_event.csv', index=False)
df_flatfile_station.to_csv(fname_flatfile_full + '_station.csv', index=False)
# create figures
# Mag-Dist distribution
fname_fig = 'M-R_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_full.Rrup, df_flatfile_full.mag, label='NGAWest2 CA')
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
ax.set_xscale('log')
# ax.set_xlim([0.1, 2000])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
# ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
# Mag-Year distribution
fname_fig = 'M-date_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_event['year'].values, df_flatfile_event['mag'].values, label='NGAWest2 CA')
#edit figure properties
ax.set_xlabel(r'time ($year$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
ax.set_xlim([1965, 2025])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
# ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
#eq and sta location
fname_fig = 'eq_sta_locations'
fig, ax, data_crs, gl = pylib_cplt.PlotMap(flag_grid=True)
#plot earthquake and station locations
ax.plot(df_flatfile_event['eqLon'].values, df_flatfile_event['eqLat'].values, '*', transform = data_crs, markersize = 10, zorder=13, label='Events')
ax.plot(df_flatfile_station['staLon'].values, df_flatfile_station['staLat'].values, 'o', transform = data_crs, markersize = 6, zorder=12, label='Stations')
#edit figure properties
gl.xlabel_style = {'size': 25}
gl.ylabel_style = {'size': 25}
# gl.xlocator = mticker.FixedLocator([-124, -122, -120, -118, -116, -114])
# gl.ylocator = mticker.FixedLocator([32, 34, 36, 38, 40])
ax.legend(fontsize=25, loc='lower left')
# ax.set_xlim(plt_latlon_win[:,1])
# ax.set_ylim(plt_latlon_win[:,0])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Print data info
print(f'NGAWest2:')
print(f'\tnumber of rec: %.i'%len(df_flatfile_full))
print(f'\tnumber of rec (R<200km): %.i'%np.sum(df_flatfile_full.Rrup<=200))
print(f'\tnumber of rec (R<300km): %.i'%np.sum(df_flatfile_full.Rrup<=300))
print(f'\tnumber of eq: %.i'%len(df_flatfile_event))
print(f'\tnumber of sta: %.i'%len(df_flatfile_station))
print(f'\tcoverage: %.i to %i'%(df_flatfile_full.year.min(), df_flatfile_full.year.max()))
#write out summary
f = open(dir_out + 'summary_data' + '.txt', 'w')
f.write(f'NGAWest2:\n')
f.write(f'\tnumber of rec: %.i\n'%len(df_flatfile_full))
f.write(f'\tnumber of rec (R<200km): %.i\n'%np.sum(df_flatfile_full.Rrup<=200))
f.write(f'\tnumber of rec (R<300km): %.i\n'%np.sum(df_flatfile_full.Rrup<=300))
f.write(f'\tnumber of eq: %.i\n'%len(df_flatfile_event))
f.write(f'\tnumber of sta: %.i\n'%len(df_flatfile_station))
f.write(f'\tcoverage: %.i to %i\n'%(df_flatfile_full.year.min(), df_flatfile_full.year.max()))
f.close()
| 11,132 | 39.631387 | 164 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/CreateCatalogNewEvents2017.py | """
Created on Sun Jun 27 16:12:57 2021
@author: glavrent
"""
# Required Packages
#load libraries
import os
import sys
import pathlib
import glob
import re #regular expression package
#arithmetic libraries
import numpy as np
import pandas as pd
#geographic coordinates
import pyproj
#plotting libraries
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
#user-derfined functions
sys.path.insert(0,'../../Python_lib/ground_motions')
sys.path.insert(0,'../../Python_lib/plotting')
import pylib_Willis15CA_Vs30 as pylib_W15_Vs30
import pylib_contour_plots as pylib_cplt
# %% Define Input Data
#thresholds
dt_thres = 0.025 #maximum time step [sec]
rrup_thres = 300 #maximum rupture distance [km]
# projection system
utm_zone = '11S'
#input flatfiles
fname_flatfile_newrec_eq = '../../../Raw_files/nga_w3/California2011-2017/eventcatalog.csv'
fname_flatfile_newrec_sta = '../../../Raw_files/nga_w3/California2011-2017/Recorddata.csv'
#flatfile file
fname_flatfile = 'CatalogNewRecords_2011-2017_CA_NV'
#output directory
dir_out = '../../../Data/Verification/preprocessing/flatfiles/CA_NV_2011-2017/'
dir_fig = dir_out + 'figures/'
# %% Load Data
#merge event and station info
df_flatfile_newrec_eq = pd.read_csv(fname_flatfile_newrec_eq)
df_flatfile_newrec_sta = pd.read_csv(fname_flatfile_newrec_sta)
df_flatfile_newrec = pd.merge(df_flatfile_newrec_eq, df_flatfile_newrec_sta, left_on='EventIDs.i.', right_on='EventID')
# %% Cleaning files
#set -999 to nan
df_flatfile_newrec.replace(-999, np.nan, inplace=True)
#remove data based on timestep
df_flatfile_newrec = df_flatfile_newrec[ df_flatfile_newrec.timeStep <= dt_thres ]
#remove data with unknown mag
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec['mag.event']) ]
#remove data with unknown coordinates
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec[['latitude.event', 'longitude.event']]).any(axis=1) ]
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec[['stnlat', 'stnlon']]).any(axis=1) ]
#keep a single record per event-station pair
df_flatfile_newrec.loc[:,'EventID'] = df_flatfile_newrec['EventID'].values.astype(int)
df_flatfile_newrec.loc[:,'stationID'] = np.unique(df_flatfile_newrec['station'], return_inverse=True)[1]
i_unq_eq_sta = np.unique( np.unique(df_flatfile_newrec[['stationID','EventID']].values, return_index=True, axis=0)[1] )
df_flatfile_newrec = df_flatfile_newrec.iloc[i_unq_eq_sta, :]
#reset indices
df_flatfile_newrec.reset_index(inplace=True)
# %% Process Data
#coordinates and projection system
# projection system
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone+", +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
#earthquake and station ids
eq_id_newrec = df_flatfile_newrec['EventIDs.i.'].values.astype(int)
sta_id_newrec = df_flatfile_newrec['station'].values
sta_net_newrec = df_flatfile_newrec['network'].values
sta_netid_newrec = [f'{s_net}-{s_id}' for s_net, s_id in zip(sta_net_newrec, sta_id_newrec)]
#unique earthquake and station ids
eq_id_newrec_unq, eq_idx_newrec = np.unique(eq_id_newrec, return_index=True)
# sta_id_newrec_unq, sta_inv_newrec = np.unique(sta_id_newrec, return_inverse=True)
sta_id_newrec_unq, sta_inv_newrec = np.unique(sta_netid_newrec, return_inverse=True)
#number of earthquake and stations
neq_newrec = len(eq_id_newrec_unq)
nsta_newrec = len(sta_id_newrec_unq)
#earthquake and station coordinates
eq_latlon_newrec_all = df_flatfile_newrec[['latitude.event', 'longitude.event']].values
sta_latlon_newrec_all = df_flatfile_newrec[['stnlat', 'stnlon']].values
#utm coordinates
eq_X_newrec_all = np.array([utmProj(e_lon, e_lat) for e_lat, e_lon in zip(eq_latlon_newrec_all[:,0], eq_latlon_newrec_all[:,1])]) / 1000
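#source depth stored as negative elevation, clipped at the ground surface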
eq_z_newrec_all = np.minimum(-1*df_flatfile_newrec['depth.event.1000'].values, 0)
sta_X_newrec_all = np.array([utmProj(s_lon, s_lat) for s_lat, s_lon in zip(sta_latlon_newrec_all[:,0], sta_latlon_newrec_all[:,1])]) / 1000
mpt_X_newrec_all = (eq_X_newrec_all + sta_X_newrec_all) / 2
#mid point coordinates
mpt_latlon_newrec_all = np.flip( np.array([utmProj(pt_x, pt_y, inverse=True) for pt_x, pt_y in
zip(mpt_X_newrec_all[:,0], mpt_X_newrec_all[:,1]) ]), axis=1 )
#earthquake parameters
mag_newrec = df_flatfile_newrec['mag.event'].values
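#hypocentral (point-source) distance used as a proxy for rupture distance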
rup_newrec = np.sqrt(np.linalg.norm(eq_X_newrec_all-sta_X_newrec_all, axis=1)**2+eq_z_newrec_all**2)
#year of recording
df_flatfile_newrec['year'] = pd.DatetimeIndex(df_flatfile_newrec['rec.stime']).year
#estimate station vs30
Wills15Vs30 = pylib_W15_Vs30.Willis15Vs30CA()
vs30_newrec = Wills15Vs30.lookup(np.fliplr(sta_latlon_newrec_all))[0]
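#mask implausibly small Vs30 values (e.g. points outside the extent of the Vs30 map)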
vs30_newrec[vs30_newrec<=50] = np.nan
# %% Process Data to save
#distance threshold for data to keep
i_data2keep = rup_newrec <= rrup_thres
#records' info
rsn_array = df_flatfile_newrec.loc[i_data2keep,'index'].values
eqid_array = eq_id_newrec[i_data2keep]
ssn_array = sta_inv_newrec[i_data2keep]
sid_array = sta_id_newrec[i_data2keep]
snet_array = sta_net_newrec[i_data2keep]
year_array = df_flatfile_newrec['year'].values[i_data2keep]
#records' parameters
mag_array = mag_newrec[i_data2keep]
rrup_array = rup_newrec[i_data2keep]
vs30_array = vs30_newrec[i_data2keep]
#earthquake, station, mid-point latlon coordinates
eq_latlon = eq_latlon_newrec_all[i_data2keep,:]
sta_latlon = sta_latlon_newrec_all[i_data2keep,:]
mpt_latlon = mpt_latlon_newrec_all[i_data2keep,:]
#earthquake, station, mid-point UTM coordinates
eq_utm = eq_X_newrec_all[i_data2keep,:]
sta_utm = sta_X_newrec_all[i_data2keep,:]
mpt_utm = mpt_X_newrec_all[i_data2keep,:]
#earthquake source depth
eq_z = eq_z_newrec_all[i_data2keep]
#indices for unique earthquakes and stations
eq_idx = np.unique(eqid_array, return_index=True)[1]
sta_idx = np.unique(ssn_array, return_index=True)[1]
#data to save
data_full = {'rsn':rsn_array, 'eqid':eqid_array, 'ssn':ssn_array,
'mag':mag_array, 'Rrup':rrup_array, 'Vs30': vs30_array, 'year': year_array,
'eqLat':eq_latlon[:,0], 'eqLon':eq_latlon[:,1], 'staLat':sta_latlon[:,0], 'staLon':sta_latlon[:,1], 'mptLat':mpt_latlon[:,0], 'mptLon':mpt_latlon[:,1],
'UTMzone':utm_zone,
'eqX':eq_utm[:,0], 'eqY':eq_utm[:,1], 'eqZ':eq_z, 'staX':sta_utm[:,0], 'staY':sta_utm[:,1], 'mptX':mpt_utm[:,0], 'mptY':mpt_utm[:,1]}
#processed dataframes
df_flatfile_full = pd.DataFrame(data_full)
#event dataframe
df_flatfile_event = df_flatfile_full.loc[eq_idx, ['eqid','mag','year','eqLat','eqLon','UTMzone','eqX','eqY','eqZ']].reset_index(drop=True)
#station dataframe
df_flatfile_station = df_flatfile_full.loc[sta_idx, ['ssn','Vs30','staLat','staLon','UTMzone','staX','staY']].reset_index(drop=True)
# %% Save data
# create output directories
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(dir_fig): pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save processed dataframes
fname_flatfile_full= '%s%s'%(dir_out, fname_flatfile)
df_flatfile_full.to_csv(fname_flatfile_full + '.csv', index=True)
df_flatfile_event.to_csv(fname_flatfile_full + '_event.csv', index=False)
df_flatfile_station.to_csv(fname_flatfile_full + '_station.csv', index=False)
# create figures
# Mag-Dist distribution
fname_fig = 'M-R_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_full.Rrup, df_flatfile_full.mag, label='New Records')
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
ax.set_xscale('log')
# ax.set_xlim([0.1, 2000])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
# Mag-Year distribution
fname_fig = 'M-date_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_event['year'].values, df_flatfile_event['mag'].values, label='New Records')
#edit figure properties
ax.set_xlabel(r'time ($year$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
ax.set_xlim([1965, 2025])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
#eq and sta location
fname_fig = 'eq_sta_locations'
fig, ax, data_crs, gl = pylib_cplt.PlotMap(flag_grid=True)
#plot earthquake and station locations
ax.plot(df_flatfile_event['eqLon'].values, df_flatfile_event['eqLat'].values, '*', transform = data_crs, markersize = 10, zorder=13)
ax.plot(df_flatfile_station['staLon'].values, df_flatfile_station['staLat'].values, 'o', transform = data_crs, markersize = 6, zorder=13, label='STA')
ax.plot(df_flatfile_event['eqLon'].values, df_flatfile_event['eqLat'].values, '*', color='black', transform = data_crs, markersize = 14, zorder=13, label='EQ')
#edit figure properties
gl.xlabel_style = {'size': 25}
gl.ylabel_style = {'size': 25}
# gl.xlocator = mticker.FixedLocator([-124, -122, -120, -118, -116, -114])
# gl.ylocator = mticker.FixedLocator([32, 34, 36, 38, 40])
ax.legend(fontsize=25, loc='lower left')
# ax.set_xlim(plt_latlon_win[:,1])
# ax.set_ylim(plt_latlon_win[:,0])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Print data info
print('New Records:')
print('\tnumber of rec: %i'%len(df_flatfile_full))
print('\tnumber of rec (R<200km): %i'%np.sum(df_flatfile_full.Rrup<=200))
print('\tnumber of rec (R<%.1f): %i'%(rrup_thres, np.sum(df_flatfile_full.Rrup<=rrup_thres)))
print('\tnumber of eq: %i'%len(df_flatfile_event))
print('\tnumber of sta: %i'%len(df_flatfile_station))
print('\tnumber of sta (R<300km): %i'%len(np.unique(ssn_array[df_flatfile_full.Rrup<=300])))
print('\tcoverage: %i to %i'%(df_flatfile_full.year.min(), df_flatfile_full.year.max()))
| 11,196 | 41.25283 | 165 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/CreateCatalogNewEvents2021Raw.py | """
Created on Tue Oct 5 09:37:23 2021
@author: glavrent
"""
# %% Required Packages
#load libraries
import os
import sys
import pathlib
import re
#arithmetic libraries
import numpy as np
import pandas as pd
#geographical libraries
import geopy
from geopy.distance import distance
#plotting libraries
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
#user-defined functions
sys.path.insert(0,'../../Python_lib/catalog')
sys.path.insert(0,'../../Python_lib/plotting')
import pylib_catalog as pylib_catalog
import pylib_contour_plots as pylib_cplt
# %% Define variables
#input file names
fname_nrec_eq = '../../../Raw_files/nga_w3/IRIS/fdsnws-events_CA_2011-2021.csv'
fname_nrec_sta = '../../../Raw_files/nga_w3/IRIS/fdsn-station_USA_[HB][HN]?.csv'
fname_mag_rup_lim = '../../../Data/Verification/preprocessing/flatfiles/usable_mag_rrup/usable_Mag_Rrup_coeffs.csv'
#output directoy
dir_out = '../../../Data/Verification/preprocessing/flatfiles/CA_NV_2011-2021_raw/'
dir_fig = dir_out + 'figures/'
fname_new_cat = 'Catalog_California_2011-2021.ver02'
#station
srate_min = 10 #minimum sample rate for accepting instrument
net_seis = np.array(['AZ','BK','CI','NC','NN','NP','PB','PG','SB','WR','US','CJ','UO'])
# %% Load Data
df_nrec_eq = pd.read_csv(fname_nrec_eq, delimiter='|', skiprows=4)
df_nrec_sta = pd.read_csv(fname_nrec_sta, delimiter='|', skiprows=4)
#M/R limits
df_lim_mag_rrup = pd.read_csv(fname_mag_rup_lim, index_col=0)
# %% Process Data
#rename earthquake and station coordinates
df_nrec_eq.rename(columns={'Latitude':'eqLat', 'Longitude':'eqLon', 'Depth':'eqDepth'}, inplace=True)
df_nrec_sta.rename(columns={'Latitude':'staLat', 'Longitude':'staLon','Depth':'staDepth'}, inplace=True)
#remove rows with nan columns
#(DataFrame.isna is an alias of DataFrame.isnull, so a single pass suffices)
df_nrec_eq = df_nrec_eq.loc[~df_nrec_eq.isnull().any(axis=1).values,:]
df_nrec_sta = df_nrec_sta.loc[~df_nrec_sta.isnull().any(axis=1).values,:]
#remove invalid rows (header/comment lines parsed as data)
i_nan_rec = np.array([bool(re.match('^#.*$',n)) for n in df_nrec_sta['Network']])
df_nrec_sta = df_nrec_sta.loc[~i_nan_rec,:]
#keep networks of interest
i_net = df_nrec_sta.Network.isin(net_seis)
df_nrec_sta = df_nrec_sta.loc[i_net,:]
#keep only stations with sufficient sampling rate
df_nrec_sta.SampleRate = df_nrec_sta.SampleRate.astype(float)
i_srate = df_nrec_sta.SampleRate > srate_min
df_nrec_sta = df_nrec_sta.loc[i_srate,:]
#create network and station IDs
_, df_nrec_sta.loc[:,'NetworkID'] = np.unique(df_nrec_sta['Network'].values.astype(str), return_inverse=True)
_, df_nrec_sta.loc[:,'StationID'] = np.unique(df_nrec_sta['Station'].values.astype(str), return_inverse=True)
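#(np.unique with return_inverse maps every entry back to its position in the
# sorted unique array, e.g. np.unique(['b','a','b'], return_inverse=True)
# returns (array(['a','b']), array([1, 0, 1])), so the inverse array serves
# as an integer ID)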
#reduce to unique stations
_, i_sta_unq = np.unique(df_nrec_sta[['NetworkID','StationID']], return_index=True, axis=0)
df_nrec_sta = df_nrec_sta.iloc[i_sta_unq,:]
#station coordinates
sta_latlon = df_nrec_sta[['staLat','staLon']].values
#initialize rec catalog
cat_new_rec = []
#number of events and stations
n_eq = len(df_nrec_eq)
n_sta = len(df_nrec_sta)
#iterate over events
for (k, eq) in df_nrec_eq.iterrows():
print('Processing event %i of %i'%(k, n_eq))
#earthquake info
eq_latlon = eq[['eqLat','eqLon']].values
eq_depth = eq['eqDepth']
eq_mag = eq['Magnitude']
#epicenter and hypocenter dist
dist_epi = np.array([distance(eq_latlon, sta_ll).km for sta_ll in sta_latlon])
dist_hyp = np.sqrt(dist_epi**2 + eq_depth**2)
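    #hypocentral distance combines epicentral distance and focal depth:
    #  R_hyp = sqrt(R_epi**2 + z_eq**2)
    #e.g., R_epi = 30 km at z_eq = 10 km depth gives R_hyp ~= 31.6 km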
#stations that satisfy the M/R limit
i_sta_event = pylib_catalog.UsableSta(np.full(n_sta, eq_mag), dist_hyp, df_lim_mag_rrup)
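    #(UsableSta is assumed to return a boolean mask over the stations whose
    # hypocentral distance satisfies the magnitude-dependent usability limit;
    # the mask subsets df_nrec_sta below)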
#create catalog for k^th event
df_new_r = df_nrec_sta.loc[i_sta_event,:].assign(**eq)
#add rupture info
df_new_r.loc[:,'HypDist'] = dist_hyp[i_sta_event]
#combine sta with event info
cat_new_rec.append(df_new_r)
#combine catalogs of all events into one
df_cat_new_rec = pd.concat(cat_new_rec).reset_index()
#re-order columns
df_cat_new_rec = df_cat_new_rec[np.concatenate([df_nrec_eq.columns, df_nrec_sta.columns,['HypDist']])]
#create event and station dataframes
#indices for unique earthquakes and stations
eq_idx = np.unique(df_cat_new_rec.EventID, return_index=True)[1]
sta_idx = np.unique(df_cat_new_rec[['NetworkID','StationID']], return_index=True, axis=0)[1]
#event dataframe
df_cat_new_rec_eq = df_cat_new_rec.loc[eq_idx, df_nrec_eq.columns].reset_index(drop=True)
#station dataframe
df_cat_new_rec_sta = df_cat_new_rec.loc[sta_idx, df_nrec_sta.columns].reset_index(drop=True)
# %% Output
# create output directories
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(dir_fig): pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#save processed dataframes
fname_cat = '%s%s'%(dir_out, fname_new_cat)
df_cat_new_rec.to_csv(fname_cat + '.csv', index=False)
df_cat_new_rec_eq.to_csv(fname_cat + '_event.csv', index=False)
df_cat_new_rec_sta.to_csv(fname_cat + '_station.csv', index=False)
# create figures
# Mag-Dist distribution
fname_fig = 'M-R_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_cat_new_rec.HypDist, df_cat_new_rec.Magnitude)
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
# ax.set_xlim([0.1, 2000])
ax.set_ylim([1, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
# ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
#log scale figure
ax.set_xscale('log')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '_log' + '.png' )
# Mag-Year distribution
fname_fig = 'M-date_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(pd.DatetimeIndex(df_cat_new_rec['Time']).year, df_cat_new_rec['Magnitude'].values)
#edit figure properties
ax.set_xlabel(r'time ($year$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
ax.set_xlim([1965, 2025])
ax.set_ylim([1, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
# ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
#eq and sta location
fname_fig = 'eq_sta_locations'
fig, ax, data_crs, gl = pylib_cplt.PlotMap(flag_grid=True)
#plot earthquake and station locations
ax.plot(df_cat_new_rec_eq['eqLon'].values, df_cat_new_rec_eq['eqLat'].values, '*', transform = data_crs, markersize = 10, zorder=13, label='Events')
ax.plot(df_cat_new_rec_sta['staLon'].values, df_cat_new_rec_sta['staLat'].values, 'o', transform = data_crs, markersize = 6, zorder=13, label='Stations')
#edit figure properties
gl.xlabel_style = {'size': 25}
gl.ylabel_style = {'size': 25}
# gl.xlocator = mticker.FixedLocator([-124, -122, -120, -118, -116, -114])
# gl.ylocator = mticker.FixedLocator([32, 34, 36, 38, 40])
ax.legend(fontsize=25, loc='lower left')
# ax.set_xlim(plt_latlon_win[:,1])
# ax.set_ylim(plt_latlon_win[:,0])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# Print data info
print('New Records:')
print('\tnumber of rec: %i'%len(df_cat_new_rec))
print('\tnumber of rec (R<200km): %i'%np.sum(df_cat_new_rec.HypDist<=200))
print('\tnumber of rec (R<300km): %i'%np.sum(df_cat_new_rec.HypDist<=300))
print('\tnumber of eq: %i'%len(df_cat_new_rec_eq))
print('\tnumber of sta: %i'%len(df_cat_new_rec_sta))
| 8,640 | 36.569565 | 154 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/CreateCatalogNewEvents2021.py | """
Created on Sun Jun 27 16:12:57 2021
@author: glavrent
"""
# Required Packages
#load libraries
import os
import sys
import pathlib
import glob
import re #regular expression package
#arithmetic libraries
import numpy as np
import pandas as pd
#geographic coordinates
import pyproj
#plotting libraries
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
#user-defined functions
sys.path.insert(0,'../../Python_lib/catalog')
sys.path.insert(0,'../../Python_lib/plotting')
import pylib_catalog as pylib_catalog
import pylib_contour_plots as pylib_cplt
# %% Define Input Data
#thresholds
#min sampling rate
thres_dt = 0.025
#collocated stations
thres_dist = 0.01
#maximum depth
eq_mag_depth = 25
#distance range
# rrup_thres = 300
rrup_thres = 400
#year range
year_min = 2011
year_max = 2021
# year_max = 2013
# projection system
utm_zone = '11S'
#input flatfiles
fname_flatfile_newrec = '../../../Data/Verification/preprocessing/flatfiles/CA_NV_2011-2021_raw/Catalog_California_2011-2021.ver02.csv'
fname_sta_vs30 = '../../../Raw_files/nga_w3/IRIS/Catalog_California_2011-2021.ver02_station_Vs30PW.csv'
#flatfile file
fname_flatfile = 'CatalogNewRecords_%i-%i_CA_NV'%(year_min, year_max)
#output directory
dir_out = '../../../Data/Verification/preprocessing/flatfiles/CA_NV_%i-%i/'%(year_min, year_max)
dir_fig = dir_out + 'figures/'
#latlon window
win_latlon = np.array([[30, 43],[-125, -110]])
#win_latlon = np.array([[32, 43],[-125, -114]])
# %% Load Data
#read event and station info
df_flatfile_newrec = pd.read_csv(fname_flatfile_newrec)
#read vs30 info
if fname_sta_vs30:
df_sta_vs30 = pd.read_csv(fname_sta_vs30)
else:
df_sta_vs30 = None
# %% Process Data
# projection system
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone+", +ellps=WGS84 +datum=WGS84 +units=km +no_defs")
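#minimal usage sketch (illustrative values, assuming a standard pyproj build):
#  x, y = utmProj(-118.0, 36.0)        #lon/lat (deg) -> easting/northing (km, per +units=km)
#  lon, lat = utmProj(x, y, inverse=True)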
# rename columns
#note: event coordinates already carry the eqLat/eqLon names from the raw
#catalog, so 'Latitude'/'Longitude' here map the station coordinates only
df_flatfile_newrec.rename(columns={'EventID':'eventid', 'Time':'time',
                                   'Author':'author', 'Catalog':'cat', 'Contributor':'contributor', 'ContributorID':'contributor_id',
                                   'Magnitude':'mag', 'MagType':'mag_type', 'MagAuthor':'mag_author',
                                   'EventLocationName':'eq_loc', 'ESN':'esn', 'Double.event':'double_event',
                                   'event.ESN':'esn_y', 'event.EventID':'eventid', 'Network':'network', 'Station':'station',
                                   'Latitude':'staLat', 'Longitude':'staLon', 'Elevation':'staElev'},
                          inplace=True)
#year of event
df_flatfile_newrec['year'] = pd.DatetimeIndex(df_flatfile_newrec['time']).year
#station vs30
if fname_sta_vs30:
df_sta_vs30.rename(columns={'Network':'network', 'Station':'station', 'Vs30_lnSD': 'Vs30sig'}, inplace=True)
_, df_sta_vs30.loc[:,'sta_id'] = np.unique(df_sta_vs30.station, return_inverse=True)
_, df_sta_vs30.loc[:,'net_id'] = np.unique(df_sta_vs30.network, return_inverse=True)
    assert(len(df_sta_vs30) == len(np.unique(df_sta_vs30[['net_id','sta_id']], axis=0))),'Error. Non-unique network stations'
# cleaning files
#set -999 to nan
df_flatfile_newrec.replace(-999, np.nan, inplace=True)
#remove data with unknown mag
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec['mag']) ]
#remove data with unknown coordinates
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec[['eqLat', 'eqLon']]).any(axis=1) ]
df_flatfile_newrec = df_flatfile_newrec[ ~np.isnan(df_flatfile_newrec[['staLat', 'staLon']]).any(axis=1) ]
# keep only data in spatio-temporal window
#earthquakes
i_space_win_eq = np.all(np.array([df_flatfile_newrec.eqLat >= win_latlon[0,0],
df_flatfile_newrec.eqLat < win_latlon[0,1],
df_flatfile_newrec.eqLon >= win_latlon[1,0],
df_flatfile_newrec.eqLon < win_latlon[1,1]]),axis=0)
#stations
i_space_win_sta = np.all(np.array([df_flatfile_newrec.staLat >= win_latlon[0,0],
df_flatfile_newrec.staLat < win_latlon[0,1],
df_flatfile_newrec.staLon >= win_latlon[1,0],
df_flatfile_newrec.staLon < win_latlon[1,1]]),axis=0)
#depth limit
i_eq_depth = df_flatfile_newrec.eqDepth <= eq_mag_depth
#time
i_time_win = np.logical_and(df_flatfile_newrec.year >= year_min, df_flatfile_newrec.year <= year_max)
#records to keep
i_win = np.all(np.array([i_space_win_eq, i_space_win_sta, i_eq_depth, i_time_win]),axis=0)
df_flatfile_newrec = df_flatfile_newrec[i_win]
# Vs30
#Wills 2015 model for CA
# Wills15Vs30 = pylib_W15_Vs30.Willis15Vs30CA()
# df_flatfile_newrec.loc[:,'Vs30'] = Wills15Vs30.lookup(np.fliplr(df_flatfile_newrec[['staLat', 'staLon']]))[0]
# df_flatfile_newrec.loc[df_flatfile_newrec.loc[:,'Vs30'] < 50,'Vs30'] = np.nan
#geology based Vs30
if df_sta_vs30 is not None:
#Vs30 estimates from geology and topography (Pengfei, personal communication)
df_flatfile_newrec = pd.merge(df_flatfile_newrec, df_sta_vs30[['station','network','Vs30','Vs30sig']], on=['station','network'] )
else:
#Unavailable Vs30
df_flatfile_newrec.loc[:,['Vs30','Vs30sig']] = np.nan
# define earthquake, station and network ids
#original earthquake and station ids
df_flatfile_newrec.loc[:,'eventid'] = df_flatfile_newrec['eventid'].values.astype(int)
df_flatfile_newrec.loc[:,'staid'] = np.unique(df_flatfile_newrec['station'], return_inverse=True)[1] + 1
df_flatfile_newrec.loc[:,'netid'] = np.unique(df_flatfile_newrec['network'], return_inverse=True)[1] + 1
#keep single record from each event
i_unq_eq_sta = np.unique(df_flatfile_newrec[['eventid','staid','netid']].values, return_index=True, axis=0)[1]
df_flatfile_newrec = df_flatfile_newrec.iloc[i_unq_eq_sta, :]
# define rsn eqid and ssn
#reset indices
df_flatfile_newrec.reset_index(drop=True, inplace=True)
df_flatfile_newrec.rename_axis('rsn', inplace=True)
#updated earthquake and station ids
_, eq_idx, eq_inv = np.unique(df_flatfile_newrec.loc[:,'eventid'], axis=0, return_index=True, return_inverse=True)
_, sta_idx, sta_inv = np.unique(df_flatfile_newrec.loc[:,['netid','staid']], axis=0, return_index=True, return_inverse=True)
df_flatfile_newrec.loc[:,'eqid'] = eq_inv+1
df_flatfile_newrec.loc[:,'ssn'] = sta_inv+1
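#(the return_inverse arrays map every record to its unique event/station, so
# eqid and ssn become consecutive 1-based integer identifiers)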
n_eq_orig = len(eq_idx)
n_sta_orig = len(sta_idx)
# cartesian coordinates
#utm coordinates
eq_X = np.array([utmProj(e.eqLon, e.eqLat) for _, e in df_flatfile_newrec.iloc[eq_idx,:].iterrows()])
sta_X = np.array([utmProj(s.staLon, s.staLat) for _, s in df_flatfile_newrec.iloc[sta_idx,:].iterrows()])
df_flatfile_newrec.loc[:,['eqX','eqY']] = eq_X[eq_inv,:]
df_flatfile_newrec.loc[:,['staX','staY']] = sta_X[sta_inv,:]
#eq depth
df_flatfile_newrec.loc[:,'eqZ'] = np.minimum(-1*df_flatfile_newrec['eqDepth'].values, 0)
#utm zone
df_flatfile_newrec.loc[:,'UTMzone'] = utm_zone
# rupture distance
df_flatfile_newrec.loc[:,'Rrup'] = np.sqrt(np.linalg.norm(df_flatfile_newrec[['eqX','eqY']].values-df_flatfile_newrec[['staX','staY']].values, axis=1)**2 +
df_flatfile_newrec['eqZ']**2)
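#note: with the point-source representation used here, Rrup is simply the
#hypocentral distance sqrt((eqX-staX)**2 + (eqY-staY)**2 + eqZ**2), with
#stations taken at z=0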
#remove records based on rupture distance
i_rrup = df_flatfile_newrec['Rrup'] < rrup_thres
df_flatfile_newrec = df_flatfile_newrec.loc[i_rrup,:]
# colocate stations
#update ssn for colocated stations
df_flatfile_newrec = pylib_catalog.ColocatePt(df_flatfile_newrec, 'ssn', ['staX','staY'], thres_dist=thres_dist)
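#(ColocatePt is assumed to merge stations whose coordinates fall within
# thres_dist of each other and assign them a common ssn; thres_dist=0.01 in
# km units corresponds to 10 m)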
#keep a single record for each event-station pair
i_unq_eq_sta = np.unique(df_flatfile_newrec[['eqid','ssn']].values, return_index=True, axis=0)[1]
df_flatfile_newrec = df_flatfile_newrec.iloc[i_unq_eq_sta, :].sort_index()
# average gm parameters
df_flatfile_newrec = pylib_catalog.IndexAvgColumns(df_flatfile_newrec, 'eqid', ['mag','eqX','eqY','eqZ'])
df_flatfile_newrec = pylib_catalog.IndexAvgColumns(df_flatfile_newrec, 'ssn', ['Vs30','staX','staY','staElev'])
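#(IndexAvgColumns is assumed to average the listed columns over all records
# sharing the same eqid/ssn, giving repeated events/stations consistent metadata)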
#recalculate lat/lon coordinates
_, eq_idx, eq_inv = np.unique(df_flatfile_newrec.loc[:,'eqid'], axis=0, return_index=True, return_inverse=True)
_, sta_idx, sta_inv = np.unique(df_flatfile_newrec.loc[:,'ssn'], axis=0, return_index=True, return_inverse=True)
n_eq = len(eq_idx)
n_sta = len(sta_idx)
eq_latlon = np.flip([utmProj(e.eqX, e.eqY, inverse=True) for _, e in df_flatfile_newrec.iloc[eq_idx,:].iterrows()], axis=1)
sta_latlon = np.flip([utmProj(s.staX, s.staY, inverse=True) for _, s in df_flatfile_newrec.iloc[sta_idx,:].iterrows()], axis=1)
df_flatfile_newrec.loc[:,['eqLat','eqLon']] = eq_latlon[eq_inv,:]
df_flatfile_newrec.loc[:,['staLat','staLon']] = sta_latlon[sta_inv,:]
# midpoint coordinates
df_flatfile_newrec.loc[:,['mptX','mptY']] = (df_flatfile_newrec.loc[:,['eqX','eqY']].values + df_flatfile_newrec.loc[:,['staX','staY']].values) / 2
df_flatfile_newrec.loc[:,['mptLat','mptLon']] = np.flip( np.array([utmProj(pt.mptX, pt.mptY, inverse=True) for _, pt in df_flatfile_newrec.iterrows()]), axis=1 )
#recalculate rupture distance after averaging
df_flatfile_newrec.loc[:,'Rrup'] = np.sqrt(np.linalg.norm(df_flatfile_newrec[['eqX','eqY']].values-df_flatfile_newrec[['staX','staY']].values, axis=1)**2 +
df_flatfile_newrec['eqZ']**2)
# %% Save Data
# create output directories
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(dir_fig): pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
#full dataframe
df_flatfile_full = df_flatfile_newrec[['eqid','ssn','eventid','staid','netid','station','network',
'mag','mag_type','mag_author','Rrup','Vs30','time','year',
'eqLat','eqLon','staLat','staLon','mptLat','mptLon',
'UTMzone','eqX','eqY','eqZ','staX','staY','staElev','mptX','mptY',
'author','cat','contributor','contributor_id','eq_loc']]
#event dataframe
df_flatfile_event = df_flatfile_newrec.iloc[eq_idx,:][['eqid','eventid','mag','mag_type','mag_author','year',
'eqLat','eqLon','UTMzone','eqX','eqY','eqZ',
'author','cat','contributor','contributor_id','eq_loc']].reset_index(drop=True)
#station dataframe
df_flatfile_station = df_flatfile_newrec.iloc[sta_idx,:][['ssn','Vs30',
'staLat','staLon','UTMzone','staX','staY','staElev']].reset_index(drop=True)
#save processed dataframes
fname_flatfile_full = '%s%s'%(dir_out, fname_flatfile)
df_flatfile_full.to_csv(fname_flatfile_full + '.csv', index=True)
df_flatfile_event.to_csv(fname_flatfile_full + '_event.csv', index=False)
df_flatfile_station.to_csv(fname_flatfile_full + '_station.csv', index=False)
# create figures
# Mag-Dist distribution
fname_fig = 'M-R_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_full.Rrup, df_flatfile_full.mag, label='new records')
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
ax.set_xscale('log')
# ax.set_xlim([0.1, 2000])
ax.set_ylim([1, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
# ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
# Mag-Year distribution
fname_fig = 'M-date_dist'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(df_flatfile_event['year'].values, df_flatfile_event['mag'].values, label='new records')
#edit figure properties
ax.set_xlabel(r'time ($year$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
ax.set_xlim([1965, 2025])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
# ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
#eq and sta location
fname_fig = 'eq_sta_locations'
fig, ax, data_crs, gl = pylib_cplt.PlotMap(flag_grid=True)
#plot earthquake and station locations
ax.plot(df_flatfile_event['eqLon'].values, df_flatfile_event['eqLat'].values, '*', transform = data_crs, markersize = 10, zorder=12, label='Events')
ax.plot(df_flatfile_station['staLon'].values, df_flatfile_station['staLat'].values, 'o', transform = data_crs, markersize = 6, zorder=13, label='Stations')
#edit figure properties
gl.xlabel_style = {'size': 25}
gl.ylabel_style = {'size': 25}
# gl.xlocator = mticker.FixedLocator([-124, -122, -120, -118, -116, -114])
# gl.ylocator = mticker.FixedLocator([32, 34, 36, 38, 40])
ax.legend(fontsize=25, loc='lower left')
# ax.set_xlim(plt_latlon_win[:,1])
# ax.set_ylim(plt_latlon_win[:,0])
#save figure
fig.tight_layout()
fig.savefig( dir_fig + fname_fig + '.png' )
# print data info
print('New Records:')
print('\tnumber of rec: %i'%len(df_flatfile_newrec))
print('\tnumber of rec (R<200km): %i'%np.sum(df_flatfile_newrec.Rrup<=200))
print('\tnumber of rec (R<%.1f): %i'%(rrup_thres, np.sum(df_flatfile_newrec.Rrup<=rrup_thres)))
print('\tnumber of eq: %i'%n_eq)
print('\tnumber of sta: %i'%n_sta)
print('\tmin magnitude: %.1f'%df_flatfile_newrec.mag.min())
print('\tmax magnitude: %.1f'%df_flatfile_newrec.mag.max())
print('\tcoverage: %i to %i'%(df_flatfile_newrec.year.min(), df_flatfile_newrec.year.max()))
#write out summary
f = open(dir_out + 'summary_data' + '.txt', 'w')
f.write('New Records:\n')
f.write('\tnumber of rec: %i\n'%len(df_flatfile_newrec))
f.write('\tnumber of rec (R<200km): %i\n'%np.sum(df_flatfile_newrec.Rrup<=200))
f.write('\tnumber of rec (R<%.1f): %i\n'%(rrup_thres, np.sum(df_flatfile_newrec.Rrup<=rrup_thres)))
f.write('\tnumber of eq: %i\n'%n_eq)
f.write('\tnumber of sta: %i\n'%n_sta)
f.write('\tmin magnitude: %.1f\n'%df_flatfile_newrec.mag.min())
f.write('\tmax magnitude: %.1f\n'%df_flatfile_newrec.mag.max())
f.write('\tcoverage: %i to %i\n'%(df_flatfile_newrec.year.min(), df_flatfile_newrec.year.max()))
f.close()
| 15,785 | 44.889535 | 162 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/PlotCellPaths.py | """
Created on Sun Sep 13 18:00:32 2020
@author: glavrent
"""
# %% Required Packages
#load variables
import os
import sys
import pathlib
import glob
import re #regular expression package
#arithmetic libraries
import numpy as np
import pandas as pd
#geographic coordinates
import pyproj
#plotting libraries
from matplotlib import pyplot as plt
#user-defined functions
sys.path.insert(0,'../../Python_lib/plotting')
import pylib_contour_plots as pycplt
# Define Input Data
fname_flatfile = '../../../Data/Verification/preprocessing/flatfiles/CA_NV_2011-2021Lite/CatalogNewRecordsLite_2011-2021_CA_NV.csv'
fname_cellinfo = '../../../Data/Verification/preprocessing/cell_distances/CatalogNGAWest3CALite_cellinfo.csv'
fname_celldistfile = '../../../Data/Verification/preprocessing/cell_distances/CatalogNGAWest3CALite_distancematrix.csv'
#grid limits and size
coeff_latlon_win = np.array([[32, -125],[42.5, -114]])
#log scale for number of paths
flag_logscl = True
#output directory
dir_out = '../../../Data/Verification/preprocessing/cell_distances/figures/'
# Load Data
df_flatfile = pd.read_csv(fname_flatfile)
df_cellinfo = pd.read_csv(fname_cellinfo)
#cell distance file
df_celldata = pd.read_csv(fname_celldistfile, index_col=0).reindex(df_flatfile.rsn)
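#(reindex aligns the cell-distance rows with the flatfile record order, using rsn as the key)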
# Process Data
#coordinates and projection system
# projection system
utm_zone = np.unique(df_flatfile.UTMzone)[0] #utm zone
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone+", +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
#cell edge coordinates
cell_edge_latlon = []
for cell_edge in [['q5X','q5Y'], ['q6X','q6Y'], ['q8X','q8Y'],
['q7X','q7Y'], ['q5X','q5Y']]:
cell_edge_latlon.append( np.fliplr(np.array([utmProj(c_xy[0]*1000, c_xy[1]*1000, inverse=True) for c_xy in
df_cellinfo.loc[:,cell_edge].values])) )
cell_edge_latlon = np.hstack(cell_edge_latlon)
#cell mid-coordinates
cell_latlon = np.fliplr(np.array([utmProj(c_xy[0]*1000, c_xy[1]*1000, inverse=True) for c_xy in
df_cellinfo.loc[:,['mptX','mptY']].values]))
#earthquake and station ids
eq_id_train = df_flatfile['eqid'].values.astype(int)
sta_id_train = df_flatfile['ssn'].values.astype(int)
eq_id, eq_idx = np.unique(eq_id_train, return_index=True)
sta_id, sta_idx = np.unique(sta_id_train, return_index=True)
#earthquake and station coordinates
eq_latlon_train = df_flatfile[['eqLat', 'eqLon']].values
stat_latlon_train = df_flatfile[['staLat', 'staLon']].values
#unique earthquake and station coordinates
eq_latlon = eq_latlon_train[eq_idx,:]
stat_latlon = stat_latlon_train[sta_idx,:]
#cell names
cell_i = [bool(re.match('^c\\..*$',c_n)) for c_n in df_celldata.columns.values] #indices for cell columns
cell_names = df_celldata.columns.values[cell_i]
#cell-distance matrix with all cells
cell_dist = df_celldata[cell_names]
cell_n_paths = (cell_dist > 0).sum()
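#(for each cell column, (cell_dist > 0).sum() counts the records with a
# nonzero path length through that cell, i.e. the number of crossing paths)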
# Create cell figures
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
# if flag_pub:
# # mpl.rcParams['font.family'] = 'Avenir'
# plt.rcParams['axes.linewidth'] = 2
# Plot cell paths
fname_fig = 'cA_paths'
fig, ax, data_crs, gl = pycplt.PlotMap()
#plot earthquake and station locations
ax.plot(eq_latlon[:,1], eq_latlon[:,0], '*', transform = data_crs, markersize = 10, zorder=13, label='Events')
ax.plot(stat_latlon[:,1], stat_latlon[:,0], 'o', transform = data_crs, markersize = 6, zorder=12, label='Stations')
# ax.plot(eq_latlon[:,1], eq_latlon[:,0], '^', transform = data_crs, color = 'black', markersize = 10, zorder=13, label='Earthquake')
# ax.plot(stat_latlon[:,1], stat_latlon[:,0], 'o', transform = data_crs, color = 'black', markersize = 3, zorder=12, label='Station')
#plot earthquake-station paths
for rec in df_flatfile[['eqLat','eqLon','staLat','staLon']].iterrows():
ax.plot(rec[1][['eqLon','staLon']], rec[1][['eqLat','staLat']], transform = data_crs, color = 'gray', linewidth=0.05, zorder=10, alpha=0.2)
#plot cells
for ce_xy in cell_edge_latlon:
ax.plot(ce_xy[[1,3,5,7,9]],ce_xy[[0,2,4,6,8]],color='gray', transform = data_crs)
#figure limits
ax.set_xlim( coeff_latlon_win[:,1] )
ax.set_ylim( coeff_latlon_win[:,0] )
#edit figure properties
#grid lines
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
gl.xlabel_style = {'size': 25}
gl.ylabel_style = {'size': 25}
#add legend
ax.legend(fontsize=25, loc='lower left')
#apply tight layout
fig.show()
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png')
# Plot cell paths
fname_fig = 'cA_num_paths'
cbar_label = 'Number of paths'
data2plot = np.vstack([cell_latlon.T, cell_n_paths.values]).T
#color limits
cmin = 0
cmax = 2000
#log scale options
if flag_logscl:
# data2plot[:,2] = np.maximum(data2plot[:,2], 1)
cmin = np.log(1)
cmax = np.log(cmax)
#create figure
fig, ax, cbar, data_crs, gl = pycplt.PlotCellsCAMap(data2plot, cmin=cmin, cmax=cmax, log_cbar = flag_logscl,
frmt_clb = '%.0f', cmap='OrRd')
#plot cells
for ce_xy in cell_edge_latlon:
ax.plot(ce_xy[[1,3,5,7]],ce_xy[[0,2,4,6]],color='gray', transform = data_crs)
#figure limits
ax.set_xlim( coeff_latlon_win[:,1] )
ax.set_ylim( coeff_latlon_win[:,0] )
#edit figure properties
#grid lines
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
gl.xlabel_style = {'size': 25}
gl.ylabel_style = {'size': 25}
#update colorbar
cbar.set_label(cbar_label, size=30)
cbar.ax.tick_params(labelsize=25)
#apply tight layout
fig.show()
fig.tight_layout()
fig.savefig( dir_out + fname_fig + '.png')
| 5,935 | 34.54491 | 143 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/PlotUsableMagRrupCatalog.py | """
Created on Mon Oct 4 16:32:37 2021
@author: glavrent
"""
# %% Required Packages
#load libraries
import os
import pathlib
#arithmetic libraries
import numpy as np
import pandas as pd
#plotting libraries
from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
# %% Define variables
#input file names
fname_flatfile_NGA2 = '../../../Raw_files/nga_w2/Updated_NGA_West2_Flatfile_RotD50_d050_public_version.xlsx'
fname_mag_rrup_lim = '../../../Data/Verification/preprocessing/flatfiles/usable_mag_rrup/usable_Mag_Rrup_coeffs.csv'
#output directoy
dir_fig = '../../../Data/Verification/preprocessing/flatfiles/usable_mag_rrup/'
# %% Load Data
#NGAWest2
df_flatfile_NGA2 = pd.read_excel(fname_flatfile_NGA2)
#M/R limit
df_m_r_lim = pd.read_csv(fname_mag_rrup_lim,index_col=0)
#remove rec with unavailable data
df_flatfile_NGA2 = df_flatfile_NGA2.loc[df_flatfile_NGA2.EQID>0,:]
df_flatfile_NGA2 = df_flatfile_NGA2.loc[df_flatfile_NGA2['ClstD (km)']>0,:]
#mag and distance arrays
mag_array = df_flatfile_NGA2['Earthquake Magnitude']
rrup_array = df_flatfile_NGA2['ClstD (km)']
#compute limit
rrup_lim1 = np.arange(0,1001)
mag_lim1 = (df_m_r_lim.loc['b0','coefficients'] +
df_m_r_lim.loc['b1','coefficients'] * rrup_lim1 +
df_m_r_lim.loc['b2','coefficients'] * rrup_lim1**2)
rrup_lim2 = df_m_r_lim.loc['max_rrup','coefficients']
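#the usability screen is a quadratic magnitude limit in distance,
#  M_min(R) = b0 + b1*R + b2*R**2, capped at R <= max_rrup;
#records at or above the curve are presumably the ones treated as usable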
# %% Process Data
if not os.path.isdir(dir_fig): pathlib.Path(dir_fig).mkdir(parents=True, exist_ok=True)
# create figures
# Mag-Dist distribution
fname_fig = 'M-R_limits'
#create figure
fig, ax = plt.subplots(figsize = (10,9))
pl1 = ax.scatter(rrup_array, mag_array, label='NGAWest2 CA')
pl2 = ax.plot(rrup_lim1, mag_lim1, linewidth=2, color='black')
pl3 = ax.vlines(rrup_lim2, ymin=0, ymax=10, linewidth=2, color='black', linestyle='--')
#edit figure properties
ax.set_xlabel(r'Distance ($km$)', fontsize=30)
ax.set_ylabel(r'Magnitude', fontsize=30)
ax.grid(which='both')
# ax.set_xscale('log')
ax.set_xlim([0, 1000])
ax.set_ylim([2, 8])
ax.tick_params(axis='x', labelsize=25)
ax.tick_params(axis='y', labelsize=25)
# ax.legend(fontsize=25, loc='upper left')
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
fig.tight_layout()
#save figure
fig.savefig( dir_fig + fname_fig + '.png' )
| 2,814 | 33.753086 | 117 | py |
ngmm_tools | ngmm_tools-master/Analyses/Code_Verification/preprocessing/ComputeCellDistance.py | """
Created on Thu Apr 9 11:04:25 2020
@author: glavrent
"""
# Required Packages
#load libraries
import os
import sys
import pathlib
import numpy as np
import pandas as pd
from scipy import sparse
#geographic libraries
import pyproj
#user libraries
sys.path.insert(0,'../../Python_lib/ground_motions')
import pylib_cell_dist as pylib_cells
# %% Define Input Data
#input flatfile
# fname_flatfile = 'CatalogNGAWest3CA'
# fname_flatfile = 'CatalogNGAWest3CA_2013'
fname_flatfile = 'CatalogNGAWest3CALite'
# fname_flatfile = 'CatalogNGAWest3NCA'
# fname_flatfile = 'CatalogNGAWest3SCA'
dir_flatfile = '../../../Data/Verification/preprocessing/flatfiles/merged/'
#output files
dir_out = '../../../Data/Verification/preprocessing/cell_distances/'
# %% Read and Process Input Data
# read ground-motion data
fullname_flatfile = dir_flatfile + fname_flatfile + '.csv'
df_flatfile = pd.read_csv(fullname_flatfile)
n_rec = len(df_flatfile)
#define projection system
assert(len(np.unique(df_flatfile.UTMzone))==1),'Error. Multiple UTM zones defined.'
utm_zone = df_flatfile.UTMzone[0]
utmProj = pyproj.Proj("+proj=utm +zone="+utm_zone+", +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
#create output directory
if not os.path.isdir(dir_out): pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
#create object with source and station locations
#utm coordinates
data4celldist = df_flatfile.loc[:,['eqX','eqY','eqZ','staX','staY']].values
flagUTM = True
#add elevation for stations
data4celldist = np.hstack([data4celldist,np.zeros([n_rec,1])])
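#resulting column layout: [eqX, eqY, eqZ, staX, staY, staZ(=0)], all in km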
# %% Create Cell Grid
#grid range
grid_lims_x = [data4celldist[:,[0,3]].min(), data4celldist[:,[0,3]].max()]
grid_lims_y = [data4celldist[:,[1,4]].min(), data4celldist[:,[1,4]].max()]
grid_lims_z = [data4celldist[:,[2,5]].min(), data4celldist[:,[2,5]].max()]
#manual limits
#utm limits
# #NGAWest3 full
# grid_lims_x = [-200, 1100]
# grid_lims_y = [3300, 4800]
# grid_lims_z = [-100, 0]
#NGAWest3 lite
grid_lims_x = [-200, 800]
grid_lims_y = [3450, 4725]
grid_lims_z = [-50, 0]
#cell size
cell_size = [25, 25, 50]
#lat-lon grid spacing
grid_x = np.arange(grid_lims_x[0], grid_lims_x[1]+0.1, cell_size[0])
grid_y = np.arange(grid_lims_y[0], grid_lims_y[1]+0.1, cell_size[1])
grid_z = np.arange(grid_lims_z[0], grid_lims_z[1]+0.1, cell_size[2])
#cell schematic (q1-q4: bottom face, q5-q8: top face)
#      q7--------q8
#      /|        /|
#    q5--------q6 |
#     | q3-----|-q4
#     |/       |/
#    q1--------q2
#create cells
j1 = 0
j2 = 0
j3 = 0
cells = []
for j1 in range(len(grid_x)-1):
for j2 in range(len(grid_y)-1):
for j3 in range(len(grid_z)-1):
#cell corners (bottom-face)
cell_c1 = [grid_x[j1], grid_y[j2], grid_z[j3]]
cell_c2 = [grid_x[j1+1], grid_y[j2], grid_z[j3]]
cell_c3 = [grid_x[j1], grid_y[j2+1], grid_z[j3]]
cell_c4 = [grid_x[j1+1], grid_y[j2+1], grid_z[j3]]
#cell corners (top-face)
cell_c5 = [grid_x[j1], grid_y[j2], grid_z[j3+1]]
cell_c6 = [grid_x[j1+1], grid_y[j2], grid_z[j3+1]]
cell_c7 = [grid_x[j1], grid_y[j2+1], grid_z[j3+1]]
cell_c8 = [grid_x[j1+1], grid_y[j2+1], grid_z[j3+1]]
#cell center
cell_cent = np.mean(np.stack([cell_c1,cell_c2,cell_c3,cell_c4,
cell_c5,cell_c6,cell_c7,cell_c8]),axis = 0).tolist()
#summarize all cell coordinates in a list
cell_info = cell_c1 + cell_c2 + cell_c3 + cell_c4 + \
cell_c5 + cell_c6 + cell_c7 + cell_c8 + cell_cent
#add cell info
cells.append(cell_info)
del j1, j2, j3, cell_info
del cell_c1, cell_c2, cell_c3, cell_c4, cell_c5, cell_c6, cell_c7, cell_c8
cells = np.array(cells)
n_cells = len(cells)
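#with the Lite limits above this yields (800+200)/25 x (4725-3450)/25 x 50/50
#= 40 x 51 x 1 = 2040 cells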
#cell info
cell_ids = np.arange(n_cells)
cell_names = ['c.%i'%(i) for i in cell_ids]
cell_q_names = ['q1X','q1Y','q1Z','q2X','q2Y','q2Z','q3X','q3Y','q3Z','q4X','q4Y','q4Z',
'q5X','q5Y','q5Z','q6X','q6Y','q6Z','q7X','q7Y','q7Z','q8X','q8Y','q8Z',
'mptX','mptY','mptZ']
# Create cell info dataframe
#cell names
df_data1 = pd.DataFrame({'cellid': cell_ids, 'cellname': cell_names})
#cell coordinates
df_data2 = pd.DataFrame(cells, columns = cell_q_names)
df_cellinfo = pd.merge(df_data1,df_data2,left_index=True,right_index=True)
# add cell utm zone
df_cellinfo.loc[:,'UTMzone'] = utm_zone
# Compute Lat\Lon of cells
#cell vertices
for q in range(1,9):
c_X = ['q%iX'%q, 'q%iY'%q]
c_latlon = ['q%iLat'%q, 'q%iLon'%q]
df_cellinfo.loc[:,c_latlon] = np.flip( np.array([utmProj(pt_xy[0]*1e3, pt_xy[1]*1e3, inverse=True)
for _, pt_xy in df_cellinfo[c_X].iterrows() ]), axis=1)
#cell midpoints
c_X = ['mptX', 'mptY']
c_latlon = ['mptLat','mptLon']
df_cellinfo.loc[:,c_latlon] = np.flip( np.array([utmProj(pt_xy[0]*1e3, pt_xy[1]*1e3, inverse=True)
for _, pt_xy in df_cellinfo[c_X].iterrows() ]), axis=1)
# %% Compute Cell distances
cells4dist = cells[:,[0,1,2,21,22,23]]
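#(columns 0-2 and 21-23 of cells are the q1 and q8 corners, the two opposite
# vertices that fully define each axis-aligned cell)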
distancematrix = np.zeros([len(data4celldist), len(cells4dist)])
for i in range(len(data4celldist)):
print('Computing cell distances, record',i)
pt1 = data4celldist[i,(0,1,2)]
pt2 = data4celldist[i,(3,4,5)]
dm = pylib_cells.ComputeDistGridCells(pt1,pt2,cells4dist, flagUTM)
distancematrix[i] = dm
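#each row of distancematrix holds the path length of one source-to-site ray
#through every cell, so the row sums should reproduce Rrup (checked below)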
#print Rrup misfits
dist_diff = df_flatfile.Rrup - distancematrix.sum(axis=1)
print('max R_rup misfit', max(dist_diff))
print('min R_rup misfit', min(dist_diff))
#convert cell distances to sparse matrix
distmatrix_sparce = sparse.coo_matrix(distancematrix)
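#a COO matrix keeps only the nonzero entries as (row, col, value) triples,
#e.g. sparse.coo_matrix([[0., 3.], [4., 0.]]) stores row=(0,1), col=(1,0),
#data=(3.,4.); indices are shifted to 1-based below, presumably for
#downstream tools that expect 1-based indexing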
# Create cell distances data-frame
#record info
df_recinfo = df_flatfile[['rsn','eqid','ssn']]
#cell distances
df_celldist = pd.DataFrame(distancematrix, columns = cell_names)
df_celldist = pd.merge(df_recinfo, df_celldist, left_index=True, right_index=True)
#sparse cell distances
df_celldist_sp = pd.DataFrame({'row': distmatrix_sparce.row+1, 'col': distmatrix_sparce.col+1, 'data': distmatrix_sparce.data})
# %% Save data
#save cell info
fname_cellinfo = fname_flatfile + '_cellinfo'
df_cellinfo.to_csv(dir_out + fname_cellinfo + '.csv', index=False)
#save distance matrix
fname_celldist = fname_flatfile + '_distancematrix'
df_celldist.to_csv(dir_out + fname_celldist + '.csv', index=False)
#save distance matrix in sparse (COO) format
fname_celldist = fname_flatfile + '_distancematrix_sparce'
df_celldist_sp.to_csv(dir_out + fname_celldist + '.csv', index=False)
| 7,014 | 33.219512 | 127 | py |