repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
PyKrige | PyKrige-main/src/pykrige/kriging_tools.py | """
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
bscott.murphy@gmail.com
Summary
-------
Methods for reading/writing ASCII grid files.
Copyright (c) 2015-2020, PyKrige Developers
"""
import datetime
import io
import os
import warnings
import numpy as np
def write_asc_grid(x, y, z, filename="output.asc", no_data=-999.0, style=1):
r"""Writes gridded data to ASCII grid file (\*.asc).
This is useful for exporting data to a GIS program.
Parameters
----------
x : array_like, shape (N,) or (N, 1)
X-coordinates of grid points at center of cells.
y : array_like, shape (M,) or (M, 1)
Y-coordinates of grid points at center of cells.
z : array_like, shape (M, N)
Gridded data values. May be a masked array.
filename : string, optional
Name of output \*.asc file. Default name is 'output.asc'.
no_data : float, optional
        NODATA value to use for missing or masked entries.
style : int, optional
Determines how to write the \*.asc file header.
Specifying 1 writes out DX, DY, XLLCENTER, YLLCENTER.
Specifying 2 writes out CELLSIZE (note DX must be the same as DY),
XLLCORNER, YLLCORNER. Default is 1.
"""
    if np.ma.is_masked(z):
        z = np.array(z.tolist(no_data))
    x = np.squeeze(np.array(x))
    y = np.squeeze(np.array(y))
    z = np.squeeze(np.array(z))
    if z.ndim != 2:
        raise ValueError("Two-dimensional grid is required to write *.asc grid.")
    nrows = z.shape[0]
    ncols = z.shape[1]
if x.ndim > 1 or y.ndim > 1:
raise ValueError(
"Dimensions of X and/or Y coordinate arrays are not "
"as expected. Could not write *.asc grid."
)
if z.shape != (y.size, x.size):
warnings.warn(
"Grid dimensions are not as expected. "
"Incorrect *.asc file generation may result.",
RuntimeWarning,
)
if np.amin(x) != x[0] or np.amin(y) != y[0]:
warnings.warn(
"Order of X or Y coordinates is not as expected. "
"Incorrect *.asc file generation may result.",
RuntimeWarning,
)
dx = abs(x[1] - x[0])
dy = abs(y[1] - y[0])
if not np.isclose(abs((x[-1] - x[0]) / (x.shape[0] - 1)), dx) or not np.isclose(
abs((y[-1] - y[0]) / (y.shape[0] - 1)), dy
):
raise ValueError(
"X or Y spacing is not constant; *.asc grid cannot be written."
)
cellsize = -1
if style == 2:
if dx != dy:
raise ValueError(
"X and Y spacing is not the same. "
"Cannot write *.asc file in the specified format."
)
cellsize = dx
xllcenter = x[0]
yllcenter = y[0]
# Note that these values are flagged as -1. If there is a problem in trying
# to write out style 2, the -1 value will appear in the output file.
xllcorner = -1
yllcorner = -1
if style == 2:
xllcorner = xllcenter - dx / 2.0
yllcorner = yllcenter - dy / 2.0
with io.open(filename, "w") as f:
if style == 1:
f.write("NCOLS " + "{:<10n}".format(ncols) + "\n")
f.write("NROWS " + "{:<10n}".format(nrows) + "\n")
f.write("XLLCENTER " + "{:<10.2f}".format(xllcenter) + "\n")
f.write("YLLCENTER " + "{:<10.2f}".format(yllcenter) + "\n")
f.write("DX " + "{:<10.2f}".format(dx) + "\n")
f.write("DY " + "{:<10.2f}".format(dy) + "\n")
f.write("NODATA_VALUE " + "{:<10.2f}".format(no_data) + "\n")
elif style == 2:
f.write("NCOLS " + "{:<10n}".format(ncols) + "\n")
f.write("NROWS " + "{:<10n}".format(nrows) + "\n")
f.write("XLLCORNER " + "{:<10.2f}".format(xllcorner) + "\n")
f.write("YLLCORNER " + "{:<10.2f}".format(yllcorner) + "\n")
f.write("CELLSIZE " + "{:<10.2f}".format(cellsize) + "\n")
f.write("NODATA_VALUE " + "{:<10.2f}".format(no_data) + "\n")
else:
raise ValueError("style kwarg must be either 1 or 2.")
for m in range(z.shape[0] - 1, -1, -1):
for n in range(z.shape[1]):
f.write("{:<16.2f}".format(z[m, n]))
if m != 0:
f.write("\n")
def read_asc_grid(filename, footer=0):
r"""Reads ASCII grid file (\*.asc).
Parameters
----------
filename : str
Name of \*.asc file.
footer : int, optional
Number of lines at bottom of \*.asc file to skip.
Returns
-------
grid_array : numpy array, shape (M, N)
(M, N) array of grid values, where M is number of Y-coordinates and
N is number of X-coordinates. The array entry corresponding to
        the lower-left coordinates is at index [0, 0], so that
the array is oriented as it would be in X-Y space.
x : numpy array, shape (N,)
1D array of N X-coordinates.
y : numpy array, shape (M,)
1D array of M Y-coordinates.
CELLSIZE : tuple or float
Either a two-tuple of (x-cell size, y-cell size),
or a float that specifies the uniform cell size.
NODATA : float
Value that specifies which entries are not actual data.
"""
ncols = None
nrows = None
xllcorner = None
xllcenter = None
yllcorner = None
yllcenter = None
cellsize = None
dx = None
dy = None
no_data = None
header_lines = 0
with io.open(filename, "r") as f:
while True:
string, value = f.readline().split()
header_lines += 1
if string.lower() == "ncols":
ncols = int(value)
elif string.lower() == "nrows":
nrows = int(value)
elif string.lower() == "xllcorner":
xllcorner = float(value)
elif string.lower() == "xllcenter":
xllcenter = float(value)
elif string.lower() == "yllcorner":
yllcorner = float(value)
elif string.lower() == "yllcenter":
yllcenter = float(value)
elif string.lower() == "cellsize":
cellsize = float(value)
elif string.lower() == "cell_size":
cellsize = float(value)
elif string.lower() == "dx":
dx = float(value)
elif string.lower() == "dy":
dy = float(value)
elif string.lower() == "nodata_value":
no_data = float(value)
elif string.lower() == "nodatavalue":
no_data = float(value)
else:
raise IOError("could not read *.asc file. Error in header.")
if (
(ncols is not None)
and (nrows is not None)
and (
((xllcorner is not None) and (yllcorner is not None))
or ((xllcenter is not None) and (yllcenter is not None))
)
and ((cellsize is not None) or ((dx is not None) and (dy is not None)))
and (no_data is not None)
):
break
raw_grid_array = np.genfromtxt(
filename, skip_header=header_lines, skip_footer=footer
)
grid_array = np.flipud(raw_grid_array)
if nrows != grid_array.shape[0] or ncols != grid_array.shape[1]:
raise IOError(
"Error reading *.asc file. Encountered problem "
"with header: NCOLS and/or NROWS does not match "
"number of columns/rows in data file body."
)
if xllcorner is not None and yllcorner is not None:
if dx is not None and dy is not None:
xllcenter = xllcorner + dx / 2.0
yllcenter = yllcorner + dy / 2.0
else:
xllcenter = xllcorner + cellsize / 2.0
yllcenter = yllcorner + cellsize / 2.0
if dx is not None and dy is not None:
x = np.arange(xllcenter, xllcenter + ncols * dx, dx)
y = np.arange(yllcenter, yllcenter + nrows * dy, dy)
else:
x = np.arange(xllcenter, xllcenter + ncols * cellsize, cellsize)
y = np.arange(yllcenter, yllcenter + nrows * cellsize, cellsize)
    # Sometimes x and y can be an entry too long due to imprecision
# in calculating the upper cutoff for np.arange(); this bit takes care of
# that potential problem.
if x.size == ncols + 1:
x = x[:-1]
if y.size == nrows + 1:
y = y[:-1]
if cellsize is None:
cellsize = (dx, dy)
return grid_array, x, y, cellsize, no_data
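# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hypothetical round trip through the two functions above. The
# coordinates, grid values and file name are arbitrary; z is written with two
# decimals, hence the loose tolerance on the comparison.
def _example_asc_roundtrip(filename="example.asc"):
    x = np.linspace(0.0, 4.0, 5)         # 5 cell-centre X coordinates
    y = np.linspace(10.0, 13.0, 4)       # 4 cell-centre Y coordinates
    z = np.random.rand(y.size, x.size)   # grid shaped (M, N) = (len(y), len(x))
    write_asc_grid(x, y, z, filename=filename, style=1)
    grid, x_read, y_read, cellsize, no_data = read_asc_grid(filename)
    return np.allclose(grid, z, atol=0.01)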
def write_zmap_grid(
x, y, z, filename="output.zmap", no_data=-999.0, coord_sys="<null>"
):
r"""Writes gridded data to ASCII grid file in zmap format (\*.zmap).
This is useful for exporting data to a GIS program, or Petrel
https://gdal.org/drivers/raster/zmap.html
Parameters
----------
x : array_like, shape (N,) or (N, 1)
X-coordinates of grid points at center of cells.
y : array_like, shape (M,) or (M, 1)
Y-coordinates of grid points at center of cells.
z : array_like, shape (M, N)
Gridded data values. May be a masked array.
filename : string, optional
Name of output \*.zmap file. Default name is 'output.zmap'.
no_data : float, optional
no data value to be used
coord_sys : String, optional
        coordinate system description
"""
nodes_per_line = 5
field_width = 15
if np.ma.is_masked(z):
z = np.array(z.tolist(no_data))
x = np.squeeze(np.array(x))
y = np.squeeze(np.array(y))
z = np.squeeze(np.array(z))
nx = len(x)
ny = len(y)
dx = abs(x[1] - x[0])
dy = abs(y[1] - y[0])
if not np.isclose(abs((x[-1] - x[0]) / (x.shape[0] - 1)), dx) or not np.isclose(
abs((y[-1] - y[0]) / (y.shape[0] - 1)), dy
):
raise ValueError(
"X or Y spacing is not constant; *.asc grid cannot be written."
)
xllcenter = x[0]
yllcenter = y[0]
hix = xllcenter + (nx - 1) * dx
hiy = yllcenter + (ny - 1) * dy
now = datetime.datetime.now()
with io.open(filename, "w") as f:
f.write("!" + "\n")
f.write("! ZIMS FILE NAME : " + os.path.basename(filename) + "\n")
f.write(
"! FORMATTED FILE CREATION DATE: " + now.strftime("%d/%m/%Y") + "\n"
)
f.write(
"! FORMATTED FILE CREATION TIME: " + now.strftime("%H:%M:%S") + "\n"
)
f.write("! COORDINATE REFERENCE SYSTEM: " + coord_sys + "\n")
f.write("!" + "\n")
f.write("@Grid HEADER, GRID, " + str(nodes_per_line) + "\n")
f.write(" " + str(field_width) + ", " + str(no_data) + ", , 1 , 1" + "\n")
f.write(
" "
+ str(ny)
+ ", "
+ str(nx)
+ ", "
+ str(xllcenter)
+ ", "
+ str(hix)
+ ", "
+ str(yllcenter)
+ ", "
+ str(hiy)
+ "\n"
)
f.write(" " + str(dx) + ", 0.0, 0.0 " + "\n")
f.write("@" + "\n")
for n in range(z.shape[1]):
count = 0
for m in range(z.shape[0] - 1, -1, -1):
count += 1
if np.isnan(z[m, n]):
f.write(space_back_to_front(format(no_data, "13.7E") + " "))
else:
                    if abs(z[m, n]) >= 1e100:  # one trailing space less
f.write(space_back_to_front(format(z[m, n], "13.7E") + " "))
elif abs(z[m, n]) >= 1e6:
f.write(space_back_to_front(format(z[m, n], "13.7E") + " "))
else:
f.write(space_back_to_front("{:<13.4f}".format(z[m, n]) + " "))
if count % nodes_per_line == 0 or m == 0:
f.write("\n")
def read_zmap_grid(filename):
r"""Reads ASCII grid file in zmap format (\*.zmap).
https://gdal.org/drivers/raster/zmap.html
Parameters
----------
filename : str
Name of \*.zmap file.
Returns
-------
grid_array : numpy array, shape (M, N)
(M, N) array of grid values, where M is number of Y-coordinates and
N is number of X-coordinates. The array entry corresponding to
        the lower-left coordinates is at index [0, 0], so that
the array is oriented as it would be in X-Y space.
x : numpy array, shape (N,)
1D array of N X-coordinates.
y : numpy array, shape (M,)
1D array of M Y-coordinates.
cellsize : tuple or float
Either a two-tuple of (x-cell size, y-cell size),
or a float that specifies the uniform cell size.
no_data_value : float
Value that specifies which entries are not actual data.
coord_sys : String
Coordinate system name
"""
no_data_value, nx, ny, originx, originy, maxx, maxy, dx, dy = (
0,
0,
0,
0,
0,
0,
0,
0,
0,
)
data_values = np.empty(1)
coord_sys = "<null>"
i_header_line, i_value = 0, 0
with io.open(filename, "r") as f:
while True:
line = f.readline()
if line.startswith("!"):
line_strings = line.split(":")
                if "COORDINATE REFERENCE SYSTEM" in line_strings[0]:
coord_sys = line_strings[1].replace("\n", "")
else:
line_strings = line.split()
line_strings = [string.replace(",", "") for string in line_strings]
if len(line_strings) == 0:
break
if i_header_line == -1 and not line_strings[0].startswith("!"):
for i_string in range(len(line_strings)):
data_values[i_value] = float(line_strings[i_string])
i_value += 1
if line_strings[0].startswith("@"):
if i_header_line == 0:
i_header_line += 1
else:
i_header_line = -1
if i_header_line > 0:
if i_header_line == 2:
no_data_value = float(line_strings[1])
elif i_header_line == 3:
ny = int(line_strings[0])
nx = int(line_strings[1])
originx = float(line_strings[2])
maxx = float(line_strings[3])
originy = float(line_strings[4])
maxy = float(line_strings[5])
data_values = np.empty(ny * nx)
i_header_line += 1
if nx * ny != len(data_values):
raise IOError(
"Error reading *.zmap file. Encountered problem "
"with header: (nx * ny) does not match with the "
"number items in data file body."
)
z = np.empty([ny, nx])
i_value = 0
for n in range(z.shape[1]):
for m in range(z.shape[0] - 1, -1, -1):
z[m, n] = data_values[i_value]
i_value += 1
dx = (maxx - originx) / (nx - 1)
dy = (maxy - originy) / (ny - 1)
gridx = np.arange(originx, originx + nx * dx, dx)
gridy = np.arange(originy, originy + ny * dy, dy)
cellsize = (dx, dy)
return z, gridx, gridy, cellsize, no_data_value, coord_sys
def space_back_to_front(string):
net = string.replace(" ", "")
return "".join(string.rsplit(net)) + net
| 15,629 | 32.612903 | 88 | py |
PyKrige | PyKrige-main/docs/source/sphinxext/github_link.py | # Adapted from scikit-learn
import inspect
import os
import subprocess
import sys
from functools import partial
from operator import attrgetter
REVISION_CMD = "git rev-parse --short HEAD"
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print("Failed to execute git to get revision")
return None
return revision.decode("utf-8")
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ("py", "pyx"):
return
if not info.get("module") or not info.get("fullname"):
return
class_name = info["fullname"].split(".")[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode("utf-8")
module = __import__(info["module"], fromlist=[class_name])
obj = attrgetter(info["fullname"])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ""
return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
revision = _get_git_revision()
return partial(
_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt
)
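# --- Usage sketch (editor's addition) ---
# In a Sphinx conf.py this is typically wired to sphinx.ext.linkcode roughly as
# below; the URL template is a placeholder, not the project's actual value.
# linkcode_resolve = make_linkcode_resolve(
#     "pykrige",
#     ("https://github.com/USER/PROJECT/blob/{revision}/"
#      "src/{package}/{path}#L{lineno}"),
# )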
| 2,645 | 29.767442 | 85 | py |
optisplit | optisplit-main/mean.py | import pandas as pd
import os
import numpy as np
from pdb import set_trace as bp
import sys
from pathlib import Path
"""Calculate means of result files."""
def sort_dfs(dfs):
res = []
for df in dfs:
start = df.iloc[:4,:].sort_values(by=[' method'], ascending=False)
end = df.iloc[4:,:].sort_values(by=[' method'], ascending=False)
df_new = pd.concat([start, end])
df_new.set_index(np.arange(len(df)), inplace=True)
res.append(df_new)
return res
if __name__ == '__main__':
name = sys.argv[1]
output_dir = sys.argv[2]
if name not in ['small', 'go', 'xml']:
print('invalid dataset name')
sys.exit(1)
dfs = [pd.read_csv(Path(output_dir, fname), index_col=False) for fname in os.listdir(output_dir) if 'csv' in fname and name in fname and 'mean' not in fname]
print(len(dfs))
if name == 'xml':
dfs = sort_dfs(dfs)
df = pd.concat(dfs).groupby(level=0).mean()
df.insert(0, 'method', dfs[0].values[:,1])
df.insert(0, 'dataset', dfs[0].values[:,0])
df.to_csv(Path(output_dir, f'mean_scores_{name}.csv'), index=False, float_format='%.4f')
# df.to_csv(Path(path, f'mean_scores_{name}.csv'), index=False)
| 1,226 | 26.266667 | 161 | py |
optisplit | optisplit-main/evaluation_metric_experiment.py | import numpy as np
import joblib
import matplotlib.pyplot as plt
import scipy.sparse as sp
import warnings
from copy import deepcopy
from pdb import set_trace as bp
from textwrap import wrap
import cv_balance
np.set_printoptions(formatter={'float': lambda x: "{0:0.5f}".format(x)})
warnings.filterwarnings('ignore', message='Comparing a sparse matrix with 0 using == is inefficient')
def equal(y, ones, n_folds):
"""Equally distributed folds"""
for j, yy in enumerate(y):
for i in range(yy.shape[1]):
yy[:ones[i]//n_folds, i] = 1
targets = np.row_stack(y)
return sp.csr_matrix(targets)
def classes_missing_from_1_fold(y, ones, n_folds):
for j, yy in enumerate(y):
if j == 0:
continue
else:
for i in range(yy.shape[1]):
yy[:ones[i]//(n_folds-1), i] = 1
targets = np.row_stack(y).astype(np.int)
return sp.csr_matrix(targets)
def difference(y, ones, n_folds):
"""Small difference between folds"""
diff = 0.2
for j, yy in enumerate(y):
if j == 0:
for i in range(yy.shape[1]):
yy[:ones[i]//n_folds+(diff*(ones[i]//n_folds)).astype(np.int), i] = 1
elif j== 1:
for i in range(yy.shape[1]):
yy[:ones[i]//n_folds-(diff*(ones[i]//n_folds)).astype(np.int), i] = 1
else:
for i in range(yy.shape[1]):
yy[:ones[i]//n_folds, i] = 1
targets = sp.csr_matrix(np.row_stack(y))
return targets
def mk_y(size, n_folds):
"""Generate the synthetic data"""
y = np.split(np.zeros(size), n_folds)
folds = np.split(np.arange(size[0]), n_folds)
folds = [(np.setdiff1d(np.arange(size[0]), f), f) for f in folds]
ones = np.linspace(start=2*n_folds, stop=size[0]//2, num=100).astype(np.int)
res = {}
res['Equal'] = folds, equal(deepcopy(y), ones, n_folds)
res['Difference'] = folds, difference(deepcopy(y), ones, n_folds)
res['One missing'] = folds, classes_missing_from_1_fold(deepcopy(y), ones, n_folds)
joblib.dump(res, 'results/res.joblib')
def calculate_scores(target_fold_ratio, actual_fold_ratio):
"""Return LD and rLD scores for the given ratios"""
#Notation like in Section 3.
D = 1 # data size
Di = np.linspace(0.01*D, 0.99*D, 100) # number of positives in each class
Sj = D*actual_fold_ratio
Sij = Di*target_fold_ratio
d = Di / D
p = Sij / Sj
rld = np.abs((d-p)/d)
ld = np.abs(p/(1-p) - d/(1-d))
return ld, rld
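# Editor's note: in the notation used above, d is the fraction of the full data
# that is positive for a class (D_i / D) and p is the fraction of a fold that is
# positive for that class (S_ij / S_j). The two measures returned are
#     rLD = |d - p| / d                  (relative label distribution)
#     LD  = |p / (1 - p) - d / (1 - d)|  (label distribution)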
def plot_measures():
"""Plot LD and rLD scores of folds with given error"""
# get scores
ratios = [(0.2, 0.25), (0.2, 0.3), (0.2, 0.4), (0.2, 0.5)][::-1]
scores = [calculate_scores(*r) for r in ratios]
ld_scores = [s[0] for s in scores]
rld_scores = [s[1] for s in scores]
# plot results
# Score comparison
plt.figure(figsize=(11, 3.8))
plt.subplots_adjust(wspace=0.3, top=0.90, bottom=0.15, right=0.82, left=0.10)
Di = np.linspace(0.01, 0.99, 100)
plt.subplot(1,2,1,)
plt.yscale('log')
plt.plot(Di, np.array(ld_scores).T)
plt.xlabel('$D_i$', fontsize=13)
plt.title('A', fontsize=16)
plt.ylabel('LD', fontsize=13, rotation=0, labelpad=15)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
plt.subplot(1,2,2,)
plt.plot(Di, np.array(rld_scores).T)
plt.title('B', fontsize=16)
plt.ylabel('rLD', fontsize=13, rotation=0, labelpad=15)
plt.xlabel('$D_i$', fontsize=13)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
title = 'Ratio of positive data points in the fold'
title = '\n'.join(wrap(title, 20))
lg = plt.legend([r[1] for r in ratios], bbox_to_anchor=(1.03, 0.8), loc="upper left", fontsize=13, title=title)
title = lg.get_title()
title.set_fontsize(13)
plt.savefig(f'results/ld_vs_rld.pdf')
# Difference comparison
# calculate pairwise differences between scores
ld_differences = np.array([x - y for i,x in enumerate(ld_scores[::-1]) for j,y in enumerate(ld_scores[::-1]) if i > j]).T
rld_differences = np.array([x - y for i,x in enumerate(rld_scores[::-1]) for j,y in enumerate(rld_scores[::-1]) if i > j]).T
labels = np.array([f'{ratios[i][1]}-{ratios[j][1]}' for i,x in enumerate(ld_scores[::-1]) for j,y in enumerate(ld_scores[::-1]) if i > j]).T
plt.clf()
plt.figure(figsize=(11, 3.8))
plt.subplots_adjust(wspace=0.3, top=0.90, bottom=0.15, right=0.82, left=0.10)
Di = np.linspace(0.01, 0.99, 100)
plt.subplot(1,2,1,)
plt.yscale('log')
plt.plot(Di, ld_differences)
plt.xlabel('$D_i$', fontsize=13)
plt.title('C', fontsize=16)
plt.ylabel('$\Delta LD$', fontsize=13, rotation=0, labelpad=15)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
plt.subplot(1,2,2,)
plt.plot(Di, rld_differences)
plt.title('D', fontsize=16)
plt.xlabel('$D_i$', fontsize=13)
plt.ylabel('$\Delta rLD$', fontsize=13, rotation=0, labelpad=15)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
plt.legend(labels, bbox_to_anchor=(1.02, 0.8), loc="upper left", fontsize=13)
plt.savefig(f'results/ld_vs_rld_differences.pdf')
def synthetic_data_experiment():
datas = joblib.load('results/res.joblib')
methods = ['rld', 'ld', 'dcp']
for i, name in enumerate(datas):
plt.clf()
data = datas[name]
rld = np.array(cv_balance.rld(data[0], data[1])).ravel()
ld = cv_balance.ld(data[0], data[1])
        dcp = cv_balance.cv_evaluate(data[0], data[1], method='dcp')
res_all = np.column_stack((ld, rld, dcp))
sizes = np.array(data[1].sum(axis=0)).ravel()
if i == 2:
plt.figure(figsize=(6.6, 3.8))
else:
plt.figure(figsize=(5.4, 3.8))
for j, m in enumerate(['.', '+', '2']):
plt.plot(sizes, res_all[:,j], ms=11, marker=m, markevery=0.04, alpha=0.9, linestyle='None')
plt.xscale('symlog', linthreshx=0.000001)
plt.yscale('symlog', linthreshy=0.000001)
plt.ylim(-0.000001, np.max(res_all)+3)
plt.xlabel('Class size', fontsize=16)
plt.ylabel('Score', fontsize=16)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.title(name, x=0.5, y=0.89, fontsize=16)
if i == 2:
lg = plt.legend(['LD', 'rLD', 'DCP'], bbox_to_anchor=(1.05, 0.5), loc="upper left", fontsize=14, title='Measure')
title = lg.get_title()
title.set_fontsize(14)
plt.tight_layout()
plt.savefig(f'results/{name}.pdf')
if __name__ == '__main__':
mk_y((100000,100), 10)
synthetic_data_experiment()
plot_measures()
| 6,729 | 29.87156 | 144 | py |
optisplit | optisplit-main/cv_comparison_experiment.py | import argparse
import sys
import time
import arff
import joblib
import numpy as np
import scipy.sparse as sp
from copy import deepcopy
from datetime import timedelta
from joblib import Parallel, delayed
from pdb import set_trace as bp
from skmultilearn.model_selection import IterativeStratification
from cv_balance import optisplit, random_cv, cv_evaluate, check_folds, rld, ld
sys.path.append('stratified_sampling_for_XML/stratify_function/')
from stratify import stratified_train_test_split
import warnings
warnings.filterwarnings('ignore', message='Comparing a sparse matrix with 0 using == is inefficient')
def load_datasets(dataset_type):
datasets = {}
if dataset_type == 'small':
for dataset in [('mediamill', 101), ('bibtex', 159), ('delicious', 983)]:
print(f'loading {dataset[0]}')
with open(f'data/{dataset[0]}.arff') as f:
data = arff.load(f)
data = np.array(data['data'])
datasets[dataset[0]] = sp.csr_matrix(data[:,-dataset[1]:].astype(np.int))
elif dataset_type == 'go':
for dataset in ['CC', 'MF']:
print(f'loading {dataset}')
data =sp.load_npz(f'data/{dataset}_targets.npz')
class_sizes = data.sum(axis=0)
if np.any(class_sizes == data.shape[0]):
data = data[:, np.array(class_sizes) < data.shape[0]]
datasets[dataset] = data
elif dataset_type == 'xml':
for dataset in ['BP_targets.npz', 'wiki10_31k.npz']:
print(f'loading {dataset}')
            data = sp.load_npz(f'data/{dataset}')
            class_sizes = data.sum(axis=0)
            if np.any(class_sizes == 0):
                data = data[:, (np.array(class_sizes) > 0).ravel()]
                # Recompute so the next filter indexes the reduced matrix
                class_sizes = data.sum(axis=0)
            if np.any(class_sizes == data.shape[0]):
                data = data[:, np.array(class_sizes) < data.shape[0]]
datasets[dataset] = data
else:
raise NotImplementedError('unknown datasets')
return datasets
def iterstrat(n_folds, targets, random_state=42):
"""Iterative stratification"""
X = np.zeros((targets.shape[0], 1))
k_fold = IterativeStratification(n_splits=n_folds, random_state=random_state).split(X,targets)
return list(k_fold)
def sois(n_folds, targets, random_state=42):
"""Second order iterative stratification"""
X = np.zeros((targets.shape[0], 1))
k_fold = IterativeStratification(n_splits=n_folds, random_state=random_state, order=2).split(X,targets)
return list(k_fold)
def stratified(n_folds, targets, random_state=42):
"""Stratified sampling"""
res = []
remaining = np.arange(targets.shape[0])
m = targets.shape[0]//n_folds
for i in range(n_folds):
if len(remaining) > m and i < n_folds-1:
s = m/len(remaining)
else:
s = len(remaining)
tt = list(targets[remaining,:].tolil().rows)
X = list(np.zeros((targets.shape[0], 1))[remaining])
split = stratified_train_test_split(X, tt, target_test_size=s, random_state=random_state)
remaining2 = remove(remaining, split[1])
res.append((None, remaining[split[1]]))
remaining = remaining2
res = [(np.setdiff1d(np.arange(targets.shape[0]), f[1]), f[1]) for f in res]
return res
def partitioning_cv(n_folds, targets, random_state=42):
"""Partitioning method based on stratified random sampling"""
np.random.seed(random_state)
frequencies = np.array(np.mean(targets, axis=0)).ravel()
index = list(targets.tolil().rows)
tt = [frequencies[index[i]] for i in range(len(index))]
D = np.array([np.product(t) for t in tt])
index = np.argsort(D)
stratas = np.array_split(index, n_folds)
for i in range(len(stratas)):
np.random.shuffle(stratas[i])
substratas = [np.array_split(s, n_folds) for s in stratas]
folds = []
for j in range(n_folds):
res = []
for i in range(n_folds):
res.append(substratas[i][j])
folds.append((None, np.concatenate(res).ravel()))
folds = [(np.setdiff1d(np.arange(targets.shape[0]), f[1]), f[1]) for f in folds]
return folds
def remove(remaining, split):
remaining2 = np.setdiff1d(remaining, remaining[split])
return remaining2
def improve_split(dataset_type, random_state=42, output_dir='results'):
"""Use optisplit to improve an existing split"""
np.random.seed(random_state)
folds = joblib.load(f'{output_dir}/folds_{dataset_type}_{random_state}.joblib')
res = {}
for dataset in folds.keys():
res[dataset] = {}
for method in folds[dataset].keys():
data = folds[dataset][method]
folds0 = [(np.setdiff1d(np.arange(data[1].shape[0]), f[1]), f[1]) for f in data[0]]
if not check_folds(folds0, data[1]):
bp()
check_folds(folds0, data[1])
print(f'{method}')
start = time.time()
result = optisplit(n_splits=len(data[0]), targets=data[1], seed=random_state,initial_folds=folds0)
elapsed = time.time()-start
runtime = f'Time: {str(timedelta(seconds=elapsed))}'
res[dataset][method] = result, data[1], elapsed
print(runtime)
joblib.dump(res, f'{output_dir}/folds_{dataset_type}_{random_state}_IMPROVED.joblib')
def create_folds(dataset_type, n_folds=5, random_state=42, output_dir='results'):
own_dcp = lambda n_splits, targets, random_seed: optisplit(n_splits, targets, method='dcp', seed=random_seed)
own_rld = lambda n_splits, targets, random_seed: optisplit(n_splits, targets, method='rld', seed=random_seed)
own_ld = lambda n_splits, targets, random_seed: optisplit(n_splits, targets, method='ld', seed=random_seed)
datasets = load_datasets(dataset_type)
if dataset_type in ['small', 'go']:
methods = {'SS':stratified, 'PMBSRS':partitioning_cv, 'IS':iterstrat, 'SOIS':sois, 'own_ld':own_ld, 'own_dcp':own_dcp, 'own_rld':own_rld, 'random':random_cv}
else:
methods = {'own_ld':own_ld, 'own_dcp':own_dcp, 'own_rld':own_rld, 'PMBSRS':partitioning_cv, 'random':random_cv, 'SS':stratified}
res = {}
for dataset in datasets.keys():
print(f'{dataset}')
res[dataset] = {}
for method in methods.keys():
print(f'{method}')
start = time.time()
targets = datasets[dataset]
try:
result = methods[method](n_folds, deepcopy(targets), random_state)
elapsed = time.time()-start
runtime = f'Time: {str(timedelta(seconds=elapsed))}'
res[dataset][method] = result, targets, elapsed
print(runtime)
except:
print(f'Error in {method} on {dataset} - skipped')
joblib.dump(res, f'{output_dir}/folds_{dataset_type}_{random_state}.joblib')
def example_distribution(folds, targets):
k = len(folds)
res = 0
for j in range(k):
Sj = len(folds[j][1])
cj = targets.shape[0]*(1/k)
res += np.abs(Sj - cj)
return (1/k)*res
def evaluate_folds(dataset_type, random_state, output_dir):
folds = joblib.load(f'{output_dir}/folds_{dataset_type}_{random_state}.joblib')
res = {}
for dataset in folds.keys():
res[dataset] = {}
for method in folds[dataset].keys():
data = folds[dataset][method]
targets = data[1]
class_sizes = np.array(targets.sum(axis=0)).ravel()
# remove empty classes if they exists
targets = targets[:, np.where(class_sizes > 0)[0]]
class_sizes = np.array(targets.sum(axis=0)).ravel()
            dcp = cv_evaluate(data[0], targets, method='dcp')
ED = example_distribution(data[0], targets)
LD = np.mean(ld(data[0], targets))
rld_score = np.mean(rld(data[0], targets))
dcp_score = np.mean(dcp)
runtime = data[2]
res[dataset][method] = {'ED':ED, 'LD':LD, 'dcp':dcp_score, 'rld':rld_score, 'runtime':runtime}
tostr = lambda x: str(x).replace('[','').replace(']','').replace('\'', '')
with open(f'{output_dir}/scores_{dataset_type}_{random_state}.csv', 'w') as f:
fields = 'dataset, method, ED, LD, dcp, rld, runtime\n'
f.write(fields)
for dataset, results in res.items():
for method, scores in results.items():
score_str = tostr([v for v in list(scores.values())])
f.write(f'{dataset},{method},{score_str}\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dataset_type', type=str, help='small, go or xml')
parser.add_argument('random_state', type=int)
parser.add_argument('output_dir', type=str)
parser.add_argument('-e', '--evaluation', action='store_true', help='run evaluations')
parser.add_argument('-i', '--improve', action='store_true', help='improve existing folds')
parser.add_argument('-c', '--create', action='store_true', help='create folds')
args = parser.parse_args()
if args.create:
create_folds(dataset_type=args.dataset_type, random_state=args.random_state, output_dir=args.output_dir)
if args.evaluation:
evaluate_folds(dataset_type=args.dataset_type, random_state=args.random_state, output_dir=args.output_dir)
if args.improve:
improve_split(dataset_type=args.dataset_type, random_state=args.random_state, output_dir=args.output_dir)
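# Example invocations (editor's addition, based on the argparse flags above):
#     python cv_comparison_experiment.py go 42 results --create --evaluation
#     python cv_comparison_experiment.py small 1 results -c -e -i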
| 9,525 | 37.723577 | 166 | py |
optisplit | optisplit-main/cv_balance.py | import time
import numpy as np
import scipy.sparse as sp
from copy import deepcopy
from datetime import timedelta
from pdb import set_trace as bp
def rld(folds, targets):
tt = deepcopy(targets)
res = []
di = np.array(tt.sum(axis=0)).ravel() / tt.shape[0]
for f in folds:
pij = np.array(tt[f[1]].sum(axis=0)).ravel() / len(f[1])
res.append((abs((di - pij)/di)))
res = np.stack(res)
return res.mean(axis=0)
def dcp(folds, targets):
tt = deepcopy(targets)
res = []
Si = np.array(tt.sum(axis=0)).ravel()
for f in folds:
Sji = np.array(tt[f[1]].sum(axis=0)).ravel()
res.append(Sji)
res = np.stack(res)
return (res / Si).max(axis=0) - 1/len(folds)
def ld(folds, targets):
tt = deepcopy(targets)
res = []
di = np.array(tt.sum(axis=0)).ravel() / tt.shape[0]
di = np.where(di == 1, (tt.shape[0]-1)/tt.shape[0], di) # avoid division by zero
for f in folds:
pij = np.array(tt[f[1]].sum(axis=0)).ravel() / len(f[1])
pij = np.where(pij == 1, (len(f[1])-1)/len(f[1]), pij)
res.append(abs((pij/(1-pij) - di/(1-di))))
res = np.stack(res)
return res.mean(axis=0)
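# Editor's note: for k folds, class i and fold j, the three measures above are
#     rLD_i = mean_j |d_i - p_ij| / d_i            d_i = positives of i / #samples
#     DCP_i = max_j (S_ij / S_i) - 1/k             S_ij = positives of i in fold j
#     LD_i  = mean_j |p_ij/(1-p_ij) - d_i/(1-d_i)|
# where p_ij is the fraction of fold j that is positive for class i.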
def cv_evaluate(folds, targets, method='original'):
"""Return X, Y evaluation metrics for a cv"""
if method == 'dcp':
res = np.array(dcp(folds, targets)).ravel()
elif method == 'rld':
res = np.array(rld(folds, targets)).ravel()
elif method == 'ld':
res = np.array(ld(folds, targets)).ravel()
else:
raise NotImplementedError('invalid method')
return np.array(res).ravel()
def transfer_sequences(class_index, arr0, arr1, n_transfer, A, targets, sequences=None):
"""Transfer contents of class_index array from arr0 to arr1"""
arr0_index = np.intersect1d(class_index, arr0).astype(np.int)
# select sequences with smallest number of other features
tt = np.array(targets[arr0_index, :].sum(axis=1)).ravel()
if sequences is not None:
# use precomputed transfer index
transfer_index = sequences
else:
# select sequences with fewest other classes to be transferred
transfer_index = arr0_index[tt.argsort()[:n_transfer]]
# move arr0 to arr1
arr1 = np.concatenate((arr1, transfer_index)).astype(np.int)
arr0 = np.setdiff1d(arr0, transfer_index).astype(np.int)
return arr0, arr1, transfer_index
def balance(targets, A, folds, n_splits):
n_transfer = calc_transfer(targets, A, folds, n_splits)
class_index = np.where(targets[:,A].toarray().ravel() > 0)[0]
excess = np.array([])
# process folds with too many test cases
for i, n in enumerate(n_transfer):
if n_transfer[i] < 0:
tr_index = folds[i][0]
test_index = folds[i][1]
test_index, tr_index, transfer_index = transfer_sequences(class_index, test_index, tr_index, abs(n_transfer[i]), A, targets)
excess = np.concatenate((excess, transfer_index))
folds[i] = tr_index, test_index #?
else:
continue
# process folds with too few test cases
for i, n in enumerate(n_transfer):
if n_transfer[i] > 0:
tr_index = folds[i][0]
test_index = folds[i][1]
sequences = excess[:abs(n_transfer[i])]
excess = np.setdiff1d(excess, sequences)
tr_index, test_index, transfer_index = transfer_sequences(class_index, tr_index, test_index, n_transfer[i], A, targets, sequences=sequences)
folds[i] = tr_index, test_index #?
else:
continue
assert len(excess) == 0,'Failed to distribute all sequences'
return folds, n_transfer
def check_folds(folds, targets):
all_sequences_in_test = sum([len(np.unique(f[1])) for f in folds]) == targets.shape[0]
separate_training_test = all([len(np.intersect1d(f[0], f[1])) == 0 for f in folds])
data_shape = all([len(f[0]) + len(f[1]) == targets.shape[0] for f in folds])
no_overlapping_test_sets = len(np.unique(np.concatenate([np.unique(f[1]) for f in folds]))) == len(np.concatenate([f[1] for f in folds]))
return all_sequences_in_test and no_overlapping_test_sets and separate_training_test and data_shape
def random_cv(n_splits, targets, seed=42):
np.random.seed(seed)
t = np.arange(targets.shape[0])
np.random.shuffle(t)
folds = np.array_split(t, n_splits)
folds = [(np.setdiff1d(t,f), f) for f in folds]
return folds
def calc_transfer(targets, A, folds, n_splits):
# calculate the amount of balancing needed
tt = np.array([targets[f[1], A].sum() for f in folds])
n_transfer = np.array([tt.sum()//n_splits - t for t in tt])
if sum(n_transfer) < 0:
aa = np.zeros(len(n_transfer)).astype(np.int)
aa[:abs(sum(n_transfer))] = 1
n_transfer = n_transfer + aa
assert sum(n_transfer) == 0, 'Balancing failed'
return n_transfer
def optisplit(n_splits, targets, method='rld', max_epochs=3, seed=42, initial_folds=None):
"""Run Optisplit.
Parameters
----------
n_splits : int
Number of cross validation folds
targets : scipy csr matrix
Target matrix
method : str (rld or dcp), default=rld
Optimisation method
    max_epochs : int, default=3
Number of times to run optisplit over the data
seed: int, default=42
Random seed
initial_folds: list, default=None
List of numpy arrays containing cross validation fold indices. These
are used as the initial folds.
Returns
-------
list
list of n_split tuples containing numpy arrays containing training and test fold indices.
"""
np.random.seed(seed)
targets = sp.csr_matrix(targets)
class_sizes = targets.sum(axis=0)
# if > 50% of the examples are positive, optimize the negative distribution
pos_index = np.where(class_sizes > 0.5*targets.shape[0])[0]
targets[:,pos_index] = (targets[:,pos_index] == 0).astype(np.int)
class_sizes = targets.sum(axis=0)
if initial_folds is None:
folds0 = random_cv(n_splits, targets)
else:
folds0 = initial_folds
res0 = cv_evaluate(folds0, targets, method=method)
score0 = np.sum(res0)
start = time.time()
for jjj in range(max_epochs):
max_offset = 0
print(f'round {jjj}')
if jjj == 0:
print(score0)
for iii in range(targets.shape[1]):
folds = deepcopy(folds0)
A = np.argsort(np.array(res0).ravel())[::-1][max_offset]
folds, n_transfer = balance(targets, A, folds, n_splits)
res1 = cv_evaluate(folds, targets, method=method)
if np.sum(res0) <= np.sum(res1) or np.all(n_transfer == 0):
#balancing unbalanced some other classes
max_offset += 1
continue
score1 = np.sum(res1)
folds0 = folds
res0 = res1
print(score1)
if np.isclose(score0, score1, atol=0.1):
break
assert check_folds(folds, targets), 'Invalid CV folds created'
print(f'Time: {str(timedelta(seconds=time.time()-start))}')
print(f'Ignored {max_offset} classes')
return folds0
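# --- Usage sketch (editor's addition, arbitrary toy data; assumes the NumPy/
# SciPy versions this module was written for) ---
def _example_optisplit(n_samples=1000, n_classes=20, n_splits=5):
    rng = np.random.RandomState(0)
    toy_targets = sp.csr_matrix((rng.rand(n_samples, n_classes) < 0.1).astype(int))
    folds = optisplit(n_splits, toy_targets, method='rld')
    # Each element is a (train_index, test_index) pair; the test folds
    # partition all samples exactly once.
    return [len(test) for _, test in folds]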
def main():
pass
if __name__ == '__main__':
main()
| 7,298 | 31.29646 | 152 | py |
optisplit | optisplit-main/stratified_sampling_for_XML/stratify_function/stratify.py | import random
import numpy as np
from datetime import datetime
import helper_funcs
def stratified_train_test_split(X, y, target_test_size, random_state=None, epochs=50, swap_probability=0.1, threshold_proportion=0.1, decay=0.1):
    if random_state is not None:
random.seed(random_state)
# To keep track of how long the initialization takes
start_time = datetime.now()
# Keep track how how many instances have been swapped to train or test
swap_counter = {
'to_train': 0,
'to_test': 0,
}
# 1. Create instances_dict to keep track of instance information:
# labels: array of labels, []
# train_or_test: string, 'train' or 'test'
# instance_score: float, adjusted sum of label scores
instances_dict = helper_funcs.create_instances_dict(X, y, target_test_size)
# 1.5 Get average number of labels per instance
labels_per_instance = []
for instance_id, instance_dict in instances_dict.items():
labels_count = len(instance_dict['labels'])
labels_per_instance.append(labels_count)
average_labels_per_instance = sum(labels_per_instance) / len(labels_per_instance)
# 2. Create labels_dict to keep track of label information:
# train: int, number of times label appears in train set
# test: int, number of times label appears in test set
# label_score: float, label score
labels_dict = helper_funcs.create_labels_dict(instances_dict)
# 3. Calculate the label score for each label in labels_dict
# Positive score if too much of the label is in the test set
# Negative score if too much of the label is in the train set
helper_funcs.score_labels(labels_dict, target_test_size, average_labels_per_instance)
# 4. Calculate the instance score for each instance in instances_dict
# A high score means the instance is a good candidate for swapping
helper_funcs.score_instances(instances_dict, labels_dict)
# 5. Calculate the total score
# The higher the score, the more 'imbalanced' the distribution of labels between train and test sets
total_score = helper_funcs.calculate_total_score(instances_dict)
print(f'Starting score: {round(total_score)}. Calculated in {str(datetime.now() - start_time).split(".")[0]}')
# Main loop to create stratified train-test split
for epoch in range(epochs):
        # To keep track of how long each iteration takes
itteration_start_time = datetime.now()
# 6. Calculate the threshold score for swapping
threshold_score = helper_funcs.calculte_threshold_score(instances_dict, average_labels_per_instance, epoch, threshold_proportion, decay)
# 7. Swap the instances with instance_score that is greater than the threshold score
# Probability of swapping an instance is swap_probability
helper_funcs.swap_instances(instances_dict, threshold_score, swap_counter, average_labels_per_instance, epoch, swap_probability, decay)
# 2. Recreate labels_dict with updated train-test split
labels_dict = helper_funcs.create_labels_dict(instances_dict)
# from pdb import set_trace as bp
# bp()
# 3. Recalculate the label score for each label in labels_dict
helper_funcs.score_labels(labels_dict, target_test_size, average_labels_per_instance)
# 4. Recalculate the instance score for each instance in instances_dict
helper_funcs.score_instances(instances_dict, labels_dict)
# 5. Recalculate the total score
total_score = helper_funcs.calculate_total_score(instances_dict)
print(f'Epoch {epoch + 1}/{epochs} score: {round(total_score)}. Calculated in {str(datetime.now() - itteration_start_time).split(".")[0]}')
# Prepare X_train, X_test, y_train, y_test
X_train = []
X_test = []
y_train = []
y_test = []
train_index = []
test_index = []
for instance_id, instance_dict in instances_dict.items():
if instance_dict['train_or_test'] == 'train':
X_train.append(X[instance_id])
y_train.append(y[instance_id])
train_index.append(instance_id)
elif instance_dict['train_or_test'] == 'test':
X_test.append(X[instance_id])
y_test.append(y[instance_id])
test_index.append(instance_id)
else:
print(f'Something went wrong: {instance_id}')
# # Print some statistics
# actual_test_size = len(X_test) / (len(X_train) + len(X_test))
# print(f'To train: {swap_counter["to_train"]}')
# print(f'To test: {swap_counter["to_test"]}')
# print(f'Target test size: {target_test_size}')
# print(f'Actual test size: {actual_test_size}')
return np.array(train_index), np.array(test_index)
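# --- Usage sketch (editor's addition) ---
# X is a list of per-instance feature placeholders and y a list of label-id
# lists, one per instance; this modified version returns two index arrays, e.g.
#     X = [[0.0]] * 4
#     y = [[0, 2], [1], [2], [0, 1]]
#     train_idx, test_idx = stratified_train_test_split(
#         X, y, target_test_size=0.25, random_state=42)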
| 4,788 | 40.284483 | 147 | py |
optisplit | optisplit-main/stratified_sampling_for_XML/stratify_function/helper_funcs.py | import random
import numpy as np
# 1. Create instances_dict to keep track of instance information:
# labels: array of labels, []
# train_or_test: string, 'train' or 'test'
# instance_score: float, adjusted sum of label scores
def create_instances_dict(X, y, target_test_size):
instances_dict = {}
instance_id = 0
for _ in X:
train_or_test = 'train'
if random.uniform(0, 1) <= target_test_size:
train_or_test = 'test'
instances_dict[instance_id] = {
'labels': y[instance_id],
'train_or_test': train_or_test,
'instance_score': 0,
}
instance_id += 1
return instances_dict
# 2. Create labels_dict to keep track of label information:
# train: int, number of times label appears in train set
# test: int, number of times label appears in test set
# label_score: float, label score
def create_labels_dict(instances_dict):
labels_dict = {}
for _, instance_dict in instances_dict.items():
train_or_test = instance_dict['train_or_test']
for label in instance_dict['labels']:
try:
if train_or_test == 'train':
labels_dict[label]['train'] += 1
else:
labels_dict[label]['test'] += 1
except:
if train_or_test == 'train':
labels_dict[label] = {
'train': 1,
'test': 0,
'label_score': 0
}
else:
labels_dict[label] = {
'train': 0,
'test': 1,
'label_score': 0
}
return labels_dict
# 3. Calculate the label score for each label in labels_dict
# Positive score if too much of the label is in the test set
# Negative score if too much of the label is in the train set
def score_labels( labels_dict, target_test_size, average_labels_per_instance):
for label, label_dict in labels_dict.items():
label_score = 0
label_count = label_dict['train'] + label_dict['test']
if label_count > 1:
actual_test_proportion = label_dict['test'] / label_count
if actual_test_proportion >= target_test_size: # Too much of the label is in the test set
label_score = (actual_test_proportion - target_test_size) / (1 - target_test_size)
if actual_test_proportion > 0.999:
label_score += average_labels_per_instance
else: # Too much of the label is in the train set
label_score = (actual_test_proportion - target_test_size) / target_test_size
if actual_test_proportion < 0.001:
label_score -= average_labels_per_instance
labels_dict[label]['label_score'] = label_score
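# Editor's note, a small worked example of the scoring above: with
# target_test_size = 0.2, a label seen 8 times in train and 2 in test has an
# actual test proportion of 0.2 and scores 0; if instead 5 of its 10
# occurrences were in test, the proportion 0.5 gives a positive score of
# (0.5 - 0.2) / (1 - 0.2) = 0.375, flagging that too much of it is in the test set.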
# 4. Calculate the instance score for each instance in instances_dict
# A high score means the instance is a good candidate for swapping
def score_instances(instances_dict, labels_dict):
for instance_id, instance_dict in instances_dict.items():
instance_score = 0
train_or_test = instance_dict['train_or_test']
for label in instance_dict['labels']:
label_score = labels_dict[label]['label_score']
if label_score > 0: # If too much of the label is in the test set
if train_or_test == 'test':
instance_score += label_score # If instance in test, increase score
elif train_or_test == 'train':
instance_score -= label_score # If instance in train, decrease score
else:
print(f'Something went wrong: {instance_id}')
elif label_score < 0: # If too much of the label is in the train set
if train_or_test == 'train':
instance_score -= label_score # If instance in train, increase score
elif train_or_test == 'test':
instance_score += label_score # If instance in test, decrease score
else:
print(f'Something went wrong: {instance_id}')
instances_dict[instance_id]['instance_score'] = instance_score
# 5. Calculate the total score
# The higher the score, the more 'imbalanced' the distribution of labels between train and test sets
def calculate_total_score(instances_dict):
total_score = 0
for _, instance_dict in instances_dict.items():
total_score += instance_dict['instance_score']
return total_score
# 6. Calculate the threshold score for swapping
def calculte_threshold_score(instances_dict, average_labels_per_instance, epoch, threshold_proportion, decay):
instance_scores = []
for _, instance_dict in instances_dict.items():
if instance_dict['instance_score'] < average_labels_per_instance:
instance_scores.append(instance_dict['instance_score'])
threshold_score = np.quantile(instance_scores, (1 - (threshold_proportion / ((1 + decay) ** epoch))))
if threshold_score < 0:
threshold_score = 0
return threshold_score
# 7. Swap the instances with instance_score that is greater than the threshold score
# Probability of swapping an instance is swap_probability
def swap_instances(instances_dict, threshold_score, swap_counter, average_labels_per_instance, epoch, swap_probability, decay):
for instance_id, instance_dict in instances_dict.items():
instance_score = instance_dict['instance_score']
if instance_score >= average_labels_per_instance:
if random.uniform(0, 1) <= 0.25 / (1.05 ** epoch):
current_group = instance_dict['train_or_test']
if current_group == 'train':
instances_dict[instance_id]['train_or_test'] = 'test'
swap_counter['to_test'] += 1
elif current_group == 'test':
instances_dict[instance_id]['train_or_test'] = 'train'
swap_counter['to_train'] += 1
elif instance_score > threshold_score and random.uniform(0, 1) <= swap_probability / ((1 + decay) ** epoch):
current_group = instance_dict['train_or_test']
if current_group == 'train':
instances_dict[instance_id]['train_or_test'] = 'test'
swap_counter['to_test'] += 1
elif current_group == 'test':
instances_dict[instance_id]['train_or_test'] = 'train'
swap_counter['to_train'] += 1
| 6,577 | 47.014599 | 127 | py |
PC-JeDi | PC-JeDi-main/src/plotting.py | from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Optional, Union
import matplotlib.pyplot as plt
import numpy as np
import PIL
import wandb
from jetnet.utils import efps
def plot_multi_hists(
data_list: Union[list, np.ndarray],
data_labels: Union[list, str],
col_labels: Union[list, str],
path: Optional[Union[Path, str]] = None,
scale_factors: Optional[list] = None,
do_err: bool = False,
do_norm: bool = False,
bins: Union[list, str, partial] = "auto",
logy: bool = False,
y_label: Optional[str] = None,
ylim: Optional[list] = None,
rat_ylim: tuple = (0, 2),
rat_label: Optional[str] = None,
scale: int = 5,
do_legend: bool = True,
hist_kwargs: Optional[list] = None,
err_kwargs: Optional[list] = None,
legend_kwargs: Optional[dict] = None,
incl_overflow: bool = True,
incl_underflow: bool = True,
do_ratio_to_first: bool = False,
return_fig: bool = False,
return_img: bool = False,
) -> Union[plt.Figure, None]:
"""Plot multiple histograms given a list of 2D tensors/arrays.
- Performs the histogramming here
    - Each column of the arrays will be a separate axis
    - Matching columns in each array will be superimposed on the same axis
    - If a tensor being passed is 3D, the histograms along its final dimension
      are averaged and their uncertainties combined
args:
        data_list: A list of tensors or numpy arrays, each col will be a separate axis
data_labels: A list of labels for each tensor in data_list
col_labels: A list of labels for each column/axis
path: The save location of the plots (include img type)
scale_factors: List of scalars to be applied to each histogram
do_err: If the statistical errors should be included as shaded regions
do_norm: If the histograms are to be a density plot
bins: List of bins to use for each axis, can use numpy's strings
logy: If we should use the log in the y-axis
y_label: Label for the y axis of the plots
ylim: The y limits for all plots
rat_ylim: The y limits of the ratio plots
rat_label: The label for the ratio plot
scale: The size in inches for each subplot
do_legend: If the legend should be plotted
hist_kwargs: Additional keyword arguments for the line for each histogram
legend_kwargs: Extra keyword arguments to pass to the legend constructor
incl_overflow: Have the final bin include the overflow
incl_underflow: Have the first bin include the underflow
do_ratio_to_first: Include a ratio plot to the first histogram in the list
return_fig: Return the figure (DOES NOT CLOSE IT!)
return_img: Return a PIL image (will close the figure)
"""
# Make the arguments lists for generality
if not isinstance(data_list, list):
data_list = [data_list]
if isinstance(data_labels, str):
data_labels = [data_labels]
if isinstance(col_labels, str):
col_labels = [col_labels]
if not isinstance(bins, list):
bins = data_list[0].shape[-1] * [bins]
if not isinstance(scale_factors, list):
scale_factors = len(data_list) * [scale_factors]
if not isinstance(hist_kwargs, list):
hist_kwargs = len(data_list) * [hist_kwargs]
if not isinstance(err_kwargs, list):
err_kwargs = len(data_list) * [err_kwargs]
# Cycle through the datalist and ensure that they are 2D, as each column is an axis
for data_idx in range(len(data_list)):
if data_list[data_idx].ndim < 2:
data_list[data_idx] = data_list[data_idx].unsqueeze(-1)
# Check the number of histograms to plot
n_data = len(data_list)
n_axis = data_list[0].shape[-1]
    # Make sure that all the list lengths are consistent
assert len(data_labels) == n_data
assert len(col_labels) == n_axis
assert len(bins) == n_axis
# Make sure the there are not too many subplots
if n_axis > 20:
raise RuntimeError("You are asking to create more than 20 subplots!")
# Create the figure and axes lists
dims = np.array([1, n_axis]) # Subplot is (n_rows, n_columns)
size = np.array([n_axis, 1.0]) # Size is (width, height)
if do_ratio_to_first:
dims *= np.array([2, 1]) # Double the number of rows
size *= np.array([1, 1.2]) # Increase the height
fig, axes = plt.subplots(
*dims,
figsize=tuple(scale * size),
gridspec_kw={"height_ratios": [3, 1] if do_ratio_to_first else {1}},
squeeze=False,
)
# Cycle through each axis and determine the bins that should be used
    # Automatic/Integer bins are replaced using the first item in the data list
for ax_idx in range(n_axis):
ax_bins = bins[ax_idx]
if isinstance(ax_bins, partial):
ax_bins = ax_bins()
# If the axis bins was specified to be 'auto' or another numpy string
if isinstance(ax_bins, str):
unq = np.unique(data_list[0][:, ax_idx])
n_unique = len(unq)
# If the number of datapoints is less than 10 then use even spacing
if 1 < n_unique < 10:
ax_bins = (unq[1:] + unq[:-1]) / 2 # Use midpoints, add final, initial
ax_bins = np.append(ax_bins, unq.max() + unq.max() - ax_bins[-1])
ax_bins = np.insert(ax_bins, 0, unq.min() + unq.min() - ax_bins[0])
# Numpy function to get the bin edges, catches all other cases (int, etc)
ax_bins = np.histogram_bin_edges(data_list[0][:, ax_idx], bins=ax_bins)
# Replace the element in the array with the edges
bins[ax_idx] = ax_bins
# Cycle through each of the axes
for ax_idx in range(n_axis):
# Get the bins for this axis
ax_bins = bins[ax_idx]
# Cycle through each of the data arrays
for data_idx in range(n_data):
# Apply overflow and underflow (make a copy)
data = np.copy(data_list[data_idx][..., ax_idx]).squeeze()
if incl_overflow:
data = np.minimum(data, ax_bins[-1])
if incl_underflow:
data = np.maximum(data, ax_bins[0])
# If the data is still a 2D tensor treat it as a collection of histograms
if data.ndim > 1:
h = []
for dim in range(data.shape[-1]):
h.append(np.histogram(data[:, dim], ax_bins, density=do_norm)[0])
# Nominal and err is based on chi2 of same value, mult measurements
hist = 1 / np.mean(1 / np.array(h), axis=0)
hist_err = np.sqrt(1 / np.sum(1 / np.array(h), axis=0))
# Otherwise just calculate a single histogram
else:
hist, _ = np.histogram(data, ax_bins, density=do_norm)
hist_err = np.sqrt(hist)
# Apply the scale factors
if scale_factors[data_idx] is not None:
                hist *= scale_factors[data_idx]
                hist_err *= scale_factors[data_idx]
# Save the first histogram for the ratio plots
if data_idx == 0:
denom_hist = hist
denom_err = hist_err
# Get the additional keyword arguments for the histograms and errors
if hist_kwargs[data_idx] is not None and bool(hist_kwargs[data_idx]):
h_kwargs = deepcopy(hist_kwargs[data_idx])
else:
h_kwargs = {}
# Use the stair function to plot the histograms
line = axes[0, ax_idx].stairs(
hist, ax_bins, label=data_labels[data_idx], **h_kwargs
)
if err_kwargs[data_idx] is not None and bool(err_kwargs[data_idx]):
e_kwargs = deepcopy(err_kwargs[data_idx])
else:
e_kwargs = {"color": line._edgecolor, "alpha": 0.2, "fill": True}
# Include the uncertainty in the plots as a shaded region
if do_err:
axes[0, ax_idx].stairs(
hist + hist_err,
ax_bins,
baseline=hist - hist_err,
**e_kwargs,
)
# Add a ratio plot
if do_ratio_to_first:
if hist_kwargs[data_idx] is not None and bool(hist_kwargs[data_idx]):
ratio_kwargs = deepcopy(hist_kwargs[data_idx])
else:
ratio_kwargs = {
"color": line._edgecolor,
"linestyle": line._linestyle,
}
ratio_kwargs["fill"] = False # Never fill a ratio plot
# Calculate the new ratio values with their errors
rat_hist = hist / denom_hist
rat_err = rat_hist * np.sqrt(
(hist_err / hist) ** 2 + (denom_err / denom_hist) ** 2
)
# Plot the ratios
axes[1, ax_idx].stairs(
rat_hist,
ax_bins,
**ratio_kwargs,
)
# Use a standard shaded region for the errors
if do_err:
axes[1, ax_idx].stairs(
rat_hist + rat_err,
ax_bins,
baseline=rat_hist - rat_err,
**e_kwargs,
)
# Cycle again through each axis and apply editing
for ax_idx in range(n_axis):
ax_bins = bins[ax_idx]
# X axis
axes[0, ax_idx].set_xlim(ax_bins[0], ax_bins[-1])
if do_ratio_to_first:
axes[0, ax_idx].set_xticklabels([])
axes[1, ax_idx].set_xlabel(col_labels[ax_idx])
axes[1, ax_idx].set_xlim(ax_bins[0], ax_bins[-1])
else:
axes[0, ax_idx].set_xlabel(col_labels[ax_idx])
# Y axis
if logy:
axes[0, ax_idx].set_yscale("log")
if ylim is not None:
axes[0, ax_idx].set_ylim(*ylim)
else:
_, ylim2 = axes[0, ax_idx].get_ylim()
if logy:
axes[0, ax_idx].set_ylim(top=10 ** (np.log10(ylim2) * 1.40))
else:
axes[0, ax_idx].set_ylim(top=ylim2 * 1.35)
if y_label is not None:
axes[0, ax_idx].set_ylabel(y_label)
elif do_norm:
axes[0, ax_idx].set_ylabel("Normalised Entries")
else:
axes[0, ax_idx].set_ylabel("Entries")
# Ratio Y axis
if do_ratio_to_first:
axes[1, ax_idx].set_ylim(rat_ylim)
if rat_label is not None:
axes[1, ax_idx].set_ylabel(rat_label)
else:
axes[1, ax_idx].set_ylabel(f"Ratio to {data_labels[0]}")
# Legend
if do_legend:
legend_kwargs = legend_kwargs or {}
axes[0, ax_idx].legend(**legend_kwargs)
# Final figure layout
fig.tight_layout()
if do_ratio_to_first:
fig.subplots_adjust(hspace=0.08) # For ratio plots minimise the h_space
# Save the file
if path is not None:
fig.savefig(path)
# Return a rendered image, or the matplotlib figure, or close
if return_img:
img = PIL.Image.frombytes(
"RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb()
)
plt.close(fig)
return img
if return_fig:
return fig
plt.close(fig)
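# --- Usage sketch (editor's addition, arbitrary toy data; assumes the same
# matplotlib setup the module itself targets) ---
def _example_plot_multi_hists():
    rng = np.random.default_rng(0)
    real = rng.normal(size=(1000, 2))
    generated = rng.normal(loc=0.1, size=(1000, 2))
    # Returns the matplotlib figure; the caller is responsible for closing it.
    return plot_multi_hists(
        data_list=[real, generated],
        data_labels=["Original", "Generated"],
        col_labels=["feature 0", "feature 1"],
        do_norm=True,
        do_ratio_to_first=True,
        return_fig=True,
    )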
def locals_to_rel_mass_and_efp(csts: np.ndarray, mask: np.ndarray) -> np.ndarray:
"""Convert the values of a set of constituents to the relative mass and EFP
values of the jet they belong to.
Args:
csts: A numpy array of shape (batch_size, n_csts, 3)
containing the (eta, phi, pt) values of the constituents.
mask: A numpy array of shape (batch_size, n_csts)
containing a mask for the constituents, used to sum only over
the valid constituents.
Returns:
A numpy array of shape (batch_size, 2)
containing the relative mass and EFP values of the jet.
"""
# Calculate the constituent pt, eta and phi
eta = csts[..., 0]
phi = csts[..., 1]
pt = csts[..., 2]
    # Calculate the total jet values in cartesian coordinates, include mask for sum
jet_px = (pt * np.cos(phi) * mask).sum(axis=-1)
jet_py = (pt * np.sin(phi) * mask).sum(axis=-1)
jet_pz = (pt * np.sinh(eta) * mask).sum(axis=-1)
jet_e = (pt * np.cosh(eta) * mask).sum(axis=-1)
# Get the derived jet values, the clamps ensure NaNs dont occur
jet_m = np.sqrt(
np.clip(jet_e**2 - jet_px**2 - jet_py**2 - jet_pz**2, 0, None)
)
# Get the efp values
jet_efps = efps(csts, efp_jobs=1).mean(axis=-1)
return np.vstack([jet_m, jet_efps]).T
def plot_mpgan_marginals(
outputs: np.ndarray,
nodes: np.ndarray,
mask: np.ndarray,
current_epoch: int,
) -> None:
# Clip the outputs for the marginals to match expected jet spread
outputs[..., 0] = np.clip(outputs[..., 0], -0.5, 0.5)
outputs[..., 1] = np.clip(outputs[..., 1], -0.5, 0.5)
outputs[..., 2] = np.clip(outputs[..., 2], 0, 1)
# Plot histograms for the constituent marginals
Path("./plots/").mkdir(parents=False, exist_ok=True)
cst_img = plot_multi_hists(
data_list=[nodes[mask], outputs[mask]],
data_labels=["Original", "Generated"],
col_labels=[r"$\Delta \eta$", r"$\Delta \phi$", r"$\frac{p_T}{Jet_{p_T}}$"],
do_norm=True,
return_img=True,
path=f"./plots/csts_{current_epoch}",
logy=True,
)
    # Convert to the relative jet mass and EFP, clipping to the expected ranges
pred_jets = locals_to_rel_mass_and_efp(outputs, mask)
pred_jets[:, 0] = np.clip(pred_jets[:, 0], 0, 0.4)
pred_jets[:, 1] = np.clip(pred_jets[:, 1], 0, 4e-3)
pred_jets = np.nan_to_num(pred_jets)
real_jets = locals_to_rel_mass_and_efp(nodes, mask)
real_jets[:, 0] = np.clip(real_jets[:, 0], 0, 0.4)
real_jets[:, 1] = np.clip(real_jets[:, 1], 0, 4e-3)
real_jets = np.nan_to_num(real_jets)
# Image for the total jet variables
jet_img = plot_multi_hists(
data_list=[real_jets, pred_jets],
data_labels=["Original", "Generated"],
col_labels=["Relative Jet Mass", "Jet EFP"],
do_norm=True,
return_img=True,
path=f"./plots/jets_{current_epoch}",
)
# Create the wandb table and add the data
if wandb.run is not None:
gen_table = wandb.Table(columns=["constituents", "jets"])
gen_table.add_data(wandb.Image(cst_img), wandb.Image(jet_img))
wandb.run.log({"generated": gen_table}, commit=False)
| 14,847 | 36.589873 | 87 | py |
PC-JeDi | PC-JeDi-main/src/physics.py | # import jetnet
import numpy as np
import pytorch_lightning as pl
import torch as T
# FIX RANDOM SEED FOR REPRODUCIBILITY
pl.seed_everything(0, workers=True)
def locals_to_mass_and_pt(csts: T.Tensor, mask: T.BoolTensor) -> T.Tensor:
"""Calculate the overall jet pt and mass from the constituents. The
constituents are expected to be expressed as:
- del_eta
- del_phi
- log_pt
"""
# Calculate the constituent pt, eta and phi
eta = csts[..., 0]
phi = csts[..., 1]
pt = csts[..., 2].exp()
    # Calculate the total jet values in cartesian coordinates, include mask for sum
jet_px = (pt * T.cos(phi) * mask).sum(axis=-1)
jet_py = (pt * T.sin(phi) * mask).sum(axis=-1)
jet_pz = (pt * T.sinh(eta) * mask).sum(axis=-1)
jet_e = (pt * T.cosh(eta) * mask).sum(axis=-1)
    # Get the derived jet values, the clamps ensure NaNs don't occur
jet_pt = T.clamp_min(jet_px**2 + jet_py**2, 0).sqrt()
jet_m = T.clamp_min(jet_e**2 - jet_px**2 - jet_py**2 - jet_pz**2, 0).sqrt()
return T.vstack([jet_pt, jet_m]).T
def numpy_locals_to_mass_and_pt(
csts: np.ndarray,
mask: np.ndarray,
pt_logged=False,
) -> np.ndarray:
"""Calculate the overall jet pt and mass from the constituents. The
constituents are expected to be expressed as:
- del_eta
- del_phi
- log_pt or just pt depending on pt_logged
"""
# Calculate the constituent pt, eta and phi
eta = csts[..., 0]
phi = csts[..., 1]
pt = np.exp(csts[..., 2]) * mask if pt_logged else csts[..., 2]
    # Calculate the total jet values in cartesian coordinates, include mask for sum
jet_px = (pt * np.cos(phi) * mask).sum(axis=-1)
jet_py = (pt * np.sin(phi) * mask).sum(axis=-1)
jet_pz = (pt * np.sinh(eta) * mask).sum(axis=-1)
jet_e = (pt * np.cosh(eta) * mask).sum(axis=-1)
    # Get the derived jet values, the clamps ensure NaNs don't occur
jet_pt = np.sqrt(np.clip(jet_px**2 + jet_py**2, 0, None))
jet_m = np.sqrt(
np.clip(jet_e**2 - jet_px**2 - jet_py**2 - jet_pz**2, 0, None)
)
return np.vstack([jet_pt, jet_m]).T
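if __name__ == "__main__":
    # Minimal sketch (not part of the original module): apply the numpy helper
    # to a single toy jet with three constituents. The values are arbitrary.
    toy_csts = np.array(
        [[[0.1, 0.2, 50.0], [-0.1, 0.0, 30.0], [0.0, -0.2, 20.0]]]
    )
    toy_mask = np.ones((1, 3), dtype=bool)
    # Returns an array of shape (1, 2) holding the jet pt and mass
    print(numpy_locals_to_mass_and_pt(toy_csts, toy_mask, pt_logged=False))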
| 2,120 | 30.191176 | 84 | py |
PC-JeDi | PC-JeDi-main/src/numpy_utils.py | import numpy as np
def undo_log_squash(data: np.ndarray) -> np.ndarray:
"""Undo the log squash function above."""
return np.sign(data) * (np.exp(np.abs(data)) - 1)
def log_squash(data: np.ndarray) -> np.ndarray:
"""Apply a log squashing function for distributions with high tails."""
return np.sign(data) * np.log(np.abs(data) + 1)
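if __name__ == "__main__":
    # Minimal sketch (not part of the original module): the two functions above
    # are inverses of each other up to floating point precision.
    x = np.linspace(-100.0, 100.0, 11)
    assert np.allclose(undo_log_squash(log_squash(x)), x)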
| 352 | 28.416667 | 75 | py |
PC-JeDi | PC-JeDi-main/src/torch_utils.py | from typing import Union
import numpy as np
import torch as T
import torch.nn as nn
def get_loss_fn(name: str, **kwargs) -> nn.Module:
"""Return a pytorch loss function given a name."""
if name == "none":
return None
# Regression losses
if name == "huber":
return nn.HuberLoss(reduction="none")
if name == "mse":
return nn.MSELoss(reduction="none")
if name == "mae":
return nn.L1Loss(reduction="none")
def to_np(inpt: Union[T.Tensor, tuple]) -> np.ndarray:
"""More consicse way of doing all the necc steps to convert a pytorch
tensor to numpy array.
- Includes gradient deletion, and device migration
"""
if isinstance(inpt, (tuple, list)):
return type(inpt)(to_np(x) for x in inpt)
if inpt.dtype == T.bfloat16: # Numpy conversions don't support bfloat16s
inpt = inpt.half()
return inpt.detach().cpu().numpy()
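if __name__ == "__main__":
    # Minimal sketch (not part of the original module): build an element-wise
    # loss with get_loss_fn and convert the resulting tensor back with to_np.
    loss_fn = get_loss_fn("mse")
    preds = T.randn(4, 3)
    targets = T.zeros(4, 3)
    per_element = loss_fn(preds, targets)  # reduction="none" keeps the shape
    print(to_np(per_element).shape)  # -> (4, 3)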
| 918 | 26.848485 | 77 | py |
PC-JeDi | PC-JeDi-main/src/hydra_utils.py | """A collection of miscellaneous functions useful for the lightning/hydra
template."""
import logging
import os
from pathlib import Path
from typing import Any, List, Sequence
import hydra
import rich
import rich.syntax
import rich.tree
import wandb
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.utilities.rank_zero import rank_zero_only
log = logging.getLogger(__name__)
@rank_zero_only
def reload_original_config(cfg: OmegaConf, get_best: bool = False) -> OmegaConf:
"""Replaces the cfg with the one stored at the checkpoint location.
    Will also set the ckpt_path to the latest version of the last or
best checkpoint
"""
    # Load the original config found in the file directory
orig_cfg = OmegaConf.load(Path("full_config.yaml"))
# Get the latest updated checkpoint with the prefix last or best
flag = "best" if get_best else "last"
orig_cfg.ckpt_path = str(
sorted(Path.cwd().glob(f"checkpoints/{flag}*.ckpt"), key=os.path.getmtime)[-1]
)
# Set the wandb logger to attempt to resume the job
if hasattr(orig_cfg, "loggers"):
if hasattr(orig_cfg.loggers, "wandb"):
orig_cfg.loggers.wandb.resume = True
return orig_cfg
@rank_zero_only
def print_config(
cfg: DictConfig,
print_order: Sequence[str] = (
"datamodule",
"model",
"callbacks",
"loggers",
"trainer",
"paths",
),
resolve: bool = True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
cfg: Configuration composed by Hydra.
print_order: Determines in what order config components are printed.
resolve: Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
queue = []
# add fields from `print_order` to queue
for field in print_order:
queue.append(field) if field in cfg else log.warning(
f"Field '{field}' not found in config. Skipping '{field}' printing..."
)
# add all the other fields to queue (not specified in `print_order`)
for field in cfg:
if field not in queue:
queue.insert(0, field)
# generate config tree from queue
for field in queue:
branch = tree.add(field, style=style, guide_style=style)
config_group = cfg[field]
if isinstance(config_group, DictConfig):
branch_content = OmegaConf.to_yaml(config_group, resolve=resolve)
else:
branch_content = str(config_group)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
# print config tree
rich.print(tree)
def save_config(cfg: OmegaConf) -> None:
"""Saves the config to the output directory.
    This is necessary on top of hydra's default conf.yaml as it will resolve the
    entries, allowing one to resume jobs identically with elements such as
    ${now:%H-%M-%S}.
    Furthermore, hydra does not allow resuming a previous job from the same dir.
    The workaround is reload_original_config, but that will fail as hydra
    overwrites the default config.yaml file on startup, so this backup is needed
    for resuming.
"""
# In order to be able to resume the wandb logger session, save the run id
if hasattr(cfg, "loggers"):
if hasattr(cfg.loggers, "wandb"):
if wandb.run is not None:
cfg.loggers.wandb.id = wandb.run.id
# save config tree to file
OmegaConf.save(cfg, Path(cfg.paths.full_path, "full_config.yaml"), resolve=True)
@rank_zero_only
def log_hyperparameters(
cfg: DictConfig, model: LightningModule, trainer: Trainer
) -> None:
"""Passes the config dict to the trainer's logger, also calculates #
params."""
# Convert the config object to a hyperparameter dict
hparams = OmegaConf.to_container(cfg, resolve=True)
# calculate the number of trainable parameters in the model and add it
hparams["model/params/total"] = sum(p.numel() for p in model.parameters())
hparams["model/params/trainable"] = sum(
p.numel() for p in model.parameters() if p.requires_grad
)
hparams["model/params/non_trainable"] = sum(
p.numel() for p in model.parameters() if not p.requires_grad
)
trainer.logger.log_hyperparams(hparams)
def instantiate_collection(cfg_coll: DictConfig) -> List[Any]:
"""Uses hydra to instantiate a collection of classes and return a list."""
objs = []
if not cfg_coll:
log.warning("List of configs is empty")
return objs
if not isinstance(cfg_coll, DictConfig):
raise TypeError("List of configs must be a DictConfig!")
for _, cb_conf in cfg_coll.items():
if isinstance(cb_conf, DictConfig) and "_target_" in cb_conf:
log.info(f"Instantiating <{cb_conf._target_}>")
objs.append(hydra.utils.instantiate(cb_conf))
return objs
| 5,097 | 30.8625 | 86 | py |
PC-JeDi | PC-JeDi-main/src/datamodules/jetnet.py | from copy import deepcopy
from typing import Mapping
import numpy as np
from jetnet.datasets import JetNet
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.numpy_utils import log_squash
from src.physics import numpy_locals_to_mass_and_pt
class JetNetData(Dataset):
"""Wrapper for the JetNet dataset so it works with our models with
different inputs."""
def __init__(self, **kwargs) -> None:
# Extra arguments used here
self.log_squash_pt = kwargs.pop("log_squash_pt", False)
self.high_as_context = kwargs.pop("high_as_context", True)
self.recalc_high = kwargs.pop("recalculate_jet_from_pc", True)
self.n_jets = kwargs.pop("n_jets", None)
# All other arguments passed to the jetnet dataset constructor
self.csts, self.high = JetNet.getData(**kwargs)
self.csts = self.csts.astype(np.float32)
self.high = self.high.astype(np.float32)
# Trim the data based on the requested number of jets (None does nothing)
self.csts = self.csts[: self.n_jets].astype(np.float32)
self.high = self.high[: self.n_jets].astype(np.float32)
# Manually calculate the mask by looking for zero padding
self.mask = ~np.all(self.csts == 0, axis=-1)
# Change the constituent information from pt-fraction to pure pt
csts = self.csts.copy()
csts[..., -1] = csts[..., -1] * self.high[..., 0:1]
# Recalculate the jet mass and pt using the point cloud
if self.recalc_high:
self.high = numpy_locals_to_mass_and_pt(csts, self.mask)
# Change the pt fraction to log_squash(pt)
if self.log_squash_pt:
self.csts[..., -1] = log_squash(csts[..., -1]) * self.mask
def __getitem__(self, idx) -> tuple:
csts = self.csts[idx]
high = self.high[idx] if self.high_as_context else np.empty(0, dtype="f")
mask = self.mask[idx]
return csts, mask, high
def __len__(self) -> int:
return len(self.high)
class JetNetDataModule(LightningDataModule):
def __init__(
self,
*,
data_conf: Mapping,
loader_kwargs: Mapping,
) -> None:
super().__init__()
self.save_hyperparameters(logger=False)
# Get the dimensions of the data from the config file
self.dim = len(data_conf["particle_features"])
self.n_nodes = data_conf["num_particles"]
if data_conf["high_as_context"]:
self.ctxt_dim = len(data_conf["jet_features"])
else:
self.ctxt_dim = 0
def setup(self, stage: str) -> None:
"""Sets up the relevant datasets."""
if stage == "fit":
self.train_set = JetNetData(**self.hparams.data_conf, split="train")
self.valid_set = JetNetData(**self.hparams.data_conf, split="test")
if stage == "test":
self.test_set = JetNetData(**self.hparams.data_conf, split="test")
def train_dataloader(self) -> DataLoader:
return DataLoader(self.train_set, **self.hparams.loader_kwargs, shuffle=True)
def val_dataloader(self) -> DataLoader:
return DataLoader(self.valid_set, **self.hparams.loader_kwargs, shuffle=False)
def test_dataloader(self) -> DataLoader:
test_kwargs = deepcopy(self.hparams.loader_kwargs)
test_kwargs["drop_last"] = False
return DataLoader(self.test_set, **test_kwargs, shuffle=False)
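# A minimal configuration sketch (illustrative only): the four keys popped in
# JetNetData.__init__ are handled here, every other key in data_conf is passed
# straight to jetnet's JetNet.getData (see the jetnet documentation for the
# exact argument names, e.g. jet_type or data_dir).
#   data_conf = {
#       "num_particles": 30,
#       "particle_features": ["etarel", "phirel", "ptrel"],
#       "jet_features": ["pt", "mass"],
#       "high_as_context": True,
#       "log_squash_pt": False,
#       "recalculate_jet_from_pc": True,
#       "n_jets": None,
#       # ... plus any JetNet.getData kwargs, e.g. "jet_type": "t"
#   }
#   dm = JetNetDataModule(
#       data_conf=data_conf,
#       loader_kwargs={"batch_size": 256, "num_workers": 4, "drop_last": True},
#   )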
| 3,490 | 34.989691 | 86 | py |
PC-JeDi | PC-JeDi-main/src/models/diffusion.py | import math
from typing import Optional, Tuple
import torch as T
from tqdm import tqdm
class VPDiffusionSchedule:
def __init__(self, max_sr: float = 1, min_sr: float = 1e-2) -> None:
self.max_sr = max_sr
self.min_sr = min_sr
    def __call__(self, time: T.Tensor) -> Tuple[T.Tensor, T.Tensor]:
return cosine_diffusion_shedule(time, self.max_sr, self.min_sr)
def get_betas(self, time: T.Tensor) -> T.Tensor:
return cosine_beta_shedule(time, self.max_sr, self.min_sr)
def cosine_diffusion_shedule(
diff_time: T.Tensor, max_sr: float = 1, min_sr: float = 1e-2
) -> Tuple[T.Tensor, T.Tensor]:
"""Calculates the signal and noise rate for any point in the diffusion
    process.
Using continuous diffusion times between 0 and 1 which make switching between
different numbers of diffusion steps between training and testing much easier.
Returns only the values needed for the jump forward diffusion step and the reverse
DDIM step.
    These are sqrt(alpha_bar) and sqrt(1 - alpha_bar) which are called the signal_rate
and noise_rate respectively.
The jump forward diffusion process is simply a weighted sum of:
input * signal_rate + eps * noise_rate
    Uses a cosine annealing schedule as proposed in https://arxiv.org/abs/2102.09672
Args:
diff_time: The time used to sample the diffusion scheduler
Output will match the shape
Must be between 0 and 1
max_sr: The initial rate at the first step
min_sr: How much signal is preserved at end of diffusion
(can't be zero due to log)
"""
# Use cosine annealing, which requires switching from times -> angles
start_angle = math.acos(max_sr)
end_angle = math.acos(min_sr)
diffusion_angles = start_angle + diff_time * (end_angle - start_angle)
signal_rates = T.cos(diffusion_angles)
noise_rates = T.sin(diffusion_angles)
return signal_rates, noise_rates
def cosine_beta_shedule(
diff_time: T.Tensor, max_sr: float = 1, min_sr: float = 1e-2
) -> T.Tensor:
"""Returns the beta values for the continuous flows using the above cosine
scheduler."""
start_angle = math.acos(max_sr)
end_angle = math.acos(min_sr)
diffusion_angles = start_angle + diff_time * (end_angle - start_angle)
return 2 * (end_angle - start_angle) * T.tan(diffusion_angles)
def ddim_predict(
noisy_data: T.Tensor,
pred_noises: T.Tensor,
signal_rates: T.Tensor,
noise_rates: T.Tensor,
) -> T.Tensor:
"""Use a single ddim step to predict the final image from anywhere in the
diffusion process."""
return (noisy_data - noise_rates * pred_noises) / signal_rates
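# The identity used above follows directly from the forward (jump) diffusion
# step defined by the schedule:
#   x_t = signal_rate * x_0 + noise_rate * eps
# so, given an estimate eps_hat of the noise, the denoised prediction is
#   x_0_hat = (x_t - noise_rate * eps_hat) / signal_rate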
@T.no_grad()
def ddim_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the DDIM sampling process to generate a batch of samples from
noise.
Args:
model: A denoising diffusion model
Requires: inpt_dim, device, forward() method that outputs pred noise
        diff_sched: A diffusion schedule object to calculate signal and noise rates
        initial_noise: The initial noise to pass through the process
        n_steps: The number of iterations to generate the samples
        keep_all: Return all stages of diffusion process
            Can be memory heavy for large batches
mask: The mask for the output point clouds
ctxt: The context tensor for the output point clouds
        clip_predictions: Can stabilise generation by clipping the outputs
"""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
# The shape needed for expanding the time encodings
expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)
    # Storage for the intermediate stages and the size of each time step
all_stages = []
step_size = 1 / n_steps
# The initial variables needed for the loop
noisy_data = initial_noise
diff_times = T.ones(num_samples, device=model.device)
next_signal_rates, next_noise_rates = diff_sched(diff_times.view(expanded_shape))
for step in tqdm(range(n_steps), "DDIM-sampling", leave=False):
# Update with the previous 'next' step
signal_rates = next_signal_rates
noise_rates = next_noise_rates
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(noisy_data)
# Apply the denoise step to get X_0 and expected noise
pred_noises = model(noisy_data, diff_times, mask, ctxt)
pred_data = ddim_predict(noisy_data, pred_noises, signal_rates, noise_rates)
# Get the next predicted components using the next signal and noise rates
diff_times = diff_times - step_size
next_signal_rates, next_noise_rates = diff_sched(
diff_times.view(expanded_shape)
)
# Clamp the predicted X_0 for stability
if clip_predictions is not None:
pred_data.clamp_(*clip_predictions)
# Remix the predicted components to go from estimated X_0 -> X_{t-1}
noisy_data = next_signal_rates * pred_data + next_noise_rates * pred_noises
return pred_data, all_stages
@T.no_grad()
def euler_maruyama_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the full reverse process to noise to generate a batch of
samples."""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
# The shape needed for expanding the time encodings
expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)
    # Storage for the intermediate stages and the size of each time step
all_stages = []
delta_t = 1 / n_steps
# The initial variables needed for the loop
x_t = initial_noise
t = T.ones(num_samples, device=model.device)
for step in tqdm(range(n_steps), "Euler-Maruyama-sampling", leave=False):
# Use the model to get the expected noise
pred_noises = model(x_t, t, mask, ctxt)
        # Convert the predicted noise into the score s_theta
_, noise_rates = diff_sched(t.view(expanded_shape))
s = -pred_noises / noise_rates
        # Take one step using the Euler-Maruyama method
betas = diff_sched.get_betas(t.view(expanded_shape))
x_t += 0.5 * betas * (x_t + 2 * s) * delta_t
x_t += (betas * delta_t).sqrt() * T.randn_like(x_t)
t -= delta_t
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(x_t)
# Clamp the denoised data for stability
if clip_predictions is not None:
x_t.clamp_(*clip_predictions)
return x_t, all_stages
@T.no_grad()
def euler_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the full reverse process to noise to generate a batch of
samples."""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
# The shape needed for expanding the time encodings
expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)
    # Storage for the intermediate stages and the size of each time step
all_stages = []
delta_t = 1 / n_steps
# The initial variables needed for the loop
t = T.ones(num_samples, device=model.device)
signal_rates, noise_rates = diff_sched(t.view(expanded_shape))
x_t = initial_noise * (signal_rates + noise_rates)
for step in tqdm(range(n_steps), "Euler-sampling", leave=False):
# Take a step using the euler method and the gradient calculated by the ode
x_t += get_ode_gradient(model, diff_sched, x_t, t, mask, ctxt) * delta_t
t -= delta_t
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(x_t)
# Clamp the denoised data for stability
if clip_predictions is not None:
x_t.clamp_(*clip_predictions)
return x_t, all_stages
@T.no_grad()
def runge_kutta_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the full reverse process to noise to generate a batch of
samples."""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
    # Storage for the intermediate stages and the size of each time step
all_stages = []
delta_t = 1 / n_steps
# Wrap the ode gradient in a lambda function depending only on xt and t
ode_grad = lambda t, x_t: get_ode_gradient(model, diff_sched, x_t, t, mask, ctxt)
# The initial variables needed for the loop
x_t = initial_noise
t = T.ones(num_samples, device=model.device)
for step in tqdm(range(n_steps), "Runge-Kutta-sampling", leave=False):
k1 = delta_t * (ode_grad(t, x_t))
k2 = delta_t * (ode_grad((t - delta_t / 2), (x_t + k1 / 2)))
k3 = delta_t * (ode_grad((t - delta_t / 2), (x_t + k2 / 2)))
k4 = delta_t * (ode_grad((T.clamp_min(t - delta_t, 0)), (x_t + k3)))
k = (k1 + 2 * k2 + 2 * k3 + k4) / 6
x_t += k
t -= delta_t
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(x_t)
# Clamp the denoised data for stability
if clip_predictions is not None:
x_t.clamp_(*clip_predictions)
return x_t, all_stages
def get_ode_gradient(
model,
diff_sched: VPDiffusionSchedule,
x_t: T.Tensor,
t: T.Tensor,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
) -> T.Tensor:
expanded_shape = [-1] + [1] * (x_t.dim() - 1)
_, noise_rates = diff_sched(t.view(expanded_shape))
betas = diff_sched.get_betas(t.view(expanded_shape))
return 0.5 * betas * (x_t - model(x_t, t, mask, ctxt) / noise_rates)
def run_sampler(sampler: str, *args, **kwargs) -> Tuple[T.Tensor, list]:
if sampler == "em":
return euler_maruyama_sampler(*args, **kwargs)
if sampler == "euler":
return euler_sampler(*args, **kwargs)
if sampler == "rk":
return runge_kutta_sampler(*args, **kwargs)
if sampler == "ddim":
return ddim_sampler(*args, **kwargs)
raise RuntimeError(f"Unknown sampler: {sampler}")
| 11,263 | 33.873065 | 86 | py |
PC-JeDi | PC-JeDi-main/src/models/transformers.py | """Some classes to describe transformer architectures."""
import math
from typing import Mapping, Optional, Union
import torch as T
import torch.nn as nn
from torch.nn.functional import dropout, softmax
from .modules import DenseNetwork
def merge_masks(
q_mask: Union[T.BoolTensor, None],
kv_mask: Union[T.BoolTensor, None],
attn_mask: Union[T.BoolTensor, None],
q_shape: T.Size,
k_shape: T.Size,
device: T.device,
) -> Union[None, T.BoolTensor]:
"""Create a full attention mask which incoporates the padding
information."""
# Create the full mask which combines the attention and padding masks
merged_mask = None
# If either pad mask exists, create
if q_mask is not None or kv_mask is not None:
if q_mask is None:
q_mask = T.full(q_shape[:-1], True, device=device)
if kv_mask is None:
kv_mask = T.full(k_shape[:-1], True, device=device)
merged_mask = q_mask.unsqueeze(-1) & kv_mask.unsqueeze(-2)
# If attention mask exists, create
if attn_mask is not None:
merged_mask = attn_mask if merged_mask is None else attn_mask & merged_mask
return merged_mask
def attention(
query: T.Tensor,
key: T.Tensor,
value: T.Tensor,
dim_key: int,
attn_mask: Optional[T.BoolTensor] = None,
attn_bias: Optional[T.Tensor] = None,
drp: float = 0.0,
training: bool = True,
) -> T.Tensor:
"""Apply the attention using the scaled dot product between the key query
and key tensors, then matrix multiplied by the value.
Note that the attention scores are ordered in recv x send, which is the opposite
to how I usually do it for the graph network, which is send x recv
We use masked fill -T.inf as this kills the padded key/values elements but
    introduces nans for padded query elements. We could use a very small number like
    -1e9 but this would need to be scaled if we are using half precision.
Args:
query: Batched query sequence of tensors (b, h, s, f)
key: Batched key sequence of tensors (b, h, s, f)
value: Batched value sequence of tensors (b, h, s, f)
dim_key: The dimension of the key features, used to scale the dot product
attn_mask: The attention mask, used to blind certain combinations of k,q pairs
attn_bias: Extra weights to combine with attention weights
drp: Dropout probability
training: If the model is in training mode, effects the dropout applied
"""
# Perform the matrix multiplication
scores = T.matmul(query, key.transpose(-2, -1)) / math.sqrt(dim_key)
# Add the bias terms if present
if attn_bias is not None: # Move the head dimension to the first
scores = scores + attn_bias.permute(0, 3, 1, 2)
# Mask away the scores between invalid elements in sequence
if attn_mask is not None:
scores = scores.masked_fill(~attn_mask.unsqueeze(-3), -T.inf)
# Apply the softmax function per head feature
scores = softmax(scores, dim=-1)
# Kill the nans introduced by the padded query elements
scores = T.nan_to_num(scores, 0)
# Apply dropout to the attention scores
scores = dropout(scores, p=drp, training=training)
# Finally multiply these scores by the output
scores = T.matmul(scores, value)
return scores
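# A minimal shape sketch for the function above (illustrative only): with a
# batch of 2, 4 heads, a sequence length of 10 and a head dimension of 16,
#   q = k = v = T.randn(2, 4, 10, 16)
#   out = attention(q, k, v, dim_key=16)  # -> shape (2, 4, 10, 16)
# A padding mask of shape (2, 10, 10) passed as attn_mask is broadcast over
# the head dimension inside the function.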
class MultiHeadedAttentionBlock(nn.Module):
"""Generic Multiheaded Attention.
    Takes in three sequences with dim: (batch, sequence, features)
- q: The primary sequence queries (determines output sequence length)
- k: The attending sequence keys (determines incoming information)
- v: The attending sequence values
In a message passing sense you can think of q as your receiver nodes, v and k
are the information coming from the sender nodes.
When q == k(and v) this is a SELF attention operation
When q != k(and v) this is a CROSS attention operation
===
Block operations:
1) Uses three linear layers to project the sequences.
- q = q_linear * q
- k = k_linear * k
- v = v_linear * v
2) Outputs are reshaped to add a head dimension, and transposed for matmul.
- features = model_dim = head_dim * num_heads
- dim becomes: batch, num_heads, sequence, head_dim
3) Passes these through to the attention module (message passing)
- In standard transformers this is the scaled dot product attention
- Also takes additional dropout layer to mask the attention
4) Flatten out the head dimension and pass through final linear layer
        - results are same as if attention was done separately for each head and concat
- dim: batch, q_seq, head_dim * num_heads
"""
def __init__(
self,
model_dim: int,
num_heads: int = 1,
drp: float = 0,
) -> None:
"""
Args:
model_dim: The dimension of the model
num_heads: The number of different attention heads to process in parallel
            - Must allow integer division into model_dim
drp: The dropout probability used in the MHA operation
"""
super().__init__()
# Define model base attributes
self.model_dim = model_dim
self.num_heads = num_heads
self.head_dim = model_dim // num_heads
# Check that the dimension of each head makes internal sense
if self.head_dim * num_heads != model_dim:
raise ValueError("Model dimension must be divisible by number of heads!")
# Initialise the weight matrices
self.q_linear = nn.Linear(model_dim, model_dim)
self.k_linear = nn.Linear(model_dim, model_dim)
self.v_linear = nn.Linear(model_dim, model_dim)
self.out_linear = nn.Linear(model_dim, model_dim)
self.drp = drp
def forward(
self,
q: T.Tensor,
k: Optional[T.Tensor] = None,
v: Optional[T.Tensor] = None,
q_mask: Optional[T.BoolTensor] = None,
kv_mask: Optional[T.BoolTensor] = None,
attn_mask: Optional[T.BoolTensor] = None,
attn_bias: Optional[T.Tensor] = None,
) -> T.Tensor:
"""
Args:
q: The main sequence queries (determines the output length)
k: The incoming information keys
v: The incoming information values
q_mask: Shows which elements of the main sequence are real
kv_mask: Shows which elements of the attn sequence are real
attn_mask: Extra mask for the attention matrix (eg: look ahead)
attn_bias: Extra bias term for the attention matrix (eg: edge features)
"""
# If only q and q_mask are provided then we automatically apply self attention
if k is None:
k = q
if kv_mask is None:
kv_mask = q_mask
v = v if v is not None else k
# Store the batch size, useful for reshaping
b_size, seq, feat = q.shape
        # Work out the masking situation, with padding, no peeking etc
attn_mask = merge_masks(q_mask, kv_mask, attn_mask, q.shape, k.shape, q.device)
# Generate the q, k, v projections, break final head dimension in 2
shape = (b_size, -1, self.num_heads, self.head_dim)
q = self.q_linear(q).view(shape)
k = self.k_linear(k).view(shape)
v = self.v_linear(v).view(shape)
# Transpose to get dimensions: B,H,Seq,HD (required for matmul)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
# Calculate the new sequence values, for memory reasons overwrite q
q = attention(
q,
k,
v,
self.head_dim,
attn_mask=attn_mask,
attn_bias=attn_bias,
drp=self.drp,
training=self.training,
) # Returned shape is B,H,Q_seq,HD
        # Concatenate all of the heads together to get shape: B,Seq,F
q = q.transpose(1, 2).contiguous().view(b_size, -1, self.model_dim)
# Pass through final linear layer
q = self.out_linear(q)
return q
class TransformerEncoderLayer(nn.Module):
"""A transformer encoder layer based on the GPT-2+Normformer style
    architecture.
We choose Normformer as it has often proved to be the most stable to train
https://arxiv.org/abs/2210.06423
https://arxiv.org/abs/2110.09456
It contains:
- Multihead(self)Attention block
- A dense network
Layernorm is applied before each operation
Residual connections are used to bypass each operation
"""
def __init__(
self,
model_dim: int,
mha_config: Optional[Mapping] = None,
dense_config: Optional[Mapping] = None,
ctxt_dim: int = 0,
) -> None:
"""
Args:
            model_dim: The embedding dimension of the transformer block
mha_config: Keyword arguments for multiheaded-attention block
dense_config: Keyword arguments for feed forward network
ctxt_dim: Context dimension,
"""
super().__init__()
mha_config = mha_config or {}
dense_config = dense_config or {}
self.model_dim = model_dim
self.ctxt_dim = ctxt_dim
# The basic blocks
self.self_attn = MultiHeadedAttentionBlock(model_dim, **mha_config)
self.dense = DenseNetwork(
model_dim, outp_dim=model_dim, ctxt_dim=ctxt_dim, **dense_config
)
# The normalisation layers (lots from NormFormer)
self.norm1 = nn.LayerNorm(model_dim)
self.norm2 = nn.LayerNorm(model_dim)
self.norm3 = nn.LayerNorm(model_dim)
def forward(
self,
x: T.Tensor,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
attn_bias: Optional[T.Tensor] = None,
attn_mask: Optional[T.BoolTensor] = None,
) -> T.Tensor:
"Pass through the layer using residual connections and layer normalisation"
x = x + self.norm2(
self.self_attn(
self.norm1(x), q_mask=mask, attn_mask=attn_mask, attn_bias=attn_bias
)
)
x = x + self.dense(self.norm3(x), ctxt)
return x
class TransformerEncoder(nn.Module):
"""A stack of N transformer encoder layers followed by a final
normalisation step.
Sequence -> Sequence
"""
def __init__(
self,
model_dim: int = 64,
num_layers: int = 3,
mha_config: Optional[Mapping] = None,
dense_config: Optional[Mapping] = None,
ctxt_dim: int = 0,
) -> None:
"""
Args:
            model_dim: Feature size for input, output, and all intermediate layers
num_layers: Number of encoder layers used
mha_config: Keyword arguments for the mha block
dense_config: Keyword arguments for the dense network in each layer
ctxt_dim: Dimension of the context inputs
"""
super().__init__()
self.model_dim = model_dim
self.num_layers = num_layers
self.layers = nn.ModuleList(
[
TransformerEncoderLayer(model_dim, mha_config, dense_config, ctxt_dim)
for _ in range(num_layers)
]
)
self.final_norm = nn.LayerNorm(model_dim)
def forward(self, x: T.Tensor, **kwargs) -> T.Tensor:
"""Pass the input through all layers sequentially."""
for layer in self.layers:
x = layer(x, **kwargs)
return self.final_norm(x)
class FullTransformerEncoder(nn.Module):
"""A transformer encoder with added input and output embedding networks.
Sequence -> Sequence
"""
def __init__(
self,
inpt_dim: int,
outp_dim: int,
edge_dim: int = 0,
ctxt_dim: int = 0,
te_config: Optional[Mapping] = None,
node_embd_config: Optional[Mapping] = None,
outp_embd_config: Optional[Mapping] = None,
edge_embd_config: Optional[Mapping] = None,
ctxt_embd_config: Optional[Mapping] = None,
) -> None:
"""
Args:
inpt_dim: Dim. of each element of the sequence
outp_dim: Dim. of of the final output vector
edge_dim: Dim. of the input edge features
ctxt_dim: Dim. of the context vector to pass to the embedding nets
            te_config: Keyword arguments to pass to the TransformerEncoder constructor
node_embd_config: Keyword arguments for node dense embedder
outp_embd_config: Keyword arguments for output dense embedder
edge_embd_config: Keyword arguments for edge dense embedder
ctxt_embd_config: Keyword arguments for context dense embedder
"""
super().__init__()
self.inpt_dim = inpt_dim
self.outp_dim = outp_dim
self.ctxt_dim = ctxt_dim
self.edge_dim = edge_dim
te_config = te_config or {}
node_embd_config = node_embd_config or {}
outp_embd_config = outp_embd_config or {}
edge_embd_config = edge_embd_config or {}
# Initialise the context embedding network (optional)
if self.ctxt_dim:
self.ctxt_emdb = DenseNetwork(
inpt_dim=self.ctxt_dim,
**ctxt_embd_config,
)
self.ctxt_out = self.ctxt_emdb.outp_dim
else:
self.ctxt_out = 0
        # Initialise the TransformerEncoder, the main part of this network
self.te = TransformerEncoder(**te_config, ctxt_dim=self.ctxt_out)
self.model_dim = self.te.model_dim
# Initialise all embedding networks
self.node_embd = DenseNetwork(
inpt_dim=self.inpt_dim,
outp_dim=self.model_dim,
ctxt_dim=self.ctxt_out,
**node_embd_config,
)
self.outp_embd = DenseNetwork(
inpt_dim=self.model_dim,
outp_dim=self.outp_dim,
ctxt_dim=self.ctxt_out,
**outp_embd_config,
)
# Initialise the edge embedding network (optional)
if self.edge_dim:
self.edge_embd = DenseNetwork(
inpt_dim=self.edge_dim,
outp_dim=self.te.layers[0].self_attn.num_heads,
ctxt_dim=self.ctxt_out,
**edge_embd_config,
)
def forward(
self,
x: T.Tensor,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
attn_bias: Optional[T.Tensor] = None,
attn_mask: Optional[T.BoolTensor] = None,
) -> T.Tensor:
"""Pass the input through all layers sequentially."""
if self.ctxt_dim:
ctxt = self.ctxt_emdb(ctxt)
if self.edge_dim:
attn_bias = self.edge_embd(attn_bias, ctxt)
x = self.node_embd(x, ctxt)
x = self.te(x, mask=mask, ctxt=ctxt, attn_bias=attn_bias, attn_mask=attn_mask)
x = self.outp_embd(x, ctxt)
return x
| 15,049 | 33.837963 | 87 | py |
PC-JeDi | PC-JeDi-main/src/models/schedulers.py | from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class WarmupToConstant(_LRScheduler):
"""Gradually warm-up learning rate in optimizer to a constant value."""
def __init__(self, optimizer: Optimizer, num_steps: int = 100) -> None:
"""
args:
optimizer (Optimizer): Wrapped optimizer.
num_steps: target learning rate is reached at num_steps.
"""
self.num_steps = num_steps
self.finished = False
super().__init__(optimizer)
def get_lr(self) -> list[float]:
if self.last_epoch > self.num_steps:
return [base_lr for base_lr in self.base_lrs]
return [
(base_lr / self.num_steps) * self.last_epoch for base_lr in self.base_lrs
]
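if __name__ == "__main__":
    # Minimal sketch (not part of the original module): warm up the learning
    # rate of a single dummy parameter from 0 to 0.1 over 5 optimiser steps.
    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    opt = torch.optim.SGD([param], lr=0.1)
    sched = WarmupToConstant(opt, num_steps=5)
    for _ in range(7):
        opt.step()
        sched.step()
        print(sched.get_last_lr())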
| 793 | 32.083333 | 85 | py |
PC-JeDi | PC-JeDi-main/src/models/modules.py | """Collection of pytorch modules that make up the networks."""
import math
from typing import Optional, Union
import torch as T
import torch.nn as nn
def get_act(name: str) -> nn.Module:
"""Return a pytorch activation function given a name."""
if name == "relu":
return nn.ReLU()
if name == "lrlu":
return nn.LeakyReLU(0.1)
if name == "silu" or name == "swish":
return nn.SiLU()
if name == "selu":
return nn.SELU()
if name == "softmax":
return nn.Softmax()
if name == "gelu":
return nn.GELU()
if name == "tanh":
return nn.Tanh()
if name == "softmax":
return nn.Softmax()
if name == "sigmoid":
return nn.Sigmoid()
raise ValueError("No activation function with name: ", name)
def get_nrm(name: str, outp_dim: int) -> nn.Module:
"""Return a 1D pytorch normalisation layer given a name and a output size
Returns None object if name is none."""
if name == "batch":
return nn.BatchNorm1d(outp_dim)
if name == "layer":
return nn.LayerNorm(outp_dim)
if name == "none":
return None
else:
raise ValueError("No normalistation with name: ", name)
class MLPBlock(nn.Module):
"""A simple MLP block that makes up a dense network.
Made up of several layers containing:
- linear map
- activation function [Optional]
- layer normalisation [Optional]
- dropout [Optional]
    Only the input of the block is concatenated with context information.
For residual blocks, the input is added to the output of the final layer.
"""
def __init__(
self,
inpt_dim: int,
outp_dim: int,
ctxt_dim: int = 0,
n_layers: int = 1,
act: str = "lrlu",
nrm: str = "none",
drp: float = 0,
do_res: bool = False,
) -> None:
"""Init method for MLPBlock.
Parameters
----------
inpt_dim : int
The number of features for the input layer
outp_dim : int
The number of output features
ctxt_dim : int, optional
The number of contextual features to concat to the inputs, by default 0
        n_layers : int, optional
            The number of transform layers in this block, by default 1
        act : str, optional
            A string indicating the name of the activation function, by default "lrlu"
        nrm : str, optional
            A string indicating the name of the normalisation, by default "none"
        drp : float, optional
            The dropout probability, 0 implies no dropout, by default 0
        do_res : bool, optional
            Add to previous output, only if dim does not change, by default False
"""
super().__init__()
# Save the input and output dimensions of the module
self.inpt_dim = inpt_dim
self.outp_dim = outp_dim
self.ctxt_dim = ctxt_dim
# If this layer includes an additive residual connection
self.do_res = do_res and (inpt_dim == outp_dim)
# Initialise the block layers as a module list
self.block = nn.ModuleList()
for n in range(n_layers):
# Increase the input dimension of the first layer to include context
lyr_in = inpt_dim + ctxt_dim if n == 0 else outp_dim
# Linear transform, activation, normalisation, dropout
self.block.append(nn.Linear(lyr_in, outp_dim))
if act != "none":
self.block.append(get_act(act))
if nrm != "none":
self.block.append(get_nrm(nrm, outp_dim))
if drp > 0:
self.block.append(nn.Dropout(drp))
def forward(self, inpt: T.Tensor, ctxt: Optional[T.Tensor] = None) -> T.Tensor:
"""
args:
tensor: Pytorch tensor to pass through the network
ctxt: The conditioning tensor, can be ignored
"""
# Concatenate the context information to the input of the block
if self.ctxt_dim and ctxt is None:
raise ValueError(
"Was expecting contextual information but none has been provided!"
)
temp = T.cat([inpt, ctxt], dim=-1) if self.ctxt_dim else inpt
# Pass through each transform in the block
for layer in self.block:
temp = layer(temp)
# Add the original inputs again for the residual connection
if self.do_res:
temp = temp + inpt
return temp
def __repr__(self) -> str:
"""Generate a one line string summing up the components of the
block."""
string = str(self.inpt_dim)
if self.ctxt_dim:
string += f"({self.ctxt_dim})"
string += "->"
string += "->".join([str(b).split("(", 1)[0] for b in self.block])
string += "->" + str(self.outp_dim)
if self.do_res:
string += "(add)"
return string
class DenseNetwork(nn.Module):
"""A dense neural network made from a series of consecutive MLP blocks and
context injection layers."""
def __init__(
self,
inpt_dim: int,
outp_dim: int = 0,
ctxt_dim: int = 0,
hddn_dim: Union[int, list] = 32,
num_blocks: int = 1,
n_lyr_pbk: int = 1,
act_h: str = "lrlu",
act_o: str = "none",
do_out: bool = True,
nrm: str = "none",
drp: float = 0,
do_res: bool = False,
ctxt_in_inpt: bool = True,
ctxt_in_hddn: bool = False,
) -> None:
"""Initialise the DenseNetwork.
Parameters
----------
inpt_dim : int
The number of input neurons
outp_dim : int, optional
The number of output neurons. If none it will take from inpt or hddn,
by default 0
ctxt_dim : int, optional
The number of context features. The context feature use is determined by
ctxt_type, by default 0
hddn_dim : Union[int, list], optional
            The width of each hidden block. If a list it overrides depth, by default 32
num_blocks : int, optional
The number of hidden blocks, can be overwritten by hddn_dim, by default 1
n_lyr_pbk : int, optional
The number of transform layers per hidden block, by default 1
act_h : str, optional
The name of the activation function to apply in the hidden blocks,
by default "lrlu"
act_o : str, optional
The name of the activation function to apply to the outputs,
by default "none"
do_out : bool, optional
If the network has a dedicated output block, by default True
nrm : str, optional
Type of normalisation (layer or batch) in each hidden block, by default "none"
drp : float, optional
Dropout probability for hidden layers (0 means no dropout), by default 0
do_res : bool, optional
            Use residual-connections between hidden blocks (only if same size),
by default False
ctxt_in_inpt : bool, optional
Include the ctxt tensor in the input block, by default True
ctxt_in_hddn : bool, optional
Include the ctxt tensor in the hidden blocks, by default False
Raises
------
ValueError
If the network was given a context input but both ctxt_in_inpt and
ctxt_in_hddn were False
"""
super().__init__()
# Check that the context is used somewhere
if ctxt_dim:
if not ctxt_in_hddn and not ctxt_in_inpt:
raise ValueError("Network has context inputs but nowhere to use them!")
# We store the input, hddn (list), output, and ctxt dims to query them later
self.inpt_dim = inpt_dim
if not isinstance(hddn_dim, int):
self.hddn_dim = hddn_dim
else:
self.hddn_dim = num_blocks * [hddn_dim]
self.outp_dim = outp_dim or inpt_dim if do_out else self.hddn_dim[-1]
self.num_blocks = len(self.hddn_dim)
self.ctxt_dim = ctxt_dim
self.do_out = do_out
        # Necessary for this module to work with the nflows package
self.hidden_features = self.hddn_dim[-1]
# Input MLP block
self.input_block = MLPBlock(
inpt_dim=self.inpt_dim,
outp_dim=self.hddn_dim[0],
ctxt_dim=self.ctxt_dim if ctxt_in_inpt else 0,
act=act_h,
nrm=nrm,
drp=drp,
)
# All hidden blocks as a single module list
self.hidden_blocks = []
if self.num_blocks > 1:
self.hidden_blocks = nn.ModuleList()
for h_1, h_2 in zip(self.hddn_dim[:-1], self.hddn_dim[1:]):
self.hidden_blocks.append(
MLPBlock(
inpt_dim=h_1,
outp_dim=h_2,
ctxt_dim=self.ctxt_dim if ctxt_in_hddn else 0,
n_layers=n_lyr_pbk,
act=act_h,
nrm=nrm,
drp=drp,
do_res=do_res,
)
)
# Output block (optional and there is no normalisation, dropout or context)
if do_out:
self.output_block = MLPBlock(
inpt_dim=self.hddn_dim[-1],
outp_dim=self.outp_dim,
act=act_o,
)
def forward(self, inputs: T.Tensor, ctxt: Optional[T.Tensor] = None) -> T.Tensor:
"""Pass through all layers of the dense network."""
# Reshape the context if it is available
if ctxt is not None:
dim_diff = inputs.dim() - ctxt.dim()
if dim_diff > 0:
ctxt = ctxt.view(ctxt.shape[0], *dim_diff * (1,), *ctxt.shape[1:])
ctxt = ctxt.expand(*inputs.shape[:-1], -1)
# Pass through the input block
inputs = self.input_block(inputs, ctxt)
# Pass through each hidden block
for h_block in self.hidden_blocks: # Context tensor will only be used if
inputs = h_block(inputs, ctxt) # block was initialised with a ctxt dim
# Pass through the output block
if self.do_out:
inputs = self.output_block(inputs)
return inputs
def __repr__(self):
string = ""
string += "\n (inp): " + repr(self.input_block) + "\n"
for i, h_block in enumerate(self.hidden_blocks):
string += f" (h-{i+1}): " + repr(h_block) + "\n"
if self.do_out:
string += " (out): " + repr(self.output_block)
return string
def one_line_string(self):
"""Return a one line string that sums up the network structure."""
string = str(self.inpt_dim)
if self.ctxt_dim:
string += f"({self.ctxt_dim})"
string += ">"
string += str(self.input_block.outp_dim) + ">"
if self.num_blocks > 1:
string += ">".join(
[
str(layer.out_features)
for hidden in self.hidden_blocks
for layer in hidden.block
if isinstance(layer, nn.Linear)
]
)
string += ">"
if self.do_out:
string += str(self.outp_dim)
return string
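# A minimal usage sketch for DenseNetwork (illustrative only):
#   net = DenseNetwork(
#       inpt_dim=3, outp_dim=3, ctxt_dim=2, hddn_dim=64, num_blocks=2,
#       ctxt_in_hddn=True,
#   )
#   out = net(T.randn(10, 3), ctxt=T.randn(10, 2))  # -> shape (10, 3)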
class IterativeNormLayer(nn.Module):
"""A basic normalisation layer so it can be part of the model.
    Note! If a mask is provided in the forward pass, then the stats are
    calculated over the masked (flattened) inputs. For example: graph
    nodes are usually batch x n_nodes x features, so to normalise over
    the features one would typically give extra_dims as (0,). But nodes
    are always passed with the mask, which flattens them to batch x
    features. The batch dimension is handled automatically, so we don't
    pass any extra_dims.
"""
def __init__(
self,
inpt_dim: Union[T.Tensor, tuple, int],
means: Optional[T.Tensor] = None,
vars: Optional[T.Tensor] = None,
n: int = 0,
        max_n: int = 500_000,
extra_dims: Union[tuple, int] = (),
) -> None:
"""Init method for Normalisatiion module.
Args:
inpt_dim: Shape of the input tensor, required for reloading
means: Calculated means for the mapping. Defaults to None.
vars: Calculated variances for the mapping. Defaults to None.
            n: Number of samples used to make the mapping. Defaults to 0.
max_n: Maximum number of iterations before the means and vars are frozen
extra_dims: The extra dimension(s) over which to calculate the stats
Will always calculate over the batch dimension
"""
super().__init__()
# Fail if only one of means or vars is provided
if (means is None) ^ (vars is None): # XOR
raise ValueError(
"""Only one of 'means' and 'vars' is defined. Either both or
neither must be defined"""
)
        # Allow integer inpt_dim and n arguments
if isinstance(inpt_dim, int):
inpt_dim = (inpt_dim,)
if isinstance(n, int):
n = T.tensor(n)
# The dimensions over which to apply the normalisation, make positive!
if isinstance(extra_dims, int): # Ensure it is a list
extra_dims = [extra_dims]
else:
extra_dims = list(extra_dims)
if any([abs(e) > len(inpt_dim) for e in extra_dims]): # Check size
raise ValueError("extra_dims argument lists dimensions outside input range")
for d in range(len(extra_dims)):
if extra_dims[d] < 0: # make positive
extra_dims[d] = len(inpt_dim) + extra_dims[d]
extra_dims[d] += 1 # Add one because we are inserting a batch dimension
self.extra_dims = extra_dims
# Calculate the input and output shapes
self.max_n = max_n
self.inpt_dim = list(inpt_dim)
self.stat_dim = [1] + list(inpt_dim) # Add batch dimension
for d in range(len(self.stat_dim)):
if d in self.extra_dims:
self.stat_dim[d] = 1
        # Buffers are needed for saving/loading the layer
self.register_buffer(
"means", T.zeros(self.stat_dim) if means is None else means
)
self.register_buffer("vars", T.ones(self.stat_dim) if vars is None else vars)
self.register_buffer("n", n)
# For the welford algorithm it is useful to have another variable m2
self.register_buffer("m2", T.ones(self.stat_dim) if vars is None else vars)
# If the means are set here then the model is "frozen" and not updated
self.frozen = means is not None
def _mask(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
if mask is None:
return inpt
return inpt[mask]
def _check_attributes(self) -> None:
if self.means is None or self.vars is None:
raise ValueError(
"Stats for have not been initialised or fit() has not been run!"
)
def fit(
self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None, freeze: bool = True
) -> None:
"""Set the stats given a population of data."""
inpt = self._mask(inpt, mask)
self.vars, self.means = T.var_mean(
inpt, dim=(0, *self.extra_dims), keepdim=True
)
self.n = T.tensor(len(inpt), device=self.means.device)
self.m2 = self.vars * self.n
self.frozen = freeze
def forward(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
"""Applies the standardisation to a batch of inputs, also uses the
inputs to update the running stats if in training mode."""
with T.no_grad():
sel_inpt = self._mask(inpt, mask)
if not self.frozen and self.training:
self.update(sel_inpt)
# Apply the mapping
normed_inpt = (sel_inpt - self.means) / (self.vars.sqrt() + 1e-8)
# Undo the masking
if mask is not None:
inpt = inpt.clone() # prevents inplace operation, bad for autograd
inpt[mask] = normed_inpt
return inpt
return normed_inpt
def reverse(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
"""Unnormalises the inputs given the recorded stats."""
sel_inpt = self._mask(inpt, mask)
unnormed_inpt = sel_inpt * self.vars.sqrt() + self.means
# Undo the masking
if mask is not None:
inpt = inpt.clone() # prevents inplace operation, bad for autograd
inpt[mask] = unnormed_inpt
return inpt
return unnormed_inpt
def update(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> None:
"""Update the running stats using a batch of data."""
inpt = self._mask(inpt, mask)
# For first iteration
if self.n == 0:
self.fit(inpt, freeze=False)
return
# later iterations based on batched welford algorithm
with T.no_grad():
self.n += len(inpt)
delta = inpt - self.means
self.means += (delta / self.n).mean(
dim=(0, *self.extra_dims), keepdim=True
) * len(inpt)
delta2 = inpt - self.means
self.m2 += (delta * delta2).mean(
dim=(0, *self.extra_dims), keepdim=True
) * len(inpt)
self.vars = self.m2 / self.n
# Freeze the model if we exceed the requested stats
self.frozen = self.n >= self.max_n
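# A minimal usage sketch for IterativeNormLayer (illustrative only):
#   norm = IterativeNormLayer(3)
#   x = T.randn(100, 3) * 5 + 2
#   normed = norm(x)                 # updates the running stats while training
#   restored = norm.reverse(normed)  # approximately recovers x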
class CosineEncoding:
def __init__(
self,
outp_dim: int = 32,
min_value: float = 0.0,
max_value: float = 1.0,
frequency_scaling: str = "exponential",
) -> None:
self.outp_dim = outp_dim
self.min_value = min_value
self.max_value = max_value
self.frequency_scaling = frequency_scaling
def __call__(self, inpt: T.Tensor) -> T.Tensor:
return cosine_encoding(
inpt, self.outp_dim, self.min_value, self.max_value, self.frequency_scaling
)
def cosine_encoding(
x: T.Tensor,
outp_dim: int = 32,
min_value: float = 0.0,
max_value: float = 1.0,
frequency_scaling: str = "exponential",
) -> T.Tensor:
"""Computes a positional cosine encodings with an increasing series of
frequencies.
The frequencies either increase linearly or exponentially (default).
The latter is good for when max_value is large and extremely high sensitivity to the
input is required.
If inputs greater than the max value are provided, the outputs become degenerate.
    If inputs smaller than the min value are provided, the inputs to the cosine will
    be both positive and negative, which may lead to degenerate outputs.
Always make sure that the min and max bounds are not exceeded!
Args:
        x: The input, the final dimension is encoded. If 1D then it will be unsqueezed
out_dim: The dimension of the output encoding
min_value: Added to x (and max) as cosine embedding works with positive inputs
max_value: The maximum expected value, sets the scale of the lowest frequency
frequency_scaling: Either 'linear' or 'exponential'
Returns:
The cosine embeddings of the input using (out_dim) many frequencies
"""
# Unsqueeze if final dimension is flat
if x.shape[-1] != 1 or x.dim() == 1:
x = x.unsqueeze(-1)
    # Check that the bounds are obeyed
if T.any(x > max_value):
print("Warning! Passing values to cosine_encoding encoding that exceed max!")
if T.any(x < min_value):
print("Warning! Passing values to cosine_encoding encoding below min!")
# Calculate the various frequencies
if frequency_scaling == "exponential":
freqs = T.arange(outp_dim, device=x.device).exp()
elif frequency_scaling == "linear":
freqs = T.arange(1, outp_dim + 1, device=x.device)
else:
raise RuntimeError(f"Unrecognised frequency scaling: {frequency_scaling}")
return T.cos((x + min_value) * freqs * math.pi / (max_value + min_value))
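if __name__ == "__main__":
    # Minimal sketch (not part of the original module): encode a batch of
    # diffusion times in [0, 1] into 8-dimensional cosine features.
    times = T.rand(5)
    enc = cosine_encoding(times, outp_dim=8, min_value=0.0, max_value=1.0)
    print(enc.shape)  # -> torch.Size([5, 8])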
| 20,518 | 35.575758 | 90 | py |
PC-JeDi | PC-JeDi-main/src/models/pc_jedi.py | import copy
from functools import partial
from typing import Mapping, Optional, Tuple
import numpy as np
import pytorch_lightning as pl
import torch as T
import wandb
from jetnet.evaluation import w1efp, w1m, w1p
from src.models.diffusion import VPDiffusionSchedule, run_sampler
from src.models.modules import CosineEncoding, IterativeNormLayer
from src.models.schedulers import WarmupToConstant
from src.models.transformers import FullTransformerEncoder
from src.numpy_utils import undo_log_squash
from src.plotting import plot_mpgan_marginals
from src.torch_utils import get_loss_fn, to_np
class TransformerDiffusionGenerator(pl.LightningModule):
"""A generative model which uses the diffusion process on a point cloud."""
def __init__(
self,
*,
pc_dim: list,
ctxt_dim: int,
n_nodes: int,
cosine_config: Mapping,
diff_config: Mapping,
normaliser_config: Mapping,
trans_enc_config: Mapping,
optimizer: partial,
loss_name: str = "mse",
mle_loss_weight: float = 0.0,
ema_sync: float = 0.999,
sampler_name: str = "em",
sampler_steps: int = 100,
) -> None:
"""
Args:
pc_dim: The dimension of the point cloud
ctxt_dim: The size of the context vector for the point cloud
n_nodes: Max number of nodes used to train this model
cosine_config: For defining the cosine embedding arguments
normaliser_config: For defining the iterative normalisation layer
            diff_config: Keyword arguments for the diffusion schedule, defines the signal and noise rates
            trans_enc_config: Keyword arguments for the FullTransformerEncoder network
            optimizer: Partially initialised optimizer
ema_sync: How fast the ema network syncs with the given one
loss_name: Name of the loss function to use for noise estimation
            mle_loss_weight: Relative weight of the Maximum-Likelihood loss term
            sampler_name: Name of O/SDE solver, does not affect training.
            sampler_steps: Steps used in generation, does not affect training.
"""
super().__init__()
self.save_hyperparameters(logger=False)
# Class attributes
self.pc_dim = pc_dim
self.ctxt_dim = ctxt_dim
self.n_nodes = n_nodes
self.loss_fn = get_loss_fn(loss_name)
self.mle_loss_weight = mle_loss_weight
self.ema_sync = ema_sync
# The encoder and scheduler needed for diffusion
self.diff_sched = VPDiffusionSchedule(**diff_config)
self.time_encoder = CosineEncoding(**cosine_config)
# The layer which normalises the input point cloud data
self.normaliser = IterativeNormLayer((pc_dim,), **normaliser_config)
if self.ctxt_dim:
self.ctxt_normaliser = IterativeNormLayer((ctxt_dim,), **normaliser_config)
# The denoising transformer
self.net = FullTransformerEncoder(
inpt_dim=pc_dim,
outp_dim=pc_dim,
ctxt_dim=ctxt_dim + self.time_encoder.outp_dim,
**trans_enc_config,
)
# A copy of the network which will sync with an exponential moving average
self.ema_net = copy.deepcopy(self.net)
# Sampler to run in the validation/testing loop
self.sampler_name = sampler_name
self.sampler_steps = sampler_steps
# Record of the outputs of the validation step
self.val_outs = []
def forward(
self,
noisy_data: T.Tensor,
diffusion_times: T.Tensor,
mask: T.BoolTensor,
ctxt: Optional[T.Tensor] = None,
) -> T.Tensor:
"""Pass through the model and get an estimate of the noise added to the
input."""
# Use the appropriate network for training or validation
if self.training:
network = self.net
else:
network = self.ema_net
# Encode the times and combine with existing context info
context = self.time_encoder(diffusion_times)
if self.ctxt_dim:
context = T.cat([context, ctxt], dim=-1)
        # Use the selected network to estimate the noise present in the data
return network(noisy_data, mask=mask, ctxt=context)
def _shared_step(self, sample: tuple) -> Tuple[T.Tensor, T.Tensor]:
"""Shared step used in both training and validaiton."""
# Unpack the sample tuple
nodes, mask, ctxt = sample
# Pass through the normalisers
nodes = self.normaliser(nodes, mask)
if self.ctxt_dim:
ctxt = self.ctxt_normaliser(ctxt)
# Sample from the gaussian latent space to perturb the point clouds
noises = T.randn_like(nodes) * mask.unsqueeze(-1)
# Sample uniform random diffusion times and get the rates
diffusion_times = T.rand(size=(len(nodes), 1), device=self.device)
signal_rates, noise_rates = self.diff_sched(diffusion_times.view(-1, 1, 1))
# Mix the signal and noise according to the diffusion equation
noisy_nodes = signal_rates * nodes + noise_rates * noises
# Predict the noise using the network
pred_noises = self.forward(noisy_nodes, diffusion_times, mask, ctxt)
# Simple noise loss is for "perceptual quality"
simple_loss = self.loss_fn(noises[mask], pred_noises[mask])
        # MLE loss is for maximum likelihood training
if self.mle_loss_weight:
betas = self.diff_sched.get_betas(diffusion_times.view(-1, 1, 1))
mle_weights = betas / noise_rates
mle_loss = mle_weights * simple_loss
else:
mle_loss = T.zeros_like(simple_loss)
return simple_loss.mean(), mle_loss.mean()
def training_step(self, sample: tuple, _batch_idx: int) -> T.Tensor:
simple_loss, mle_loss = self._shared_step(sample)
total_loss = simple_loss + self.mle_loss_weight * mle_loss
self.log("train/simple_loss", simple_loss)
self.log("train/mle_loss", mle_loss)
self.log("train/total_loss", total_loss)
self._sync_ema_network()
return total_loss
def validation_step(self, sample: tuple, batch_idx: int) -> None:
simple_loss, mle_loss = self._shared_step(sample)
total_loss = simple_loss + self.mle_loss_weight * mle_loss
self.log("valid/simple_loss", simple_loss)
self.log("valid/mle_loss", mle_loss)
self.log("valid/total_loss", total_loss)
# Run the full generation of the sample during a validation step
outputs = self.full_generation(
self.sampler_name,
self.sampler_steps,
mask=sample[1],
ctxt=sample[2],
)
        # Add to the collection of the validation outputs
self.val_outs.append((to_np(outputs), to_np(sample)))
def on_validation_epoch_end(self) -> None:
"""At the end of the validation epoch, calculate and log the metrics
and plot the histograms.
        This function currently only works with MPGAN-style configs.
"""
# Combine all outputs
gen_nodes = np.vstack([v[0] for v in self.val_outs])
real_nodes = np.vstack([v[1][0] for v in self.val_outs])
mask = np.vstack([v[1][1] for v in self.val_outs])
high = np.vstack([v[1][2] for v in self.val_outs])
# Change the data from log(pt+1) into pt fraction (needed for metrics)
if self.trainer.datamodule.hparams.data_conf.log_squash_pt:
gen_nodes[..., -1] = undo_log_squash(gen_nodes[..., -1]) / high[..., 0:1]
real_nodes[..., -1] = undo_log_squash(real_nodes[..., -1]) / high[..., 0:1]
# Apply clipping
gen_nodes = np.nan_to_num(gen_nodes)
gen_nodes[..., 0] = np.clip(gen_nodes[..., 0], -0.5, 0.5)
gen_nodes[..., 1] = np.clip(gen_nodes[..., 1], -0.5, 0.5)
gen_nodes[..., 2] = np.clip(gen_nodes[..., 2], 0, 1)
real_nodes = np.nan_to_num(real_nodes)
real_nodes[..., 0] = np.clip(real_nodes[..., 0], -0.5, 0.5)
real_nodes[..., 1] = np.clip(real_nodes[..., 1], -0.5, 0.5)
real_nodes[..., 2] = np.clip(real_nodes[..., 2], 0, 1)
# Calculate and log the Wasserstein discriminants
bootstrap = {
"num_eval_samples": 10000,
"num_batches": 10,
}
w1m_val, w1m_err = w1m(real_nodes, gen_nodes, **bootstrap)
w1p_val, w1p_err = w1p(real_nodes, gen_nodes, **bootstrap)
w1efp_val, w1efp_err = w1efp(real_nodes, gen_nodes, efp_jobs=1, **bootstrap)
self.log("valid/w1m", w1m_val)
self.log("valid/w1m_err", w1m_err)
self.log("valid/w1p", w1p_val.mean())
self.log("valid/w1p_err", w1p_err.mean())
self.log("valid/w1efp", w1efp_val.mean())
self.log("valid/w1efp_err", w1efp_err.mean())
# Plot the MPGAN-like marginals
plot_mpgan_marginals(gen_nodes, real_nodes, mask, self.trainer.current_epoch)
self.val_outs.clear()
def _sync_ema_network(self) -> None:
"""Updates the Exponential Moving Average Network."""
with T.no_grad():
for params, ema_params in zip(
self.net.parameters(), self.ema_net.parameters()
):
ema_params.data.copy_(
self.ema_sync * ema_params.data
+ (1.0 - self.ema_sync) * params.data
)
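    # The update above is a standard exponential moving average,
    #   ema_w <- ema_sync * ema_w + (1 - ema_sync) * w,
    # run under no_grad after every optimiser step; with e.g. ema_sync = 0.999 the
    # EMA copy effectively averages over roughly the last thousand student updates
    # and is the network used for validation and sampling.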
def on_fit_start(self, *_args) -> None:
"""Function to run at the start of training."""
        # Define the metrics for wandb (otherwise the min won't be stored!)
if wandb.run is not None:
wandb.define_metric("train/simple_loss", summary="min")
wandb.define_metric("train/mle_loss", summary="min")
wandb.define_metric("train/total_loss", summary="min")
wandb.define_metric("valid/simple_loss", summary="min")
wandb.define_metric("valid/mle_loss", summary="min")
wandb.define_metric("valid/total_loss", summary="min")
wandb.define_metric("valid/w1m", summary="min")
wandb.define_metric("valid/w1p", summary="min")
wandb.define_metric("valid/w1efp", summary="min")
def set_sampler(
self, sampler_name: Optional[str] = None, sampler_steps: Optional[int] = None
) -> None:
"""Replaces the sampler list with a new one."""
if sampler_name is not None:
self.sampler_name = sampler_name
if sampler_steps is not None:
self.sampler_steps = sampler_steps
def full_generation(
self,
sampler: str,
steps: int,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
initial_noise: Optional[T.Tensor] = None,
) -> T.Tensor:
"""Fully generate a batch of data from noise, given context information
and a mask."""
        # Either a mask or initial noise must be provided, otherwise we don't know
        # how many samples to generate or with what cardinality
if mask is None and initial_noise is None:
raise ValueError("Please provide either a mask or noise to generate from")
if mask is None:
mask = T.full(initial_noise.shape[:-1], True, device=self.device)
if initial_noise is None:
initial_noise = T.randn((*mask.shape, self.pc_dim), device=self.device)
# Normalise the context
if self.ctxt_dim:
ctxt = self.ctxt_normaliser(ctxt)
assert len(ctxt) == len(initial_noise)
# Run the sampling method
outputs, _ = run_sampler(
sampler,
self,
self.diff_sched,
initial_noise=initial_noise * mask.unsqueeze(-1),
n_steps=steps,
mask=mask,
ctxt=ctxt,
clip_predictions=(-25, 25),
)
# Ensure that the output adheres to the mask
outputs[~mask] = 0
# Return the normalisation of the generated point cloud
return self.normaliser.reverse(outputs, mask=mask)
def configure_optimizers(self) -> dict:
"""Configure the optimisers and learning rate sheduler for this
model."""
# Finish initialising the optimiser and create the scheduler
opt = self.hparams.optimizer(params=self.parameters())
sched = WarmupToConstant(opt, num_steps=10_000)
# Return the dict for the lightning trainer
return {
"optimizer": opt,
"lr_scheduler": {
"scheduler": sched,
"interval": "step",
"frequency": 1,
},
}
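    # Lightning consumes this dictionary directly: the partially-instantiated
    # optimiser from the hparams is finalised with the model parameters, and the
    # WarmupToConstant schedule ramps the learning rate up over the first 10,000
    # optimiser steps, stepped once per batch ("interval": "step").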
| 12,805 | 38.403077 | 87 | py |
PC-JeDi | PC-JeDi-main/scripts/train.py | import pyrootutils
root = pyrootutils.setup_root(search_from=__file__, pythonpath=True)
import logging
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig
from src.hydra_utils import (
instantiate_collection,
log_hyperparameters,
print_config,
reload_original_config,
save_config,
)
log = logging.getLogger(__name__)
@hydra.main(
version_base=None, config_path=str(root / "configs"), config_name="train.yaml"
)
def main(cfg: DictConfig) -> None:
log.info("Setting up full job config")
if cfg.full_resume:
cfg = reload_original_config(cfg)
print_config(cfg)
if cfg.seed:
log.info(f"Setting seed to: {cfg.seed}")
pl.seed_everything(cfg.seed, workers=True)
log.info("Instantiating the data module")
datamodule = hydra.utils.instantiate(cfg.datamodule)
log.info("Instantiating the model")
model = hydra.utils.instantiate(
cfg.model,
pc_dim=datamodule.dim,
n_nodes=datamodule.n_nodes,
ctxt_dim=datamodule.ctxt_dim,
)
log.info(model)
log.info("Instantiating all callbacks")
callbacks = instantiate_collection(cfg.callbacks)
log.info("Instantiating the loggers")
loggers = instantiate_collection(cfg.loggers)
log.info("Instantiating the trainer")
trainer = hydra.utils.instantiate(cfg.trainer, callbacks=callbacks, logger=loggers)
if loggers:
log.info("Logging all hyperparameters")
log_hyperparameters(cfg, model, trainer)
log.info("Saving config so job can be resumed")
save_config(cfg)
log.info("Starting training!")
trainer.fit(model, datamodule, ckpt_path=cfg.ckpt_path)
if __name__ == "__main__":
main()
| 1,731 | 23.742857 | 87 | py |
trees_from_transformers | trees_from_transformers-master/run.py | import argparse
import datetime
import logging
import os
import pickle
from tqdm import tqdm
import torch
from transformers import *
from data.dataset import Dataset
from utils.measure import Measure
from utils.parser import not_coo_parser, parser
from utils.tools import set_seed, select_indices, group_indices
from utils.yk import get_actions, get_nonbinary_spans
MODELS = [(BertModel, BertTokenizer, BertConfig, 'bert-base-cased'),
(BertModel, BertTokenizer, BertConfig, 'bert-large-cased'),
(GPT2Model, GPT2Tokenizer, GPT2Config, 'gpt2'),
(GPT2Model, GPT2Tokenizer, GPT2Config, 'gpt2-medium'),
(RobertaModel, RobertaTokenizer, RobertaConfig, 'roberta-base'),
(RobertaModel, RobertaTokenizer, RobertaConfig, 'roberta-large'),
(XLNetModel, XLNetTokenizer, XLNetConfig, 'xlnet-base-cased'),
(XLNetModel, XLNetTokenizer, XLNetConfig, 'xlnet-large-cased')]
def evaluate(args):
scores = dict()
for model_class, tokenizer_class, model_config, pretrained_weights in MODELS:
tokenizer = tokenizer_class.from_pretrained(
pretrained_weights, cache_dir=args.lm_cache_path)
if args.from_scratch:
config = model_config.from_pretrained(pretrained_weights)
config.output_hidden_states = True
config.output_attentions = True
model = model_class(config).to(args.device)
else:
model = model_class.from_pretrained(
pretrained_weights,
cache_dir=args.lm_cache_path,
output_hidden_states=True,
output_attentions=True).to(args.device)
with torch.no_grad():
test_sent = tokenizer.encode('test', add_special_tokens=False)
token_ids = torch.tensor([test_sent]).to(args.device)
all_hidden, all_att = model(token_ids)[-2:]
n_layers = len(all_att)
n_att = all_att[0].size(1)
n_hidden = all_hidden[0].size(-1)
measure = Measure(n_layers, n_att)
data = Dataset(path=args.data_path, tokenizer=tokenizer)
for idx, s in tqdm(enumerate(data.sents), total=len(data.sents),
desc=pretrained_weights, ncols=70):
raw_tokens = data.raw_tokens[idx]
tokens = data.tokens[idx]
if len(raw_tokens) < 2:
data.cnt -= 1
continue
token_ids = tokenizer.encode(s, add_special_tokens=False)
token_ids_tensor = torch.tensor([token_ids]).to(args.device)
with torch.no_grad():
all_hidden, all_att = model(token_ids_tensor)[-2:]
all_hidden, all_att = list(all_hidden[1:]), list(all_att)
# (n_layers, seq_len, hidden_dim)
all_hidden = torch.cat([all_hidden[n] for n in range(n_layers)], dim=0)
# (n_layers, n_att, seq_len, seq_len)
all_att = torch.cat([all_att[n] for n in range(n_layers)], dim=0)
if len(tokens) > len(raw_tokens):
th = args.token_heuristic
if th == 'first' or th == 'last':
mask = select_indices(tokens, raw_tokens, pretrained_weights, th)
assert len(mask) == len(raw_tokens)
all_hidden = all_hidden[:, mask]
all_att = all_att[:, :, mask, :]
all_att = all_att[:, :, :, mask]
else:
# mask = torch.tensor(data.masks[idx])
mask = group_indices(tokens, raw_tokens, pretrained_weights)
raw_seq_len = len(raw_tokens)
all_hidden = torch.stack(
[all_hidden[:, mask == i].mean(dim=1)
for i in range(raw_seq_len)], dim=1)
all_att = torch.stack(
[all_att[:, :, :, mask == i].sum(dim=3)
for i in range(raw_seq_len)], dim=3)
all_att = torch.stack(
[all_att[:, :, mask == i].mean(dim=2)
for i in range(raw_seq_len)], dim=2)
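            # After this block every representation is word-level: with the "mean"
            # heuristic, hidden states are averaged over the sub-word pieces of each
            # word and attention maps are summed over key pieces then averaged over
            # query pieces (so each row remains a probability distribution); with
            # "first"/"last", a single representative piece per word is selected.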
l_hidden, r_hidden = all_hidden[:, :-1], all_hidden[:, 1:]
l_att, r_att = all_att[:, :, :-1], all_att[:, :, 1:]
syn_dists = measure.derive_dists(l_hidden, r_hidden, l_att, r_att)
gold_spans = data.gold_spans[idx]
gold_tags = data.gold_tags[idx]
assert len(gold_spans) == len(gold_tags)
for m, d in syn_dists.items():
pred_spans = []
for i in range(measure.scores[m].n):
dist = syn_dists[m][i].tolist()
if len(dist) > 1:
bias_base = (sum(dist) / len(dist)) * args.bias
bias = [bias_base * (1 - (1 / (len(dist) - 1)) * x)
for x in range(len(dist))]
dist = [dist[i] + bias[i] for i in range(len(dist))]
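                        # The injected bias decays linearly from bias_base at the
                        # left-most split point to zero at the right-most one, which
                        # realises the right-branching bias controlled by --bias.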
if args.use_not_coo_parser:
pred_tree = not_coo_parser(dist, raw_tokens)
else:
pred_tree = parser(dist, raw_tokens)
ps = get_nonbinary_spans(get_actions(pred_tree))[0]
pred_spans.append(ps)
measure.scores[m].update(pred_spans, gold_spans, gold_tags)
measure.derive_final_score()
scores[pretrained_weights] = measure.scores
if not os.path.exists(args.result_path):
os.makedirs(args.result_path)
with open(f'{args.result_path}/{pretrained_weights}.txt', 'w') as f:
print('Model name:', pretrained_weights, file=f)
print('Experiment time:', args.time, file=f)
print('# of layers:', n_layers, file=f)
print('# of attentions:', n_att, file=f)
print('# of hidden dimensions:', n_hidden, file=f)
print('# of processed sents:', data.cnt, file=f)
max_corpus_f1, max_sent_f1 = 0, 0
for n in range(n_layers):
print(f'[Layer {n + 1}]', file=f)
print('-' * (119 + measure.max_m_len), file=f)
for m, s in measure.scores.items():
if m in measure.h_measures + measure.a_avg_measures:
print(
f'| {m.upper()} {" " * (measure.max_m_len - len(m))} '
f'| Corpus F1: {s.corpus_f1[n] * 100:.2f} '
f'| Sent F1: {s.sent_f1[n] * 100:.2f} ',
end='', file=f)
for z in range(len(s.label_recalls[0])):
print(
f'| {s.labels[z]}: '
f'{s.label_recalls[n][z] * 100:.2f} ',
end='', file=f)
print('|', file=f)
if s.sent_f1[n] > max_sent_f1:
max_corpus_f1 = s.corpus_f1[n]
max_sent_f1 = s.sent_f1[n]
max_measure = m
max_layer = n + 1
else:
for i in range(n_att):
m_att = str(i) if i > 9 else '0' + str(i)
m_att = m + m_att + " " * (
measure.max_m_len - len(m))
i_att = n_att * n + i
print(
f'| {m_att.upper()}'
f'| Corpus F1: {s.corpus_f1[i_att] * 100:.2f} '
f'| Sent F1: {s.sent_f1[i_att] * 100:.2f} ',
end='', file=f)
for z in range(len(s.label_recalls[0])):
print(f'| {s.labels[z]}: '
f'{s.label_recalls[i_att][z] * 100:.2f} ',
end='', file=f)
print('|', file=f)
if s.sent_f1[i_att] > max_sent_f1:
max_corpus_f1 = s.corpus_f1[i_att]
max_sent_f1 = s.sent_f1[i_att]
max_measure = m_att
max_layer = n + 1
print('-' * (119 + measure.max_m_len), file=f)
print(f'[MAX]: | Layer: {max_layer} '
f'| {max_measure.upper()} '
f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
f'| Sent F1: {max_sent_f1 * 100:.2f} |')
print(f'[MAX]: | Layer: {max_layer} '
f'| {max_measure.upper()} '
f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
f'| Sent F1: {max_sent_f1 * 100:.2f} |', file=f)
return scores
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data-path',
default='.data/PTB/ptb-test.txt', type=str)
parser.add_argument('--result-path', default='outputs', type=str)
parser.add_argument('--lm-cache-path',
default='/data/transformers', type=str)
parser.add_argument('--from-scratch', default=False, action='store_true')
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--bias', default=0.0, type=float,
help='the right-branching bias hyperparameter lambda')
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--token-heuristic', default='mean', type=str,
help='Available options: mean, first, last')
parser.add_argument('--use-not-coo-parser', default=False,
action='store_true',
help='Turning on this option will allow you to exploit '
'the NOT-COO parser (named by Dyer et al. 2019), '
'which has been broadly adopted by recent methods '
'for unsupervised parsing. As this parser utilizes'
' the right-branching bias in its inner workings, '
'it may give rise to some unexpected gains or '
'latent issues for the resulting trees. For more '
'details, see https://arxiv.org/abs/1909.09428.')
args = parser.parse_args()
setattr(args, 'device', f'cuda:{args.gpu}'
if torch.cuda.is_available() and args.gpu >= 0 else 'cpu')
setattr(args, 'time', datetime.datetime.now().strftime('%Y%m%d-%H:%M:%S'))
dataset_name = args.data_path.split('/')[-1].split('.')[0]
parser = '-w-not-coo-parser' if args.use_not_coo_parser else ''
pretrained = 'scratch' if args.from_scratch else 'pretrained'
result_path = f'{args.result_path}/{dataset_name}-{args.token_heuristic}'
result_path += f'-{pretrained}-{args.bias}{parser}'
setattr(args, 'result_path', result_path)
set_seed(args.seed)
logging.disable(logging.WARNING)
print('[List of arguments]')
for a in args.__dict__:
print(f'{a}: {args.__dict__[a]}')
scores = evaluate(args)
with open(f'{args.result_path}/scores.pickle', 'wb') as f:
pickle.dump(scores, f)
if __name__ == '__main__':
main()
| 11,441 | 45.893443 | 85 | py |
trees_from_transformers | trees_from_transformers-master/utils/yk.py | """
The functions in this file are originated from the code for
Compound Probabilistic Context-Free Grammars for Grammar Induction,
Y. Kim et al., ACL 2019.
For more details, visit https://github.com/harvardnlp/compound-pcfg.
"""
import re
def clean_number(w):
new_w = re.sub('[0-9]{1,}([,.]?[0-9]*)*', 'N', w)
return new_w
def get_stats(span1, span2):
tp = 0
fp = 0
fn = 0
for span in span1:
if span in span2:
tp += 1
else:
fp += 1
for span in span2:
if span not in span1:
fn += 1
return tp, fp, fn
def get_nonbinary_spans(actions, SHIFT=0, REDUCE=1):
spans = []
tags = []
stack = []
pointer = 0
binary_actions = []
nonbinary_actions = []
num_shift = 0
num_reduce = 0
for action in actions:
# print(action, stack)
if action == "SHIFT":
nonbinary_actions.append(SHIFT)
stack.append((pointer, pointer))
pointer += 1
binary_actions.append(SHIFT)
num_shift += 1
elif action[:3] == 'NT(':
# stack.append('(')
stack.append(action[3:-1].split('-')[0])
elif action == "REDUCE":
nonbinary_actions.append(REDUCE)
right = stack.pop()
left = right
n = 1
# while stack[-1] is not '(':
while type(stack[-1]) is tuple:
left = stack.pop()
n += 1
span = (left[0], right[1])
tag = stack.pop()
if left[0] != right[1]:
spans.append(span)
tags.append(tag)
stack.append(span)
while n > 1:
n -= 1
binary_actions.append(REDUCE)
num_reduce += 1
else:
assert False
assert (len(stack) == 1)
assert (num_shift == num_reduce + 1)
return spans, tags, binary_actions, nonbinary_actions
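# get_nonbinary_spans replays the SHIFT / NT(...) / REDUCE transitions with a stack:
# SHIFT pushes a single-token span, NT(...) pushes its label, and REDUCE pops the
# children plus the label and pushes the merged span. Spans covering a single token
# (left == right) are skipped, so the returned spans/tags describe multi-token
# constituents only, together with the equivalent binary action sequence.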
def get_actions(line):
output_actions = []
line_strip = line.rstrip()
i = 0
max_idx = (len(line_strip) - 1)
while i <= max_idx:
assert line_strip[i] == '(' or line_strip[i] == ')'
if line_strip[i] == '(':
if is_next_open_bracket(line_strip, i): # open non-terminal
curr_NT = get_nonterminal(line_strip, i)
output_actions.append('NT(' + curr_NT + ')')
i += 1
# get the next open bracket,
# which may be a terminal or another non-terminal
while line_strip[i] != '(':
i += 1
else: # it's a terminal symbol
output_actions.append('SHIFT')
while line_strip[i] != ')':
i += 1
i += 1
while line_strip[i] != ')' and line_strip[i] != '(':
i += 1
else:
output_actions.append('REDUCE')
if i == max_idx:
break
i += 1
while line_strip[i] != ')' and line_strip[i] != '(':
i += 1
assert i == max_idx
return output_actions
def is_next_open_bracket(line, start_idx):
for char in line[(start_idx + 1):]:
if char == '(':
return True
elif char == ')':
return False
raise IndexError('Bracket possibly not balanced, '
'open bracket not followed by closed bracket')
def get_nonterminal(line, start_idx):
assert line[start_idx] == '(' # make sure it's an open bracket
output = []
for char in line[(start_idx + 1):]:
if char == ' ':
break
assert not (char == '(') and not (char == ')')
output.append(char)
return ''.join(output)
def get_tags_tokens_lowercase(line):
output = []
line_strip = line.rstrip()
for i in range(len(line_strip)):
if i == 0:
assert line_strip[i] == '('
# fulfilling this condition means this is a terminal symbol
if line_strip[i] == '(' and not (is_next_open_bracket(line_strip, i)):
output.append(get_between_brackets(line_strip, i))
# print 'output:',output
output_tags = []
output_tokens = []
output_lowercase = []
for terminal in output:
terminal_split = terminal.split()
# print(terminal, terminal_split)
assert len(
terminal_split) == 2 # each terminal contains a POS tag and word
output_tags.append(terminal_split[0])
output_tokens.append(terminal_split[1])
output_lowercase.append(terminal_split[1].lower())
return [output_tags, output_tokens, output_lowercase]
def get_between_brackets(line, start_idx):
output = []
for char in line[(start_idx + 1):]:
if char == ')':
break
assert not (char == '(')
output.append(char)
return ''.join(output)
| 4,935 | 29.097561 | 78 | py |
trees_from_transformers | trees_from_transformers-master/utils/parser.py | import numpy as np
def not_coo_parser(score, sent):
assert len(score) == len(sent) - 1
if len(score) == 0:
parse_tree = f'(T {sent[0]} )'
elif len(score) == 1:
parse_tree = f'(T (T {sent[0]} ) (T {sent[1]} ) )'
else:
idx_max = np.argmax(score)
l_len = len(sent[:idx_max + 1])
r_len = len(sent[idx_max + 2:])
if l_len > 0 and r_len > 0:
l_tree = not_coo_parser(score[:idx_max], sent[:idx_max + 1])
r_tree = not_coo_parser(score[idx_max + 2:], sent[idx_max + 2:])
r_tree = f'(T (T {sent[idx_max +1]} ) {r_tree} )'
parse_tree = f'(T {l_tree} {r_tree} )'
else:
if l_len == 0:
r_tree = not_coo_parser(score[idx_max + 2:], sent[idx_max + 2:])
r_tree = f'(T (T {sent[idx_max +1]} ) {r_tree} )'
parse_tree = r_tree
else:
l_tree = not_coo_parser(score[:idx_max], sent[:idx_max + 1])
parse_tree = f'(T {l_tree} (T {sent[idx_max + 1]} ) )'
return parse_tree
def parser(score, sent):
assert len(score) == len(sent) - 1
if len(score) == 0:
parse_tree = f'(T {sent[0]} )'
elif len(score) == 1:
parse_tree = f'(T (T {sent[0]} ) (T {sent[1]} ) )'
else:
idx_max = np.argmax(score)
l_len = len(sent[:idx_max + 1])
r_len = len(sent[idx_max + 1:])
if l_len > 0 and r_len > 0:
l_tree = parser(score[:idx_max], sent[:idx_max + 1])
r_tree = parser(score[idx_max + 1:], sent[idx_max + 1:])
parse_tree = f'(T {l_tree} {r_tree} )'
else:
if l_len == 0:
r_tree = parser(score[idx_max + 1:], sent[idx_max + 1:])
parse_tree = r_tree
else:
l_tree = parser(score[:idx_max], sent[:idx_max + 1])
parse_tree = l_tree
return parse_tree
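# Worked example: the greedy top-down parser splits at the largest syntactic
# distance and recurses on the two halves, e.g.
#   parser([0.1, 0.9, 0.2], ['the', 'cat', 'sat', 'down'])
#   -> '(T (T (T the ) (T cat ) ) (T (T sat ) (T down ) ) )'
# not_coo_parser differs only in attaching the token just right of the split point
# directly as the left-most child of the right subtree (rather than letting the
# recursion place it).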
| 1,936 | 33.589286 | 80 | py |
trees_from_transformers | trees_from_transformers-master/utils/score.py | import numpy as np
import torch
from utils.yk import get_stats
class Score(object):
def __init__(self, n):
self.corpus_f1 = torch.zeros(n, 3, dtype=torch.float)
self.sent_f1 = torch.zeros(n, dtype=torch.float)
self.n = n
self.cnt = 0
self.labels = ['SBAR', 'NP', 'VP', 'PP', 'ADJP', 'ADVP']
self.label_recalls = np.zeros((n, 6), dtype=float)
self.label_cnts = np.zeros(6, dtype=float)
def update(self, pred_spans, gold_spans, gold_tags):
pred_sets = [set(ps[:-1]) for ps in pred_spans]
gold_set = set(gold_spans[:-1])
self.update_corpus_f1(pred_sets, gold_set)
self.update_sentence_f1(pred_sets, gold_set)
self.update_label_recalls(pred_spans, gold_spans, gold_tags)
self.cnt += 1
def update_label_recalls(self, pred, gold, tags):
for i, tag in enumerate(tags):
if tag not in self.labels:
continue
tag_idx = self.labels.index(tag)
self.label_cnts[tag_idx] += 1
for z in range(len(pred)):
if gold[i] in pred[z]:
self.label_recalls[z][tag_idx] += 1
def update_corpus_f1(self, pred, gold):
stats = torch.tensor([get_stats(pred[i], gold) for i in range(self.n)],
dtype=torch.float)
self.corpus_f1 += stats
def update_sentence_f1(self, pred, gold):
# sent-level F1 is based on L83-89 from
for i in range(self.n):
model_out, std_out = pred[i], gold
overlap = model_out.intersection(std_out)
prec = float(len(overlap)) / (len(model_out) + 1e-8)
reca = float(len(overlap)) / (len(std_out) + 1e-8)
if len(std_out) == 0:
reca = 1.
if len(model_out) == 0:
prec = 1.
f1 = 2 * prec * reca / (prec + reca + 1e-8)
self.sent_f1[i] += f1
def derive_final_score(self):
tp = self.corpus_f1[:, 0]
fp = self.corpus_f1[:, 1]
fn = self.corpus_f1[:, 2]
prec = tp / (tp + fp)
recall = tp / (tp + fn)
epsilon = 1e-8
self.corpus_f1 = 2 * prec * recall / (prec + recall + epsilon)
self.sent_f1 /= self.cnt
for i in range(len(self.label_recalls)):
for j in range(len(self.label_recalls[0])):
self.label_recalls[i][j] /= self.label_cnts[j]
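    # Summary of the metrics maintained above: corpus_f1 pools TP/FP/FN over the
    # whole corpus before computing precision/recall (one value per layer or head),
    # sent_f1 averages a per-sentence F1 over all sentences, and label_recalls[i][j]
    # is the fraction of gold constituents with label j recovered by layer/head i.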
| 2,521 | 35.550725 | 79 | py |
trees_from_transformers | trees_from_transformers-master/utils/tools.py | import logging
import random
import torch
specials = {'bert': '#', 'gpt2': 'Ġ', 'xlnet': '▁', 'roberta': 'Ġ'}
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
def select_indices(tokens, raw_tokens, model, mode):
mask = []
raw_i = 0
collapsed = ''
model = model.split('-')[0]
special = specials[model]
for i in range(len(tokens)):
token = tokens[i]
while len(token) > 0 and token[0] == special:
token = token[1:]
if collapsed == '' and len(token) > 0:
start_idx = i
collapsed += token
if collapsed == raw_tokens[raw_i]:
if mode == 'first':
mask.append(start_idx)
elif mode == 'last':
mask.append(i)
else:
raise NotImplementedError
raw_i += 1
collapsed = ''
if raw_i != len(raw_tokens):
raise Exception(f'Token mismatch: \n{tokens}\n{raw_tokens}')
return mask
def group_indices(tokens, raw_tokens, model):
mask = []
raw_i = 0
collapsed = ''
model = model.split('-')[0]
special = specials[model]
for i in range(len(tokens)):
token = tokens[i]
while len(token) > 0 and token[0] == special:
token = token[1:]
collapsed += token
mask.append(raw_i)
if collapsed == raw_tokens[raw_i]:
raw_i += 1
collapsed = ''
if raw_i != len(raw_tokens):
raise Exception(f'Token mismatch: \n{tokens}\n{raw_tokens}')
return torch.tensor(mask)
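# Example with GPT-2 style tokenisation: raw_tokens = ['the', 'cats'] may become
# tokens = ['Ġthe', 'Ġcat', 's']; then
#   select_indices(tokens, raw_tokens, 'gpt2', 'first') -> [0, 1]
#   select_indices(tokens, raw_tokens, 'gpt2', 'last')  -> [0, 2]
#   group_indices(tokens, raw_tokens, 'gpt2')           -> tensor([0, 1, 1])
# i.e. one representative sub-word per word, or a word index for every sub-word.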
| 1,612 | 24.603175 | 68 | py |
trees_from_transformers | trees_from_transformers-master/utils/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Extractor(nn.Module):
def __init__(self, n_hidden):
super(Extractor, self).__init__()
self.linear = nn.Linear(n_hidden * 2, 1)
nn.init.uniform_(self.linear.weight, -0.01, 0.01)
nn.init.uniform_(self.linear.bias, 0)
def forward(self, l, r):
h = torch.cat([l, r], dim=-1)
o = self.linear(h)
# (seq_len-1)
return o.squeeze(-1)
def loss(self, d, gold):
assert len(d) == len(gold)
gold = d.new_tensor(gold)
l = 0
for i in range(len(d)):
for j in range(i+1, len(d)):
l += F.relu(1 - torch.sign(gold[i]- gold[j]) * (d[i] - d[j]))
return l
| 752 | 27.961538 | 77 | py |
trees_from_transformers | trees_from_transformers-master/utils/measure.py | import math
import torch
import torch.nn.functional as F
from utils.score import Score
class Measure(object):
def __init__(self, n_layers, n_att):
self.h_measures = ['cos', 'l1', 'l2']
self.a_measures = ['hellinger', 'jsd']
self.a_avg_measures = ['avg_hellinger', 'avg_jsd']
self.measures = self.h_measures + self.a_measures + self.a_avg_measures
self.max_m_len = max([len(m) for m in self.measures]) + 2
self.scores = {m: Score(n_layers) for m in self.h_measures}
for m in self.a_measures:
self.scores[m] = Score(n_layers * n_att)
for m in self.a_avg_measures:
self.scores[m] = Score(n_layers)
def derive_dists(self, l_hidden, r_hidden, l_att, r_att):
syn_dists = {}
for m in self.h_measures:
syn_dists[m] = getattr(self, m)(l_hidden, r_hidden)
for m in self.a_measures:
syn_dists[m] = getattr(self, m)(l_att, r_att)
syn_dists[m] = syn_dists[m].view(-1, syn_dists[m].size(-1))
for m in self.a_avg_measures:
syn_dists[m] = getattr(self, m)(l_att, r_att)
return syn_dists
def derive_final_score(self):
for m in self.scores.keys():
self.scores[m].derive_final_score()
@staticmethod
def cos(l_hidden, r_hidden):
# (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
return (F.cosine_similarity(l_hidden, r_hidden, dim=-1) + 1) / 2
@staticmethod
def l1(l_hidden, r_hidden):
# (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
return torch.norm(l_hidden - r_hidden, p=1, dim=-1)
@staticmethod
def l2(l_hidden, r_hidden):
# (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
return torch.norm(l_hidden - r_hidden, p=2, dim=-1)
@staticmethod
def kl(p, q):
eps = 1e-30
p, q = p + eps, q + eps
p, q = p / p.sum(dim=-1, keepdim=True), q / q.sum(dim=-1, keepdim=True)
kl = F.kl_div(torch.log(q), p, reduction='none').sum(dim=-1)
# kl = (p * (torch.log(p) - torch.log(q))).sum(dim=-1)
# To deal with the numerical instability of the KL-div function in PyTorch
if (kl < 0).sum() > 0:
kl = kl * (1 - (kl < 0).float())
assert torch.isinf(kl).sum() == 0
assert torch.isnan(kl).sum() == 0
return kl
@staticmethod
def jsd(l_att, r_att):
m = (l_att + r_att) / 2
l_kl = Measure.kl(l_att, m)
r_kl = Measure.kl(r_att, m)
d = torch.sqrt((l_kl + r_kl) / 2)
assert (d < 0).sum() == 0
assert torch.isnan(d).sum() == 0
return d
@staticmethod
def hellinger(l_att, r_att):
d = (((l_att.sqrt() - r_att.sqrt()) ** 2).sum(dim=-1)).sqrt()
d /= math.sqrt(2)
return d
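    # hellinger is the standard Hellinger distance between two attention
    # distributions, H(p, q) = ||sqrt(p) - sqrt(q)||_2 / sqrt(2), bounded in [0, 1];
    # jsd is the square root of the Jensen-Shannon divergence (natural log), and the
    # avg_* variants average the per-head distances over the attention heads.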
@staticmethod
def avg_hellinger(l_att, r_att):
d = Measure.hellinger(l_att, r_att)
return d.mean(dim=1)
@staticmethod
def avg_jsd(l_att, r_att):
d = Measure.jsd(l_att, r_att)
return d.mean(dim=1) | 3,102 | 33.477778 | 82 | py |
trees_from_transformers | trees_from_transformers-master/data/dataset.py | from utils.yk import get_actions, get_nonbinary_spans, get_tags_tokens_lowercase
class Dataset(object):
def __init__(self, path, tokenizer):
self.path = path
self.tokenizer = tokenizer
self.cnt = 0
self.sents = []
self.raw_tokens = []
self.tokens = []
self.masks = []
self.gold_spans = []
self.gold_tags = []
self.gold_trees = []
flatten = lambda l: [item for sublist in l for item in sublist]
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
raw_tokens = get_tags_tokens_lowercase(line)[1]
sent = ' '.join(raw_tokens)
actions = get_actions(line)
self.cnt += 1
self.sents.append(sent)
self.raw_tokens.append(raw_tokens)
self.tokens.append(self.tokenizer.tokenize(sent))
mask = [len(self.tokenizer.tokenize(w)) * [i]
for i, w in enumerate(sent.split())]
self.masks.append(flatten(mask))
gold_spans, gold_tags, _, _ = get_nonbinary_spans(actions)
self.gold_spans.append(gold_spans)
self.gold_tags.append(gold_tags)
self.gold_trees.append(line.strip())
| 1,271 | 32.473684 | 80 | py |
SSTAP | SSTAP-main/main.py | import sys
from dataset import VideoDataSet, VideoDataSet_unlabel
from loss_function import bmn_loss_func, get_mask
import os
import json
import torch
import torch.nn.parallel
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import numpy as np
import opts
from ipdb import set_trace
from models import BMN, TemporalShift, TemporalShift_random
import pandas as pd
import random
from post_processing import BMN_post_processing
from eval import evaluation_proposal
seed = 400
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed) # Numpy module.
random.seed(seed) # Python random module.
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'
blue = lambda x: '\033[94m' + x + '\033[0m'
sys.dont_write_bytecode = True
global_step = 0
eval_loss = []
consistency_rampup = 5
consistency = 6 # 30 # 3 # None
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
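# Mean-teacher EMA: the effective decay is min(1 - 1/(step + 1), alpha), so the
# teacher starts as an exact copy of the student (decay 0 at step 0) and smoothly
# approaches the configured decay (0.999 in the training loops below).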
def softmax_mse_loss(input_logits, target_logits):
"""Takes softmax on both sides and returns MSE loss
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
# input_softmax = F.softmax(input_logits, dim=1)
# target_softmax = F.softmax(target_logits, dim=1)
# num_classes = input_logits.size()[1]
# return F.mse_loss(input_softmax, target_softmax, reduction='sum') / num_classes # size_average=False
return F.mse_loss(input_logits, target_logits, reduction='mean')
def softmax_kl_loss(input_logits, target_logits):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
# input_log_softmax = F.log_softmax(input_logits, dim=1)
# target_softmax = F.softmax(target_logits, dim=1)
# return F.kl_div(input_log_softmax, target_softmax, reduction='sum')
return F.kl_div(input_logits, target_logits, reduction='mean')
def Motion_MSEloss(output, clip_label, motion_mask=None):
    # Masked MSE for the reconstruction branch; the default mask keeps every temporal
    # position (avoids the original CUDA-tensor default argument evaluated at import).
    if motion_mask is None:
        motion_mask = torch.ones(output.shape[-1], device=output.device)
    z = torch.pow((output - clip_label), 2)
    loss = torch.mean(motion_mask * z)
    return loss
def sigmoid_rampup(current, rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
return float(np.exp(-5.0 * phase * phase))
def linear_rampup(current, rampup_length):
"""Linear rampup"""
assert current >= 0 and rampup_length >= 0
if current >= rampup_length:
return 1.0
else:
return current / rampup_length
def cosine_rampdown(current, rampdown_length):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
assert 0 <= current <= rampdown_length
return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return consistency * sigmoid_rampup(epoch, consistency_rampup)
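# With consistency = 6 and consistency_rampup = 5 (set above), the weight follows the
# sigmoid ramp-up: roughly 6 * 0.007 at epoch 0, 6 * 0.29 at epoch 2.5, and the full
# 6.0 from epoch 5 onwards, so the consistency terms are phased in gradually.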
def train_BMN(data_loader, model, optimizer, epoch, bm_mask):
model.train()
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = model(input_data) # [B, 2, 100, 100], [B,100],[B,100]
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda()) # loss = tem_loss + 10 * pem_reg_loss + pem_cls_loss
# return loss, tem_loss, pem_reg_loss, pem_cls_loss
optimizer.zero_grad()
loss[0].backward()
optimizer.step()
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
print(
"BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1)))
def train_BMN_Semi(data_loader, train_loader_unlabel, model, model_ema, optimizer, epoch, bm_mask):
global global_step
model.train()
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
consistency_loss_all = 0
consistency_loss_ema_all = 0
consistency_criterion = softmax_mse_loss # softmax_kl_loss
temporal_perb = TemporalShift_random(400, 64)
order_clip_criterion = nn.CrossEntropyLoss()
consistency = True
clip_order = True
dropout2d = True
temporal_re = True
unlabeled_train_iter = iter(train_loader_unlabel)
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
input_data_student = temporal_perb(input_data)
if dropout2d:
input_data_student = F.dropout2d(input_data_student, 0.2)
else:
input_data_student = F.dropout(input_data_student, 0.2)
confidence_map, start, end = model(input_data_student) # [B, 2, 100, 100], [B,100],[B,100]
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
confidence_map = confidence_map * bm_mask.cuda()
if temporal_re:
input_recons = F.dropout2d(input_data.permute(0,2,1), 0.2).permute(0,2,1)
else:
input_recons = F.dropout2d(input_data, 0.2)
recons_feature = model(input_recons, recons=True)
        # Draw the next unlabeled batch, restarting the iterator once it is exhausted
        try:
            input_data_unlabel = next(unlabeled_train_iter)
        except StopIteration:
            unlabeled_train_iter = iter(train_loader_unlabel)
            input_data_unlabel = next(unlabeled_train_iter)
        input_data_unlabel = input_data_unlabel.cuda()
input_data_unlabel_student = temporal_perb(input_data_unlabel)
if dropout2d:
input_data_unlabel_student = F.dropout2d(input_data_unlabel_student, 0.2)
else:
input_data_unlabel_student = F.dropout(input_data_unlabel_student, 0.2)
confidence_map_unlabel_student, start_unlabel_student, end_unlabel_student = model(input_data_unlabel_student)
confidence_map_unlabel_student = confidence_map_unlabel_student * bm_mask.cuda()
# label
input_data_label_student_flip = F.dropout2d(input_data.flip(2).contiguous(), 0.1)
confidence_map_label_student_flip, start_label_student_flip, end_label_student_flip = model(
input_data_label_student_flip)
confidence_map_label_student_flip = confidence_map_label_student_flip * bm_mask.cuda()
# unlabel
input_data_unlabel_student_flip = F.dropout2d(input_data_unlabel.flip(2).contiguous(), 0.1)
confidence_map_unlabel_student_flip, start_unlabel_student_flip, end_unlabel_student_flip = model(
input_data_unlabel_student_flip)
confidence_map_unlabel_student_flip = confidence_map_unlabel_student_flip * bm_mask.cuda()
if temporal_re:
recons_input_student = F.dropout2d(input_data_unlabel.permute(0,2,1), 0.2).permute(0,2,1)
else:
recons_input_student = F.dropout2d(input_data_unlabel, 0.2)
recons_feature_unlabel_student = model(recons_input_student, recons=True)
loss_recons = 0.0005 * (
Motion_MSEloss(recons_feature, input_data) + Motion_MSEloss(recons_feature_unlabel_student,
input_data_unlabel)) # 0.0001
with torch.no_grad():
# input_data_unlabel = input_data_unlabel.cuda()
input_data_ema = F.dropout(input_data, 0.05) # 0.3
confidence_map_teacher, start_teacher, end_teacher = model_ema(input_data_ema)
confidence_map_teacher = confidence_map_teacher * bm_mask.cuda()
input_data_unlabel_teacher = F.dropout(input_data_unlabel, 0.05) # 0.3
confidence_map_unlabel_teacher, start_unlabel_teacher, end_unlabel_teacher = model_ema(
input_data_unlabel_teacher)
confidence_map_unlabel_teacher = confidence_map_unlabel_teacher * bm_mask.cuda()
            # flip (unlabel)
out = torch.zeros_like(confidence_map_unlabel_teacher)
out_m = confidence_map_unlabel_teacher.flip(3).contiguous()
for i in range(100):
out[:, :, i, :100 - i] = out_m[:, :, i, i:]
confidence_map_unlabel_teacher_flip = out
            # flip (label)
out = torch.zeros_like(confidence_map_teacher)
out_m = confidence_map_teacher.flip(3).contiguous()
for i in range(100):
out[:, :, i, :100 - i] = out_m[:, :, i, i:]
confidence_map_label_teacher_flip = out
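            # The two blocks above turn the teacher's confidence maps into targets
            # for the temporally flipped clips: flip(3) reverses the start axis, and
            # out[:, :, i, :100 - i] = out_m[:, :, i, i:] shifts each duration row so
            # that a proposal (start, duration) in the flipped clip lines up with its
            # counterpart in the original clip.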
# start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
# end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
# add mask
start_unlabel_teacher[start_unlabel_teacher >= 0.9] = 1.0
start_unlabel_teacher[start_unlabel_teacher <= 0.1] = 0.0 # 2_add
end_unlabel_teacher[end_unlabel_teacher >= 0.9] = 1.0
end_unlabel_teacher[end_unlabel_teacher <= 0.1] = 0.0
# flip (label)
start_label_teacher_flip = start_teacher.flip(1).contiguous()
end_label_teacher_flip = end_teacher.flip(1).contiguous()
# flip (unlabel)
start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
mask = torch.eq(
(start_unlabel_teacher.max(1)[0] > 0.6).float() + (end_unlabel_teacher.max(1)[0] > 0.6).float(), 2.)
confidence_map_unlabel_teacher = confidence_map_unlabel_teacher[mask]
start_unlabel_teacher = start_unlabel_teacher[mask]
end_unlabel_teacher = end_unlabel_teacher[mask]
# flip
confidence_map_unlabel_teacher_flip = confidence_map_unlabel_teacher_flip[mask]
start_unlabel_teacher_flip = start_unlabel_teacher_flip[mask]
end_unlabel_teacher_flip = end_unlabel_teacher_flip[mask]
# add mask
confidence_map_unlabel_student = confidence_map_unlabel_student[mask]
start_unlabel_student = start_unlabel_student[mask]
end_unlabel_student = end_unlabel_student[mask]
# flip add mask
confidence_map_unlabel_student_flip = confidence_map_unlabel_student_flip[mask]
start_unlabel_student_flip = start_unlabel_student_flip[mask]
end_unlabel_student_flip = end_unlabel_student_flip[mask]
if consistency:
consistency_weight = get_current_consistency_weight(epoch)
# meters.update('cons_weight', consistency_weight)
# set_trace()
consistency_loss = consistency_weight * (consistency_criterion(confidence_map, confidence_map_teacher) +
consistency_criterion(start, start_teacher) +
consistency_criterion(end, end_teacher))
consistency_loss_ema = consistency_weight * (
consistency_criterion(confidence_map_unlabel_teacher, confidence_map_unlabel_student) +
consistency_criterion(start_unlabel_teacher, start_unlabel_student) +
consistency_criterion(end_unlabel_teacher, end_unlabel_student))
# set_trace()
if torch.isnan(consistency_loss_ema):
consistency_loss_ema = torch.tensor(0.).cuda()
consistency_loss_ema_flip = 0.1 * consistency_weight * (
consistency_criterion(confidence_map_unlabel_teacher_flip, confidence_map_unlabel_student_flip) +
consistency_criterion(start_unlabel_teacher_flip, start_unlabel_student_flip) +
consistency_criterion(end_unlabel_teacher_flip, end_unlabel_student_flip)) + 0.1 * consistency_weight * (
consistency_criterion(confidence_map_label_teacher_flip, confidence_map_label_student_flip) +
consistency_criterion(start_label_teacher_flip, start_label_student_flip) +
consistency_criterion(end_label_teacher_flip, end_label_student_flip))
# meters.update('cons_loss', consistency_loss.item())
else:
consistency_loss = torch.tensor(0).cuda()
consistency_loss_ema = torch.tensor(0).cuda()
consistency_loss_ema_flip = torch.tensor(0).cuda()
# meters.update('cons_loss', 0)
if clip_order:
input_data_all = torch.cat([input_data, input_data_unlabel], 0)
batch_size, C, T = input_data_all.size()
idx = torch.randperm(batch_size)
input_data_all_new = input_data_all[idx]
forw_input = torch.cat(
[input_data_all_new[:batch_size // 2, :, T // 2:], input_data_all_new[:batch_size // 2, :, :T // 2]], 2)
back_input = input_data_all_new[batch_size // 2:, :, :]
input_all = torch.cat([forw_input, back_input], 0)
label_order = [0] * (batch_size // 2) + [1] * (batch_size - batch_size // 2)
label_order = torch.tensor(label_order).long().cuda()
out = model(input_all, clip_order=True)
loss_clip_order = order_clip_criterion(out, label_order)
loss_all = loss[0] + consistency_loss + consistency_loss_ema + loss_recons + 0.01 * loss_clip_order + consistency_loss_ema_flip
optimizer.zero_grad()
loss_all.backward()
optimizer.step()
global_step += 1
update_ema_variables(model, model_ema, 0.999, float(global_step/20)) # //5 //25
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
consistency_loss_all += consistency_loss.cpu().detach().numpy()
consistency_loss_ema_all += consistency_loss_ema.cpu().detach().numpy()
if n_iter % 10 == 0:
print(
"training %d (epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, consistency_loss: %.05f, consistency_loss_ema: %.05f, total_loss: %.03f" % (global_step,
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
consistency_loss_all / (n_iter + 1),
consistency_loss_ema_all / (n_iter + 1),
epoch_loss / (n_iter + 1)))
print(
blue("BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
def train_BMN_Semi_Full(data_loader, model, model_ema, optimizer, epoch, bm_mask):
global global_step
model.train()
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
consistency_loss_all = 0
consistency_loss_ema_all = 0
consistency_criterion = softmax_mse_loss # softmax_kl_loss
# perturbance = nn.dropout(0.3)
temporal_perb = TemporalShift_random(400, 64) # TemporalShift(400, 8) 16
order_clip_criterion = nn.CrossEntropyLoss()
consistency = True
clip_order = True
dropout2d = True
temporal_re = True
# unlabeled_train_iter = iter(train_loader_unlabel)
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
input_data_student = temporal_perb(input_data)
if dropout2d:
input_data_student = F.dropout2d(input_data_student, 0.2)
else:
input_data_student = F.dropout(input_data_student, 0.2)
confidence_map, start, end = model(input_data_student) # [B, 2, 100, 100], [B,100],[B,100]
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
confidence_map = confidence_map * bm_mask.cuda()
if temporal_re:
input_recons = F.dropout2d(input_data.permute(0, 2, 1), 0.2).permute(0, 2, 1)
else:
input_recons = F.dropout2d(input_data, 0.2)
recons_feature = model(input_recons, recons=True)
# try:
# input_data_unlabel= unlabeled_train_iter.next()
# input_data_unlabel = input_data_unlabel.cuda()
# except:
# unlabeled_train_iter = iter(train_loader_unlabel)
# input_data_unlabel = unlabeled_train_iter.next()
# input_data_unlabel = input_data_unlabel.cuda()
# input_data_unlabel = F.dropout2d(input_data_unlabel.cuda(), 0.2)
# input_data_unlabel_student = temporal_perb(input_data_unlabel)
# if dropout2d:
# input_data_unlabel_student = F.dropout2d(input_data_unlabel_student, 0.2)
# else:
# input_data_unlabel_student = F.dropout(input_data_unlabel_student, 0.2)
# confidence_map_unlabel_student, start_unlabel_student, end_unlabel_student = model(input_data_unlabel_student)
# confidence_map_unlabel_student = confidence_map_unlabel_student * bm_mask.cuda()
input_data_label_student_flip = F.dropout2d(input_data.flip(2).contiguous(), 0.1)
confidence_map_label_student_flip, start_label_student_flip, end_label_student_flip = model(
input_data_label_student_flip)
confidence_map_label_student_flip = confidence_map_label_student_flip * bm_mask.cuda()
# recons_input_student = F.dropout2d(input_data_unlabel.cuda(), 0.2)
# recons_feature_unlabel_student = model(recons_input_student, recons=True)
# set_trace()
loss_recons = 0.0005 * (
Motion_MSEloss(recons_feature, input_data)) # 0.0001
with torch.no_grad():
# input_data_unlabel = input_data_unlabel.cuda()
input_data_ema = F.dropout(input_data, 0.05) # 0.3
confidence_map_teacher, start_teacher, end_teacher = model_ema(input_data_ema)
confidence_map_teacher = confidence_map_teacher * bm_mask.cuda()
# input_data_unlabel_teacher = F.dropout(input_data_unlabel, 0.05) # 0.3
# confidence_map_unlabel_teacher, start_unlabel_teacher, end_unlabel_teacher = model_ema(
# input_data_unlabel_teacher)
# confidence_map_unlabel_teacher = confidence_map_unlabel_teacher * bm_mask.cuda()
# flip
out = torch.zeros_like(confidence_map_teacher)
out_m = confidence_map_teacher.flip(3).contiguous()
for i in range(100):
out[:, :, i, :100 - i] = out_m[:, :, i, i:]
confidence_map_label_teacher = out
# start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
# end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
# add mask
# start_label_teacher[start_label_teacher >= 0.9] = 1.0
# start_label_teacher[start_label_teacher <= 0.1] = 0.0 # 2_add
# end_unlabel_teacher[end_unlabel_teacher >= 0.9] = 1.0
# end_unlabel_teacher[end_unlabel_teacher <= 0.1] = 0.0
# flip
start_label_teacher_flip = label_start.flip(1).contiguous()
end_label_teacher_flip = label_end.flip(1).contiguous()
# mask = torch.eq(
# (start_unlabel_teacher.max(1)[0] > 0.6).float() + (end_unlabel_teacher.max(1)[0] > 0.6).float(), 2.)
# confidence_map_unlabel_teacher = confidence_map_unlabel_teacher[mask]
# start_unlabel_teacher = start_unlabel_teacher[mask]
# end_unlabel_teacher = end_unlabel_teacher[mask]
# flip
# confidence_map_unlabel_teacher_flip = confidence_map_unlabel_teacher_flip[mask]
# start_unlabel_teacher_flip = start_unlabel_teacher_flip[mask]
# end_unlabel_teacher_flip = end_unlabel_teacher_flip[mask]
# add mask
# confidence_map_unlabel_student = confidence_map_unlabel_student[mask]
# start_unlabel_student = start_unlabel_student[mask]
# end_unlabel_student = end_unlabel_student[mask]
# flip add mask
# confidence_map_unlabel_student_flip = confidence_map_label_student_flip[mask]
# start_unlabel_student_flip = start_label_student_flip[mask]
# end_unlabel_student_flip = end_label_student_flip[mask]
if consistency:
consistency_weight = get_current_consistency_weight(epoch)
# meters.update('cons_weight', consistency_weight)
# set_trace()
consistency_loss = consistency_weight * (consistency_criterion(confidence_map, confidence_map_teacher) +
consistency_criterion(start, start_teacher) +
consistency_criterion(end, end_teacher))
consistency_loss_ema_flip = 0.1 * consistency_weight * (
consistency_criterion(confidence_map_label_student_flip, confidence_map_label_teacher) +
consistency_criterion(start_label_student_flip, start_label_teacher_flip) +
consistency_criterion(end_label_student_flip, end_label_teacher_flip))
# consistency_loss_ema_flip = 0.1 * consistency_weight * (
# consistency_criterion(confidence_map_label_teacher, confidence_map_label_student_flip) +
# consistency_criterion(start_label_teacher_flip, start_label_student_flip) +
# consistency_criterion(end_label_teacher_flip, end_label_student_flip))
# meters.update('cons_loss', consistency_loss.item())
else:
consistency_loss = torch.tensor(0).cuda()
consistency_loss_ema = torch.tensor(0).cuda()
consistency_loss_ema_flip = torch.tensor(0).cuda()
# meters.update('cons_loss', 0)
if clip_order:
input_data_all = input_data # torch.cat([input_data, input_data_unlabel], 0)
batch_size, C, T = input_data_all.size()
idx = torch.randperm(batch_size)
input_data_all_new = input_data_all[idx]
forw_input = torch.cat(
[input_data_all_new[:batch_size // 2, :, T // 2:], input_data_all_new[:batch_size // 2, :, :T // 2]], 2)
back_input = input_data_all_new[batch_size // 2:, :, :]
input_all = torch.cat([forw_input, back_input], 0)
label_order = [0] * (batch_size // 2) + [1] * (batch_size - batch_size // 2)
label_order = torch.tensor(label_order).long().cuda()
out = model(input_all, clip_order=True)
loss_clip_order = order_clip_criterion(out, label_order)
loss_all = loss[0] + consistency_loss + loss_recons + 0.01 * loss_clip_order + consistency_loss_ema_flip
optimizer.zero_grad()
loss_all.backward()
optimizer.step()
global_step += 1
update_ema_variables(model, model_ema, 0.999, float(global_step/20)) # //5 //25
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
consistency_loss_all += consistency_loss.cpu().detach().numpy()
# consistency_loss_ema_all += consistency_loss_ema.cpu().detach().numpy()
if n_iter % 10 == 0:
print(
"training %d (epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, consistency_loss: %.05f, total_loss: %.03f" % (global_step,
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
consistency_loss_all / (n_iter + 1),
# consistency_loss_ema_all / (n_iter + 1),
epoch_loss / (n_iter + 1)))
print(
blue("BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
def test_BMN(data_loader, model, epoch, bm_mask):
global eval_loss
model.eval()
best_loss = 1e10
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = model(input_data)
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
print(
blue("BMN val loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
eval_loss.append(epoch_loss / (n_iter + 1))
state = {'epoch': epoch + 1,
'state_dict': model.state_dict()}
torch.save(state, opt["checkpoint_path"] + "/BMN_checkpoint.pth.tar") # ./checkpoint
if epoch_loss < model.module.tem_best_loss:
model.module.tem_best_loss = epoch_loss
torch.save(state, opt["checkpoint_path"] + "/BMN_best.pth.tar")
# eval_loss.append(epoch_loss / (n_iter + 1))
opt_file = open(opt["checkpoint_path"] + "/output_eval_loss.json", "w")
json.dump(eval_loss, opt_file)
opt_file.close()
def test_BMN_ema(data_loader, model, epoch, bm_mask):
model.eval()
best_loss = 1e10
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = model(input_data)
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
print(
blue("BMN val_ema loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
state = {'epoch': epoch + 1,
'state_dict': model.state_dict()}
torch.save(state, opt["checkpoint_path"] + "/BMN_checkpoint_ema.pth.tar") # ./checkpoint
if epoch_loss < model.module.tem_best_loss:
model.module.tem_best_loss = epoch_loss
torch.save(state, opt["checkpoint_path"] + "/BMN_best_ema.pth.tar")
def BMN_Train(opt):
model = BMN(opt)
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
model_ema = BMN(opt)
model_ema = torch.nn.DataParallel(model_ema, device_ids=[0, 1, 2, 3]).cuda()
for param in model_ema.parameters():
param.detach_()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=opt["training_lr"],
weight_decay=opt["weight_decay"]) # 1e-4
train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="train"), # [16,400,100]
batch_size=opt["batch_size"], shuffle=True, drop_last=True,
num_workers=8, pin_memory=True)
if opt['use_semi'] and opt['unlabel_percent'] > 0.:
train_loader_unlabel = torch.utils.data.DataLoader(VideoDataSet_unlabel(opt, subset="unlabel"), # [16,400,100]
batch_size=min(max(round(opt["batch_size"]*opt['unlabel_percent']/(4*(1.-opt['unlabel_percent'])))*4, 4), 24), shuffle=True,drop_last=True,
num_workers=8, pin_memory=True)
test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
batch_size=opt["batch_size"], shuffle=False,
num_workers=8, pin_memory=True)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opt["step_size"], gamma=opt["step_gamma"]) # 7 0.1
bm_mask = get_mask(opt["temporal_scale"])
use_semi = opt['use_semi']
    print('using {} of the labels for training!!!'.format(1 - opt['unlabel_percent']))
print('training batchsize : {}'.format(opt["batch_size"]))
print('unlabel_training batchsize : {}'.format(min(max(round(opt["batch_size"]*opt['unlabel_percent']/(4*(1.-opt['unlabel_percent'])))*4, 4), 24)))
for epoch in range(opt["train_epochs"]): # 9
# scheduler.step()
if use_semi:
if opt['unlabel_percent'] == 0.:
print('use Semi !!! use all label !!!')
train_BMN_Semi_Full(train_loader, model, model_ema, optimizer, epoch, bm_mask)
test_BMN(test_loader, model, epoch, bm_mask)
test_BMN_ema(test_loader, model_ema, epoch, bm_mask)
else:
print('use Semi !!!')
train_BMN_Semi(train_loader, train_loader_unlabel, model, model_ema, optimizer, epoch, bm_mask)
test_BMN(test_loader, model, epoch, bm_mask)
test_BMN_ema(test_loader, model_ema, epoch, bm_mask)
else:
print('use Fewer label !!!')
train_BMN(train_loader, model, optimizer, epoch, bm_mask)
test_BMN(test_loader, model, epoch, bm_mask)
scheduler.step()
def BMN_inference(opt, eval_name):
model = BMN(opt)
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
model_checkpoint_dir = opt["checkpoint_path"] + eval_name # BMN_checkpoint.pth.tar BMN_best.pth.tar
checkpoint = torch.load(model_checkpoint_dir) # BMN_best.pth.tar
print('load :', model_checkpoint_dir, ' OK !')
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
batch_size=8, shuffle=False,
num_workers=8, pin_memory=True, drop_last=False)
tscale = opt["temporal_scale"]
with torch.no_grad():
for idx, input_data in test_loader:
# set_trace()
length = idx.shape[0]
# for ii in range(length):
video_name = []
for ii in range(length):
video_name_video = test_loader.dataset.video_list[idx[ii]]
video_name.append(video_name_video)
input_data = input_data.cuda()
confidence_map, start, end = model(input_data)
# set_trace()
for ii in range(length):
start_scores = start[ii].detach().cpu().numpy()
end_scores = end[ii].detach().cpu().numpy()
clr_confidence = (confidence_map[ii][1]).detach().cpu().numpy()
reg_confidence = (confidence_map[ii][0]).detach().cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,1]
for idx in range(1, tscale - 1):
if start_scores[idx] > start_scores[idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1
for idx in range(1, tscale - 1):
if end_scores[idx] > end_scores[idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
#
new_props = []
for idx in range(tscale):
for jdx in range(tscale):
start_index = jdx
end_index = start_index + idx+1
if end_index < tscale and start_bins[start_index] == 1 and end_bins[end_index] == 1:
xmin = start_index/tscale
xmax = end_index/tscale
xmin_score = start_scores[start_index]
xmax_score = end_scores[end_index]
clr_score = clr_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = xmin_score * xmax_score * clr_score*reg_score
new_props.append([xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score])
new_props = np.stack(new_props)
col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_socre", "score"]
new_df = pd.DataFrame(new_props, columns=col_name)
new_df.to_csv("./output/BMN_results/" + video_name[ii] + ".csv", index=False)
def BMN_inference_ema(opt, eval_name):
model = BMN(opt)
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
model_checkpoint_dir = opt["checkpoint_path"] + eval_name # BMN_checkpoint.pth.tar BMN_best.pth.tar
checkpoint = torch.load(model_checkpoint_dir) # BMN_best.pth.tar
print('load :', model_checkpoint_dir, ' OK !')
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
batch_size=8, shuffle=False,
num_workers=8, pin_memory=True, drop_last=False)
tscale = opt["temporal_scale"]
with torch.no_grad():
for idx, input_data in test_loader:
# set_trace()
length = idx.shape[0]
# for ii in range(length):
video_name = []
for ii in range(length):
video_name_video = test_loader.dataset.video_list[idx[ii]]
video_name.append(video_name_video)
input_data = input_data.cuda()
confidence_map, start, end = model(input_data)
# set_trace()
for ii in range(length):
start_scores = start[ii].detach().cpu().numpy()
end_scores = end[ii].detach().cpu().numpy()
clr_confidence = (confidence_map[ii][1]).detach().cpu().numpy()
reg_confidence = (confidence_map[ii][0]).detach().cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,1]
for idx in range(1, tscale - 1):
if start_scores[idx] > start_scores[idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1
for idx in range(1, tscale - 1):
if end_scores[idx] > end_scores[idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
new_props = []
for idx in range(tscale):
for jdx in range(tscale):
start_index = jdx
end_index = start_index + idx+1
if end_index < tscale and start_bins[start_index] == 1 and end_bins[end_index] == 1:
xmin = start_index/tscale
xmax = end_index/tscale
xmin_score = start_scores[start_index]
xmax_score = end_scores[end_index]
clr_score = clr_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = xmin_score * xmax_score * clr_score*reg_score
new_props.append([xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score])
new_props = np.stack(new_props)
col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_socre", "score"]
new_df = pd.DataFrame(new_props, columns=col_name)
new_df.to_csv("./output/BMN_results/" + video_name[ii] + ".csv", index=False)
def main(opt):
if opt["mode"] == "train":
BMN_Train(opt)
elif opt["mode"] == "inference":
if not os.path.exists("output/BMN_results"):
os.makedirs("output/BMN_results")
print('unlabel percent: ', opt['unlabel_percent'])
print('eval student model !!')
for eval_name in ['/BMN_checkpoint.pth.tar', '/BMN_best.pth.tar']:
BMN_inference(opt, eval_name)
print("Post processing start")
BMN_post_processing(opt)
print("Post processing finished")
evaluation_proposal(opt)
print('eval teacher model !!')
for eval_name in ['/BMN_checkpoint_ema.pth.tar', '/BMN_best_ema.pth.tar']:
BMN_inference_ema(opt, eval_name)
print("Post processing start")
BMN_post_processing(opt)
print("Post processing finished")
evaluation_proposal(opt)
if __name__ == '__main__':
opt = opts.parse_opt()
opt = vars(opt)
if not os.path.exists(opt["checkpoint_path"]):
os.makedirs(opt["checkpoint_path"])
if not os.path.exists('./output'):
os.makedirs('./output')
opt_file = open(opt["checkpoint_path"] + "/opts.json", "w")
json.dump(opt, opt_file)
opt_file.close()
main(opt)
| 42,436 | 48.173812 | 190 | py |
SSTAP | SSTAP-main/gen_unlabel_videos.py | import numpy as np
import pandas as pd
import json
import random
def load_json(file):
with open(file) as json_file:
json_data = json.load(json_file)
return json_data
anno_df = pd.read_csv("./data/activitynet_annotations/video_info_new.csv")
anno_database = load_json("./data/activitynet_annotations/anet_anno_action.json")
subset = 'training'
training_video = []
action_dict = {}
action_dict_num = {}
# get all training video names
for i in range(len(anno_df)):
video_name = anno_df.video.values[i]
video_info = anno_database[video_name]
video_subset = anno_df.subset.values[i]
if subset in video_subset:
training_video.append(video_name)
label = video_info["annotations"][0]['label']
if label not in action_dict:
action_dict[label] = [video_name]
else:
action_dict[label].append(video_name)
for label_name in action_dict:
action_dict_num[label_name] = len(action_dict[label_name])
opt_file = open("./data/activitynet_annotations/per_label_num.json", "w")
json.dump(action_dict_num, opt_file)
opt_file.close()
# unlabel percents
label_percent = np.linspace(0.1, 0.9, 9)
# unlabeled_video = []
for percent in label_percent:
unlabeled_video = []
new_props = []
for label_name in action_dict:
unlabeled_video.extend(random.sample(action_dict[label_name], round(percent*len(action_dict[label_name]))))
for i in range(len(anno_df)):
video_name = anno_df.video.values[i]
numFrame = anno_df.numFrame.values[i]
seconds = anno_df.seconds.values[i]
fps = anno_df.fps.values[i]
rfps = anno_df.rfps.values[i]
featureFrame = anno_df.featureFrame.values[i]
video_info = anno_database[video_name]
video_subset = anno_df.subset.values[i]
if video_name in unlabeled_video:
new_props.append([video_name, numFrame, seconds, fps, rfps, 'training_unlabel', featureFrame])
else:
new_props.append([video_name, numFrame, seconds, fps, rfps, video_subset, featureFrame])
new_props = np.stack(new_props)
col_name = ["video", "numFrame", "seconds", "fps", "rfps", "subset", "featureFrame"]
new_df = pd.DataFrame(new_props, columns=col_name)
new_df.to_csv("./data/activitynet_annotations/video_info_new_{}.csv".format(round(percent, 1)), index=False) | 2,370 | 36.046875 | 115 | py |
SSTAP | SSTAP-main/opts.py | import argparse
def parse_opt():
parser = argparse.ArgumentParser()
# Overall settings
parser.add_argument(
'--mode',
type=str,
default='train')
parser.add_argument(
'--checkpoint_path',
type=str,
default='./checkpoint')
parser.add_argument(
'--use_semi',
type=bool,
default=True)
parser.add_argument(
'--training_lr',
type=float,
default=0.001)
parser.add_argument(
'--unlabel_percent',
type=float,
default=0.5) # 0.5
parser.add_argument(
'--weight_decay',
type=float,
default=1e-4)
parser.add_argument(
'--train_epochs',
type=int,
default=10)
parser.add_argument(
'--batch_size',
type=int,
default=16) # 16
parser.add_argument(
'--step_size',
type=int,
default=7)
parser.add_argument(
'--step_gamma',
type=float,
default=0.1)
# Overall Dataset settings
parser.add_argument(
'--video_info',
type=str,
default="./data/activitynet_annotations/video_info_new.csv")
parser.add_argument(
'--video_anno',
type=str,
default="./data/activitynet_annotations/anet_anno_action.json")
parser.add_argument(
'--temporal_scale',
type=int,
default=100)
parser.add_argument(
'--feature_path',
type=str,
default="../BSN/data/activitynet_feature_cuhk/")
parser.add_argument(
'--num_sample',
type=int,
default=32)
parser.add_argument(
'--num_sample_perbin',
type=int,
default=3)
parser.add_argument(
'--prop_boundary_ratio',
        type=float,  # the default 0.5 is a ratio, not an int
default=0.5)
parser.add_argument(
'--feat_dim',
type=int,
default=400)
# Post processing
parser.add_argument(
'--post_process_thread',
type=int,
default=8)
parser.add_argument(
'--soft_nms_alpha',
type=float,
default=0.4)
parser.add_argument(
'--soft_nms_low_thres',
type=float,
default=0.5)
parser.add_argument(
'--soft_nms_high_thres',
type=float,
default=0.9)
parser.add_argument(
'--result_file',
type=str,
default="./output/result_proposal.json")
parser.add_argument(
'--save_fig_path',
type=str,
default="./output/evaluation_result.jpg")
args = parser.parse_args()
return args
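# Illustrative usage (added note, not part of the original file; the entry-point name is
# assumed to be main.py): the training script turns these options into a plain dict via
# `opt = vars(opts.parse_opt())`, so typical command lines look like
#   python main.py --mode train --unlabel_percent 0.5 --batch_size 16
#   python main.py --mode inference --unlabel_percent 0.5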
| 2,615 | 21.747826 | 71 | py |
SSTAP | SSTAP-main/utils.py | import numpy as np
def ioa_with_anchors(anchors_min, anchors_max, box_min, box_max):
# calculate the overlap proportion between the anchor and all bbox for supervise signal,
# the length of the anchor is 0.01
len_anchors = anchors_max - anchors_min
int_xmin = np.maximum(anchors_min, box_min)
int_xmax = np.minimum(anchors_max, box_max)
inter_len = np.maximum(int_xmax - int_xmin, 0.)
scores = np.divide(inter_len, len_anchors)
return scores
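# Worked example (added, illustrative only): for an anchor [0.10, 0.20] and a ground-truth
# box [0.15, 0.40] the intersection is 0.05 and the anchor length is 0.10, so
# ioa_with_anchors(0.10, 0.20, 0.15, 0.40) == 0.5, i.e. half of the anchor is covered by the
# box, independently of how long the box itself is.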
def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
"""Compute jaccard score between a box and the anchors.
"""
len_anchors = anchors_max - anchors_min
int_xmin = np.maximum(anchors_min, box_min)
int_xmax = np.minimum(anchors_max, box_max)
inter_len = np.maximum(int_xmax - int_xmin, 0.)
union_len = len_anchors - inter_len + box_max - box_min
# print inter_len,union_len
jaccard = np.divide(inter_len, union_len)
return jaccard | 960 | 37.44 | 92 | py |
SSTAP | SSTAP-main/dataset.py | import numpy as np
import pandas as pd
import json
import torch.utils.data as data
import torch
from utils import ioa_with_anchors, iou_with_anchors
from ipdb import set_trace
def load_json(file):
with open(file) as json_file:
json_data = json.load(json_file)
return json_data
class VideoDataSet(data.Dataset):
def __init__(self, opt, subset="train"):
self.temporal_scale = opt["temporal_scale"] # 100
self.temporal_gap = 1. / self.temporal_scale
self.subset = subset
self.mode = opt["mode"]
self.feature_path = opt["feature_path"]
self.video_info_path = "./data/activitynet_annotations/video_info_new_{}.csv".format(opt['unlabel_percent'])
self.video_anno_path = opt["video_anno"]
self._getDatasetDict()
self._get_match_map()
# set_trace()
def _getDatasetDict(self):
anno_df = pd.read_csv(self.video_info_path)
anno_database = load_json(self.video_anno_path)
self.video_dict = {}
for i in range(len(anno_df)):
video_name = anno_df.video.values[i]
video_info = anno_database[video_name]
video_subset = anno_df.subset.values[i]
if self.subset in video_subset:
if 'unlabel' not in video_subset:
self.video_dict[video_name] = video_info
self.video_list = list(self.video_dict.keys())
print("%s subset video numbers: %d" % (self.subset, len(self.video_list)))
def __getitem__(self, index):
video_data = self._load_file(index)
if self.mode == "train":
match_score_start, match_score_end, confidence_score = self._get_train_label(index, self.anchor_xmin,
self.anchor_xmax)
return video_data,confidence_score, match_score_start, match_score_end # [400,100],[100,100],[100]
else:
return index, video_data
def _get_match_map(self):
match_map = []
for idx in range(self.temporal_scale):
tmp_match_window = []
xmin = self.temporal_gap * idx
for jdx in range(1, self.temporal_scale + 1):
xmax = xmin + self.temporal_gap * jdx
tmp_match_window.append([xmin, xmax])
match_map.append(tmp_match_window)
match_map = np.array(match_map) # 100x100x2
match_map = np.transpose(match_map, [1, 0, 2]) # [0,1] [1,2] [2,3].....[99,100]
match_map = np.reshape(match_map, [-1, 2]) # [0,2] [1,3] [2,4].....[99,101] # duration x start
self.match_map = match_map # duration is same in row, start is same in col [10000,2]
self.anchor_xmin = [self.temporal_gap * (i-0.5) for i in range(self.temporal_scale)] # [-0.5/100,0.5/100,...98.5/100]
self.anchor_xmax = [self.temporal_gap * (i+0.5) for i in range(1, self.temporal_scale + 1)] # [1.5/100,...,100.5/100]
def _load_file(self, index):
video_name = self.video_list[index]
video_df = pd.read_csv(self.feature_path + "csv_mean_" + str(self.temporal_scale) + "/" + video_name + ".csv")
video_data = video_df.values[:, :]
video_data = torch.Tensor(video_data)
video_data = torch.transpose(video_data, 0, 1)
        video_data = video_data.float()  # .float() is not in-place; keep the returned tensor
return video_data
def _get_train_label(self, index, anchor_xmin, anchor_xmax):
video_name = self.video_list[index] # video_name
video_info = self.video_dict[video_name]
video_frame = video_info['duration_frame']
video_second = video_info['duration_second']
feature_frame = video_info['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second # there are some frames not used
video_labels = video_info['annotations'] # the measurement is second, not frame
# change the measurement from second to percentage
gt_bbox = []
gt_iou_map = []
for j in range(len(video_labels)):
tmp_info = video_labels[j]
tmp_start = max(min(1, tmp_info['segment'][0] / corrected_second), 0)
tmp_end = max(min(1, tmp_info['segment'][1] / corrected_second), 0)
gt_bbox.append([tmp_start, tmp_end]) # gt_bbox [0~1]
tmp_gt_iou_map = iou_with_anchors(
self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end) # [100*100]
tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
[self.temporal_scale, self.temporal_scale])
gt_iou_map.append(tmp_gt_iou_map)
gt_iou_map = np.array(gt_iou_map) # gt [100*100]
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_iou_map = torch.Tensor(gt_iou_map) # [100,100]
# generate R_s and R_e
gt_bbox = np.array(gt_bbox) # gt [start,end]
gt_xmins = gt_bbox[:, 0]
gt_xmaxs = gt_bbox[:, 1]
gt_lens = gt_xmaxs - gt_xmins
gt_len_small = 3 * self.temporal_gap # np.maximum(self.temporal_gap, self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
# calculate the ioa for all timestamp
match_score_start = []
for jdx in range(len(anchor_xmin)):
match_score_start.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
match_score_end = []
for jdx in range(len(anchor_xmin)):
match_score_end.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_start = torch.Tensor(match_score_start)
match_score_end = torch.Tensor(match_score_end)
return match_score_start, match_score_end, gt_iou_map
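    # Note (added, not part of the original file): the labels built above are
    #   * gt_iou_map[d, s]: maximum IoU between the proposal starting at snippet s with
    #     duration d+1 snippets and any ground-truth segment, and
    #   * match_score_start / match_score_end: IoA of every snippet-centred anchor of width
    #     1/temporal_scale with a region of width 3/temporal_scale around each ground-truth
    #     start / end boundary.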
def __len__(self):
return len(self.video_list)
class VideoDataSet_unlabel(data.Dataset):
def __init__(self, opt, subset="unlabel"):
self.temporal_scale = opt["temporal_scale"] # 100
self.temporal_gap = 1. / self.temporal_scale
self.subset = subset
self.mode = opt["mode"]
self.feature_path = opt["feature_path"]
self.video_info_path = "./data/activitynet_annotations/video_info_new_{}.csv".format(opt['unlabel_percent'])
self.video_anno_path = opt["video_anno"]
self._getDatasetDict()
self.unlabel_percent = opt['unlabel_percent']
self._get_match_map()
def _getDatasetDict(self):
anno_df = pd.read_csv(self.video_info_path)
anno_database = load_json(self.video_anno_path)
self.video_dict = {}
for i in range(len(anno_df)):
video_name = anno_df.video.values[i]
video_info = anno_database[video_name]
video_subset = anno_df.subset.values[i]
if self.subset in video_subset:
self.video_dict[video_name] = 'unseen'
self.video_list = list(self.video_dict.keys())
print("%s unlabeled subset video numbers: %d" % (self.subset, len(self.video_list)))
def __getitem__(self, index):
video_data = self._load_file(index)
if self.mode == "train":
# match_score_start, match_score_end, confidence_score = self._get_train_label(index, self.anchor_xmin,
# self.anchor_xmax)
return video_data # ,confidence_score, match_score_start, match_score_end # [400,100],[100,100],[100]
else:
return index, video_data
def _get_match_map(self):
match_map = []
for idx in range(self.temporal_scale):
tmp_match_window = []
xmin = self.temporal_gap * idx
for jdx in range(1, self.temporal_scale + 1):
xmax = xmin + self.temporal_gap * jdx
tmp_match_window.append([xmin, xmax])
match_map.append(tmp_match_window)
match_map = np.array(match_map) # 100x100x2
match_map = np.transpose(match_map, [1, 0, 2]) # [0,1] [1,2] [2,3].....[99,100]
match_map = np.reshape(match_map, [-1, 2]) # [0,2] [1,3] [2,4].....[99,101] # duration x start
self.match_map = match_map # duration is same in row, start is same in col [10000,2]
self.anchor_xmin = [self.temporal_gap * (i-0.5) for i in range(self.temporal_scale)] # [-0.5/100,0.5/100,...98.5/100]
self.anchor_xmax = [self.temporal_gap * (i+0.5) for i in range(1, self.temporal_scale + 1)] # [1.5/100,...,100.5/100]
def _load_file(self, index):
video_name = self.video_list[index]
video_df = pd.read_csv(self.feature_path + "csv_mean_" + str(self.temporal_scale) + "/" + video_name + ".csv")
video_data = video_df.values[:, :]
video_data = torch.Tensor(video_data)
video_data = torch.transpose(video_data, 0, 1)
        video_data = video_data.float()  # .float() is not in-place; keep the returned tensor
return video_data
def _get_train_label(self, index, anchor_xmin, anchor_xmax):
video_name = self.video_list[index] # video_name
video_info = self.video_dict[video_name]
video_frame = video_info['duration_frame']
video_second = video_info['duration_second']
feature_frame = video_info['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second # there are some frames not used
video_labels = video_info['annotations'] # the measurement is second, not frame
# change the measurement from second to percentage
gt_bbox = []
gt_iou_map = []
for j in range(len(video_labels)):
tmp_info = video_labels[j]
tmp_start = max(min(1, tmp_info['segment'][0] / corrected_second), 0)
tmp_end = max(min(1, tmp_info['segment'][1] / corrected_second), 0)
gt_bbox.append([tmp_start, tmp_end]) # gt_bbox [0~1]
tmp_gt_iou_map = iou_with_anchors(
self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end) # [100*100]
tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
[self.temporal_scale, self.temporal_scale])
gt_iou_map.append(tmp_gt_iou_map)
        gt_iou_map = np.array(gt_iou_map)  # one [100,100] IoU map per ground-truth segment
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_iou_map = torch.Tensor(gt_iou_map) # [100,100]
# generate R_s and R_e
        gt_bbox = np.array(gt_bbox)  # one [start, end] pair per ground-truth segment
gt_xmins = gt_bbox[:, 0]
gt_xmaxs = gt_bbox[:, 1]
gt_lens = gt_xmaxs - gt_xmins
gt_len_small = 3 * self.temporal_gap # np.maximum(self.temporal_gap, self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
# calculate the ioa for all timestamp
match_score_start = []
for jdx in range(len(anchor_xmin)):
match_score_start.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
match_score_end = []
for jdx in range(len(anchor_xmin)):
match_score_end.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_start = torch.Tensor(match_score_start)
match_score_end = torch.Tensor(match_score_end)
return match_score_start, match_score_end, gt_iou_map
def __len__(self):
return len(self.video_list)
if __name__ == '__main__':
import opts
opt = opts.parse_opt()
opt = vars(opt)
train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="train"),
batch_size=opt["batch_size"], shuffle=True,
num_workers=8, pin_memory=True)
for aaa,bbb,ccc,ddd in train_loader: # len(train_loader)=604
set_trace()
print(aaa.shape,bbb.shape,ccc.shape,ddd.shape) # torch.Size([16, 400, 100]) torch.Size([16, 100, 100]) torch.Size([16, 100]) torch.Size([16, 100])
# set_trace()
break
| 14,230 | 51.707407 | 155 | py |
SSTAP | SSTAP-main/loss_function.py | import torch
import numpy as np
import torch.nn.functional as F
def get_mask(tscale):
bm_mask = []
for idx in range(tscale):
mask_vector = [1 for i in range(tscale - idx)
] + [0 for i in range(idx)]
bm_mask.append(mask_vector)
bm_mask = np.array(bm_mask, dtype=np.float32)
return torch.Tensor(bm_mask)
''' [1, 1, 1, 1, 1]
[1, 1, 1, 1, 0]
[1, 1, 1, 0, 0]
[1, 1, 0, 0, 0]
[1, 0, 0, 0, 0]'''
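# Note (added, not part of the original file): the returned [tscale, tscale] mask is 1 only
# where the proposal (start index + duration index) still fits inside the video; multiplying
# it with gt_iou_map and the predicted map in bmn_loss_func below zeroes out all proposals
# that would run past the last snippet.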
def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask):
pred_bm_reg = pred_bm[:, 0].contiguous()
pred_bm_cls = pred_bm[:, 1].contiguous()
gt_iou_map = gt_iou_map * bm_mask # [b,100,100]*[100,100] ->[B,100,100]
pem_reg_loss = pem_reg_loss_func(pred_bm_reg, gt_iou_map, bm_mask)
pem_cls_loss = pem_cls_loss_func(pred_bm_cls, gt_iou_map, bm_mask)
tem_loss = tem_loss_func(pred_start, pred_end, gt_start, gt_end)
loss = tem_loss + 10 * pem_reg_loss + pem_cls_loss
return loss, tem_loss, pem_reg_loss, pem_cls_loss
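# Illustrative call (added, not part of the original file; shapes follow the BMN model in
# models.py, with T = temporal_scale):
#   pred_bm: [B, 2, T, T], pred_start / pred_end: [B, T]
#   gt_iou_map: [B, T, T], gt_start / gt_end: [B, T]
#   loss, tem, pem_reg, pem_cls = bmn_loss_func(pred_bm, pred_start, pred_end,
#                                               gt_iou_map, gt_start, gt_end,
#                                               get_mask(T))  # mask on the same device as the tensors
# The total loss weights the proposal regression term ten times stronger than the other two.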
def tem_loss_func(pred_start, pred_end, gt_start, gt_end):
def bi_loss(pred_score, gt_label):
pred_score = pred_score.view(-1)
gt_label = gt_label.view(-1)
pmask = (gt_label > 0.5).float()
num_entries = len(pmask)
num_positive = torch.sum(pmask)
ratio = num_entries / num_positive
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
epsilon = 0.000001
loss_pos = coef_1 * torch.log(pred_score + epsilon) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + epsilon)*(1.0 - pmask)
loss = -1 * torch.mean(loss_pos + loss_neg)
return loss
loss_start = bi_loss(pred_start, gt_start)
loss_end = bi_loss(pred_end, gt_end)
loss = loss_start + loss_end
return loss
def pem_reg_loss_func(pred_score, gt_iou_map, mask):
u_hmask = (gt_iou_map > 0.7).float()
u_mmask = ((gt_iou_map <= 0.7) & (gt_iou_map > 0.3)).float()
u_lmask = ((gt_iou_map <= 0.3) & (gt_iou_map > 0.)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = num_h / num_m
u_smmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = num_h / num_l
u_slmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > (1. - r_l)).float()
weights = u_hmask + u_smmask + u_slmask
loss = F.mse_loss(pred_score* weights, gt_iou_map* weights)
loss = 0.5 * torch.sum(loss*torch.ones(*weights.shape).cuda()) / torch.sum(weights)
return loss
def pem_cls_loss_func(pred_score, gt_iou_map, mask):
pmask = (gt_iou_map > 0.9).float()
nmask = (gt_iou_map <= 0.9).float()
nmask = nmask * mask
num_positive = torch.sum(pmask)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
epsilon = 0.000001
loss_pos = coef_1 * torch.log(pred_score + epsilon) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + epsilon) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
| 3,482 | 32.171429 | 90 | py |
SSTAP | SSTAP-main/eval.py | import sys
import warnings
warnings.filterwarnings('ignore')
sys.path.append('./Evaluation')
from eval_proposal import ANETproposal
import matplotlib.pyplot as plt
import numpy as np
def run_evaluation(ground_truth_filename, proposal_filename,
max_avg_nr_proposals=100,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
subset='validation'):
anet_proposal = ANETproposal(ground_truth_filename, proposal_filename,
tiou_thresholds=tiou_thresholds,
max_avg_nr_proposals=max_avg_nr_proposals,
subset=subset, verbose=True, check_status=False)
anet_proposal.evaluate()
recall = anet_proposal.recall
average_recall = anet_proposal.avg_recall
average_nr_proposals = anet_proposal.proposals_per_video
return (average_nr_proposals, average_recall, recall)
def plot_metric(opt,average_nr_proposals, average_recall, recall, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
fn_size = 14
plt.figure(num=None, figsize=(12, 8))
ax = plt.subplot(1,1,1)
colors = ['k', 'r', 'yellow', 'b', 'c', 'm', 'b', 'pink', 'lawngreen', 'indigo']
area_under_curve = np.zeros_like(tiou_thresholds)
for i in range(recall.shape[0]):
area_under_curve[i] = np.trapz(recall[i], average_nr_proposals)
for idx, tiou in enumerate(tiou_thresholds[::2]):
ax.plot(average_nr_proposals, recall[2*idx,:], color=colors[idx+1],
label="tiou=[" + str(tiou) + "], area=" + str(int(area_under_curve[2*idx]*100)/100.),
linewidth=4, linestyle='--', marker=None)
# Plots Average Recall vs Average number of proposals.
ax.plot(average_nr_proposals, average_recall, color=colors[0],
label="tiou = 0.5:0.05:0.95," + " area=" + str(int(np.trapz(average_recall, average_nr_proposals)*100)/100.),
linewidth=4, linestyle='-', marker=None)
handles, labels = ax.get_legend_handles_labels()
ax.legend([handles[-1]] + handles[:-1], [labels[-1]] + labels[:-1], loc='best')
plt.ylabel('Average Recall', fontsize=fn_size)
plt.xlabel('Average Number of Proposals per Video', fontsize=fn_size)
    plt.grid(True, which="both")  # positional flag works across matplotlib versions
    plt.ylim([0, 1.0])
    plt.setp(ax.get_xticklabels(), fontsize=fn_size)  # reuse ax; plt.axes() creates a new axes in recent matplotlib
    plt.setp(ax.get_yticklabels(), fontsize=fn_size)
#plt.show()
plt.savefig(opt["save_fig_path"])
def evaluation_proposal(opt):
uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid = run_evaluation(
"./Evaluation/data/activity_net_1_3_new.json", # filter_activity_net_1_3_new.json
opt["result_file"],
max_avg_nr_proposals=100,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
subset='validation')
plot_metric(opt,uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid)
print( "AR@1 is \t",np.mean(uniform_recall_valid[:,0]))
print( "AR@5 is \t",np.mean(uniform_recall_valid[:,4]))
print( "AR@10 is \t",np.mean(uniform_recall_valid[:,9]))
print( "AR@100 is \t",np.mean(uniform_recall_valid[:,-1])) | 3,247 | 44.111111 | 122 | py |
SSTAP | SSTAP-main/models.py | import math
import numpy as np
import torch
import torch.nn as nn
from ipdb import set_trace
import random
import torch.nn.functional as F
class TemporalShift(nn.Module):
def __init__(self, n_segment=3, n_div=8, inplace=False):
super(TemporalShift, self).__init__()
# self.net = net
self.n_segment = n_segment
self.fold_div = n_div
self.inplace = inplace
self.channels_range = list(range(400)) # feature_channels
if inplace:
print('=> Using in-place shift...')
# print('=> Using fold div: {}'.format(self.fold_div))
def forward(self, x):
# self.fold_div = n_div
x = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace, channels_range =self.channels_range)
return x
@staticmethod
def shift(x, n_segment, fold_div=8, inplace=False, channels_range=[1,2]):
x = x.permute(0, 2, 1) # [B,C,T] --> [B, T, C]
# set_trace()
n_batch, T, c = x.size()
# nt, c, h, w = x.size()
# n_batch = nt // n_segment
# x = x.view(n_batch, n_segment, c, h, w)
# x = x.view(n_batch, T, c, h, w)
        fold = c // (2 * fold_div)  # parentheses added: the original "c // 2*fold_div" parsed as (c//2)*fold_div, far larger than c
# all = random.sample(channels_range, fold*2)
# forward = sorted(all[:fold])
# backward = sorted(all[fold:])
# fixed = list(set(channels_range) - set(all))
# fold = c // fold_div
if inplace:
# Due to some out of order error when performing parallel computing.
# May need to write a CUDA kernel.
raise NotImplementedError
# out = InplaceShift.apply(x, fold)
else:
out = torch.zeros_like(x)
out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
out[:, :, 2 * fold:200] = x[:, :, 2 * fold:200] # not shift
out[:, :-1, 200:200+fold] = x[:, 1:, 200:200+fold] # shift left
out[:, 1:, 200+fold: 200+2 * fold] = x[:, :-1, 200+fold: 200+2 * fold] # shift right
out[:, :, 200+2 * fold:] = x[:, :, 200 + 2 * fold:] # not shift
# out = torch.zeros_like(x)
# out[:, :-1, forward] = x[:, 1:, forward] # shift left
# out[:, 1:, backward] = x[:, :-1, backward] # shift right
# out[:, :, fixed] = x[:, :, fixed] # not shift
# return out.view(nt, c, h, w)
return out.permute(0, 2, 1)
class TemporalShift_random(nn.Module):
def __init__(self, n_segment=3, n_div=8, inplace=False):
super(TemporalShift_random, self).__init__()
# self.net = net
self.n_segment = n_segment
self.fold_div = n_div
self.inplace = inplace
self.channels_range = list(range(400)) # feature_channels
if inplace:
print('=> Using in-place shift...')
# print('=> Using fold div: {}'.format(self.fold_div))
def forward(self, x):
# self.fold_div = n_div
x = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace, channels_range =self.channels_range)
return x
@staticmethod
def shift(x, n_segment, fold_div=8, inplace=False, channels_range=[1,2]):
x = x.permute(0, 2, 1) # [B,C,T] --> [B, T, C]
# set_trace()
n_batch, T, c = x.size()
# nt, c, h, w = x.size()
# n_batch = nt // n_segment
# x = x.view(n_batch, n_segment, c, h, w)
# x = x.view(n_batch, T, c, h, w)
fold = c // fold_div
all = random.sample(channels_range, fold*2)
forward = sorted(all[:fold])
backward = sorted(all[fold:])
fixed = list(set(channels_range) - set(all))
# fold = c // fold_div
if inplace:
# Due to some out of order error when performing parallel computing.
# May need to write a CUDA kernel.
raise NotImplementedError
# out = InplaceShift.apply(x, fold)
else:
# out = torch.zeros_like(x)
# out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
# out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
# out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
out = torch.zeros_like(x)
out[:, :-1, forward] = x[:, 1:, forward] # shift left
out[:, 1:, backward] = x[:, :-1, backward] # shift right
out[:, :, fixed] = x[:, :, fixed] # not shift
# return out.view(nt, c, h, w)
return out.permute(0, 2, 1)
class InplaceShift(torch.autograd.Function):
# Special thanks to @raoyongming for the help to this function
@staticmethod
def forward(ctx, input, fold):
# not support higher order gradient
# input = input.detach_()
ctx.fold_ = fold
n, t, c, h, w = input.size()
buffer = input.data.new(n, t, fold, h, w).zero_()
buffer[:, :-1] = input.data[:, 1:, :fold]
input.data[:, :, :fold] = buffer
buffer.zero_()
buffer[:, 1:] = input.data[:, :-1, fold: 2 * fold]
input.data[:, :, fold: 2 * fold] = buffer
return input
@staticmethod
def backward(ctx, grad_output):
# grad_output = grad_output.detach_()
fold = ctx.fold_
n, t, c, h, w = grad_output.size()
buffer = grad_output.data.new(n, t, fold, h, w).zero_()
buffer[:, 1:] = grad_output.data[:, :-1, :fold]
grad_output.data[:, :, :fold] = buffer
buffer.zero_()
buffer[:, :-1] = grad_output.data[:, 1:, fold: 2 * fold]
grad_output.data[:, :, fold: 2 * fold] = buffer
return grad_output, None
class BMN(nn.Module):
def __init__(self, opt):
super(BMN, self).__init__()
self.tscale = opt["temporal_scale"] # 100
self.prop_boundary_ratio = opt["prop_boundary_ratio"] # 0.5
self.num_sample = opt["num_sample"] # 32
self.num_sample_perbin = opt["num_sample_perbin"] # 3
self.feat_dim=opt["feat_dim"] # 400
self.tem_best_loss = 10000000
self.hidden_dim_1d = 256
self.hidden_dim_2d = 128
self.hidden_dim_3d = 512
self._get_interp1d_mask()
# Base Module
self.x_1d_b = nn.Sequential(
nn.Conv1d(self.feat_dim, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4), # 256
nn.ReLU(inplace=True)
)
self.recons = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, self.feat_dim, kernel_size=3, padding=1, groups=4), # 256
# nn.ReLU(inplace=True)
)
self.clip_order = nn.Sequential(
# nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
# nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=3, padding=1), # 256
nn.ReLU(inplace=True)
)
self.clip_order_drop = nn.Dropout(0.5)
self.clip_order_linear = nn.Linear(100, 2)
# Temporal Evaluation Module
self.x_1d_s = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
nn.Sigmoid()
)
self.x_1d_e = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
nn.Sigmoid()
)
# Proposal Evaluation Module
self.x_1d_p = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1),
nn.ReLU(inplace=True)
)
self.x_3d_p = nn.Sequential(
nn.Conv3d(self.hidden_dim_1d, self.hidden_dim_3d, kernel_size=(self.num_sample, 1, 1), stride=(self.num_sample, 1, 1)), # 512
nn.ReLU(inplace=True)
)
self.x_2d_p = nn.Sequential(
nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x, recons=False, clip_order=False): # [B,400,100]
base_feature = self.x_1d_b(x) # [B,256,100]
recons_feature = self.recons(base_feature)
if recons:
return recons_feature
batch_size, C, T = base_feature.size()
if clip_order:
return self.clip_order_linear(self.clip_order_drop(self.clip_order(base_feature).view(batch_size, T)))
start = self.x_1d_s(base_feature).squeeze(1) # [B,1,100]->[B,100] sigmoid()
end = self.x_1d_e(base_feature).squeeze(1)
        confidence_map = self.x_1d_p(base_feature) # [B,256,100] -> [B,256,100] + ReLU
confidence_map = self._boundary_matching_layer(confidence_map) # [B, 256, 32, 100, 100]
# set_trace()
confidence_map = self.x_3d_p(confidence_map).squeeze(2)
confidence_map = self.x_2d_p(confidence_map) # [B, 2, 100, 100]
return confidence_map, start, end # [B, 2, 100, 100], [B,100],[B,100]
def _boundary_matching_layer(self, x):
input_size = x.size() # [B,256,100]
out = torch.matmul(x, self.sample_mask).reshape(input_size[0],input_size[1],self.num_sample,self.tscale,self.tscale)
return out # sample_mask= [100, 320000]
def _get_interp1d_bin_mask(self, seg_xmin, seg_xmax, tscale, num_sample, num_sample_perbin):
# generate sample mask for a boundary-matching pair
plen = float(seg_xmax - seg_xmin) # during
plen_sample = plen / (num_sample * num_sample_perbin - 1.0)
total_samples = [
seg_xmin + plen_sample * ii
for ii in range(num_sample * num_sample_perbin)
] # num_sample * num_sample_perbin
p_mask = []
for idx in range(num_sample): # 32
bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) * num_sample_perbin]
bin_vector = np.zeros([tscale])
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if int(sample_down) <= (tscale - 1) and int(sample_down) >= 0:
bin_vector[int(sample_down)] += 1 - sample_decimal # down
if int(sample_upper) <= (tscale - 1) and int(sample_upper) >= 0:
bin_vector[int(sample_upper)] += sample_decimal # upper
bin_vector = 1.0 / num_sample_perbin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1) # 100*32
return p_mask
def _get_interp1d_mask(self):
# generate sample mask for each point in Boundary-Matching Map
mask_mat = []
for start_index in range(self.tscale): # 100
mask_mat_vector = []
for duration_index in range(self.tscale): # 100
if start_index + duration_index < self.tscale: #
p_xmin = start_index # start
p_xmax = start_index + duration_index # end
center_len = float(p_xmax - p_xmin) + 1 # during
sample_xmin = p_xmin - center_len * self.prop_boundary_ratio # sample_start
sample_xmax = p_xmax + center_len * self.prop_boundary_ratio # sample_end
p_mask = self._get_interp1d_bin_mask(
sample_xmin, sample_xmax, self.tscale, self.num_sample, # 32
self.num_sample_perbin)
else:
p_mask = np.zeros([self.tscale, self.num_sample]) # [100,32]
mask_mat_vector.append(p_mask) #
mask_mat_vector = np.stack(mask_mat_vector, axis=2) # [100,32,100]
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3) # [100,32,100,100]
mask_mat = mask_mat.astype(np.float32)
self.sample_mask = nn.Parameter(torch.Tensor(mask_mat).view(self.tscale, -1), requires_grad=False) # [100,32*100*100]
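    # Note (added, not part of the original file): sample_mask is a fixed [T, num_sample*T*T]
    # interpolation matrix, so _boundary_matching_layer above implements boundary matching as
    # one matmul: [B, C, T] x [T, num_sample*T*T] -> reshape -> [B, C, num_sample, T, T],
    # i.e. num_sample temporally interpolated features for every (duration, start) proposal.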
if __name__ == '__main__':
import opts
opt = opts.parse_opt()
opt = vars(opt)
model=BMN(opt).cuda()
input=torch.randn(2,400,100).cuda()
a,b,c=model(input)
print(a.shape,b.shape,c.shape)
| 13,366 | 43.115512 | 138 | py |
SSTAP | SSTAP-main/data/activitynet_feature_cuhk/data_process.py |
import random
import numpy as np
import scipy.interpolate  # interp1d is used below; the submodule is not loaded by "import scipy" alone
import pandas as pd
import pandas
import numpy
import json
def resizeFeature(inputData,newSize):
# inputX: (temporal_length,feature_dimension) #
originalSize=len(inputData)
#print originalSize
if originalSize==1:
inputData=np.reshape(inputData,[-1])
return np.stack([inputData]*newSize)
x=numpy.array(range(originalSize))
f=scipy.interpolate.interp1d(x,inputData,axis=0)
x_new=[i*float(originalSize-1)/(newSize-1) for i in range(newSize)]
y_new=f(x_new)
return y_new
def readData(video_name,data_type=["spatial","temporal"]):
spatial_dir="./spatial/csv_action/"
temporal_dir="./temporal/csv_action/"
data=[]
for dtype in data_type:
if dtype=="spatial":
df=pandas.read_csv(spatial_dir+video_name+".csv")
elif dtype=="temporal":
df=pandas.read_csv(temporal_dir+video_name+".csv")
data.append(df.values[:,:])
lens=[len(d) for d in data]
#print lens
min_len=min(lens)
new_data=[d[:min_len] for d in data]
new_data=numpy.concatenate(new_data,axis=1)
return new_data
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
def getDatasetDict():
df=pd.read_csv("./info/video_info.csv")
json_data= load_json("./info/activity_net.v1-3.min.json")
database=json_data['database']
out_dict={}
for i in range(len(df)):
video_name=df.video.values[i]
video_info=database[video_name[2:]]
video_new_info={}
video_new_info['duration_frame']=df.numFrame.values[i]
video_new_info['duration_second']=df.seconds.values[i]
video_new_info['annotations']=video_info['annotations']
out_dict[video_name]=video_new_info
return out_dict
def poolData(data,videoAnno,num_prop=100,num_bin=1,num_sample_bin=3,pool_type="mean"):
feature_frame=len(data)*16
video_frame=videoAnno['duration_frame']
video_second=videoAnno['duration_second']
corrected_second=float(feature_frame)/video_frame*video_second
fps=float(video_frame)/video_second
st=16/fps
if len(data)==1:
video_feature=np.stack([data]*num_prop)
video_feature=np.reshape(video_feature,[num_prop,400])
return video_feature
x=[st/2+ii*st for ii in range(len(data))]
f=scipy.interpolate.interp1d(x,data,axis=0)
video_feature=[]
zero_sample=np.zeros(num_bin*400)
tmp_anchor_xmin=[1.0/num_prop*i for i in range(num_prop)]
tmp_anchor_xmax=[1.0/num_prop*i for i in range(1,num_prop+1)]
num_sample=num_bin*num_sample_bin
for idx in range(num_prop):
xmin=max(x[0]+0.0001,tmp_anchor_xmin[idx]*corrected_second)
xmax=min(x[-1]-0.0001,tmp_anchor_xmax[idx]*corrected_second)
if xmax<x[0]:
            # anchor window ends before the first sampled timestamp -> pad with zeros
video_feature.append(zero_sample)
continue
if xmin>x[-1]:
video_feature.append(zero_sample)
continue
plen=(xmax-xmin)/(num_sample-1)
x_new=[xmin+plen*ii for ii in range(num_sample)]
y_new=f(x_new)
y_new_pool=[]
for b in range(num_bin):
tmp_y_new=y_new[num_sample_bin*b:num_sample_bin*(b+1)]
if pool_type=="mean":
tmp_y_new=np.mean(y_new,axis=0)
elif pool_type=="max":
tmp_y_new=np.max(y_new,axis=0)
y_new_pool.append(tmp_y_new)
y_new_pool=np.stack(y_new_pool)
y_new_pool=np.reshape(y_new_pool,[-1])
video_feature.append(y_new_pool)
video_feature=np.stack(video_feature)
return video_feature
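# Note (added, not part of the original file): poolData linearly interpolates the snippet-level
# two-stream features (one snippet per 16 frames) and mean/max-pools them into num_prop=100
# fixed temporal bins per video; the resulting csv_mean_100 files are what dataset.py reads
# during training.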
videoDict=getDatasetDict()
videoNameList=list(videoDict.keys())  # wrap in list() so random.shuffle works under Python 3
random.shuffle(videoNameList)
col_names=[]
for i in range(400):
col_names.append("f"+str(i))
for videoName in videoNameList:
videoAnno=videoDict[videoName]
data=readData(videoName)
numFrame=videoAnno['duration_frame']
featureFrame=len(data)*16
videoAnno["feature_frame"]=featureFrame
videoDict[videoName]=videoAnno
    print(numFrame, featureFrame)
videoFeature_mean=poolData(data,videoAnno,num_prop=100,num_bin=1,num_sample_bin=3,pool_type="mean")
outDf=pd.DataFrame(videoFeature_mean,columns=col_names)
outDf.to_csv("./csv_mean_100/"+videoName+".csv",index=False)
outfile=open("./anet_anno_anet.json","w")
json.dump(videoDict,outfile)
outfile.close()
| 4,484 | 31.737226 | 103 | py |
SSTAP | SSTAP-main/data/activitynet_feature_cuhk/ldb_process.py | """
Created on Mon May 15 22:31:31 2017
@author: wzmsltw
"""
import caffe
import leveldb
import numpy as np
from caffe.proto import caffe_pb2
import pandas as pd
col_names=[]
for i in range(200):
col_names.append("f"+str(i))
df=pd.read_table("./input_spatial_list.txt",names=['image','frame','label'],sep=" ")
db = leveldb.LevelDB('./LDB')
datum = caffe_pb2.Datum()
i=0
video_name="init"
videoData=np.reshape([],[-1,200])
for key, value in db.RangeIter():
tmp_video_name=df.image.values[i].split('/')[-1]
if tmp_video_name !=video_name:
outDf=pd.DataFrame(videoData,columns=col_names)
outDf.to_csv("./csv_raw/"+video_name+".csv",index=False)
videoData=np.reshape([],[-1,200])
video_name=tmp_video_name
i+=1
datum.ParseFromString(value)
label = datum.label
data = caffe.io.datum_to_array(datum)
data=np.reshape(data,[1,200])
videoData=np.concatenate((videoData,data))
del db
| 983 | 21.883721 | 84 | py |
SSTAP | SSTAP-main/Evaluation/eval_proposal.py | import json
import numpy as np
import pandas as pd
def interpolated_prec_rec(prec, rec):
"""Interpolated AP - VOCdevkit from VOC 2011.
"""
mprec = np.hstack([[0], prec, [0]])
mrec = np.hstack([[0], rec, [1]])
for i in range(len(mprec) - 1)[::-1]:
mprec[i] = max(mprec[i], mprec[i + 1])
idx = np.where(mrec[1::] != mrec[0:-1])[0] + 1
ap = np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
return ap
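# Worked example (added, illustrative only):
#   >>> interpolated_prec_rec(np.array([1.0, 0.5]), np.array([0.5, 1.0]))
#   0.75
# precision is first made monotonically non-increasing and the area under the interpolated
# precision-recall curve is 0.5*1.0 + 0.5*0.5 = 0.75.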
def segment_iou(target_segment, candidate_segments):
"""Compute the temporal intersection over union between a
target segment and all the test segments.
Parameters
----------
target_segment : 1d array
Temporal target segment containing [starting, ending] times.
candidate_segments : 2d array
Temporal candidate segments containing N x [starting, ending] times.
Outputs
-------
tiou : 1d array
Temporal intersection over union score of the N's candidate segments.
"""
tt1 = np.maximum(target_segment[0], candidate_segments[:, 0])
tt2 = np.minimum(target_segment[1], candidate_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = (candidate_segments[:, 1] - candidate_segments[:, 0]) \
+ (target_segment[1] - target_segment[0]) - segments_intersection
# Compute overlap as the ratio of the intersection
# over union of two segments.
tIoU = segments_intersection.astype(float) / segments_union
return tIoU
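# Worked example (added, illustrative only):
#   >>> segment_iou(np.array([2.0, 4.0]), np.array([[1.0, 3.0], [5.0, 7.0]]))
#   array([0.33333333, 0.        ])
# the first candidate shares 1s of a 3s union with the target, the second does not overlap.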
def wrapper_segment_iou(target_segments, candidate_segments):
"""Compute intersection over union btw segments
Parameters
----------
target_segments : ndarray
2-dim array in format [m x 2:=[init, end]]
candidate_segments : ndarray
2-dim array in format [n x 2:=[init, end]]
Outputs
-------
tiou : ndarray
2-dim array [n x m] with IOU ratio.
Note: It assumes that candidate-segments are more scarce that target-segments
"""
if candidate_segments.ndim != 2 or target_segments.ndim != 2:
raise ValueError('Dimension of arguments is incorrect')
n, m = candidate_segments.shape[0], target_segments.shape[0]
tiou = np.empty((n, m))
for i in range(m):
tiou[:, i] = segment_iou(target_segments[i,:], candidate_segments)
return tiou
class ANETproposal(object):
GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
PROPOSAL_FIELDS = ['results', 'version', 'external_data']
def __init__(self, ground_truth_filename=None, proposal_filename=None,
ground_truth_fields=GROUND_TRUTH_FIELDS,
proposal_fields=PROPOSAL_FIELDS,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
max_avg_nr_proposals=None,
subset='validation', verbose=False,
check_status=False):
if not ground_truth_filename:
raise IOError('Please input a valid ground truth file.')
if not proposal_filename:
raise IOError('Please input a valid proposal file.')
self.subset = subset
self.tiou_thresholds = tiou_thresholds
self.max_avg_nr_proposals = max_avg_nr_proposals
self.verbose = verbose
self.gt_fields = ground_truth_fields
self.pred_fields = proposal_fields
self.recall = None
self.avg_recall = None
self.proposals_per_video = None
self.check_status = check_status
# Retrieve blocked videos from server.
        if self.check_status:
            # get_blocked_videos lives in Evaluation/utils.py; imported lazily here so the
            # default check_status=False path needs neither the import nor network access
            from utils import get_blocked_videos
            self.blocked_videos = get_blocked_videos()
else:
self.blocked_videos = list()
# Import ground truth and proposals.
self.ground_truth, self.activity_index = self._import_ground_truth(
ground_truth_filename)
self.proposal = self._import_proposal(proposal_filename)
if self.verbose:
print ('[INIT] Loaded annotations from {} subset.'.format(subset))
nr_gt = len(self.ground_truth)
print ('\tNumber of ground truth instances: {}'.format(nr_gt))
nr_pred = len(self.proposal)
print ('\tNumber of proposals: {}'.format(nr_pred))
print ('\tFixed threshold for tiou score: {}'.format(self.tiou_thresholds))
def _import_ground_truth(self, ground_truth_filename):
"""Reads ground truth file, checks if it is well formatted, and returns
the ground truth instances and the activity classes.
Parameters
----------
ground_truth_filename : str
Full path to the ground truth json file.
Outputs
-------
ground_truth : df
Data frame containing the ground truth instances.
activity_index : dict
Dictionary containing class index.
"""
with open(ground_truth_filename, 'r') as fobj:
data = json.load(fobj)
# Checking format
if not all([field in data.keys() for field in self.gt_fields]):
raise IOError('Please input a valid ground truth file.')
# Read ground truth data.
activity_index, cidx = {}, 0
video_lst, t_start_lst, t_end_lst, label_lst = [], [], [], []
for videoid, v in data['database'].items():
if self.subset != v['subset']:
continue
if videoid in self.blocked_videos:
continue
for ann in v['annotations']:
if ann['label'] not in activity_index:
activity_index[ann['label']] = cidx
cidx += 1
video_lst.append(videoid)
t_start_lst.append(ann['segment'][0])
t_end_lst.append(ann['segment'][1])
label_lst.append(activity_index[ann['label']])
ground_truth = pd.DataFrame({'video-id': video_lst,
't-start': t_start_lst,
't-end': t_end_lst,
'label': label_lst})
return ground_truth, activity_index
def _import_proposal(self, proposal_filename):
"""Reads proposal file, checks if it is well formatted, and returns
the proposal instances.
Parameters
----------
proposal_filename : str
Full path to the proposal json file.
Outputs
-------
proposal : df
Data frame containing the proposal instances.
"""
with open(proposal_filename, 'r') as fobj:
data = json.load(fobj)
# Checking format...
if not all([field in data.keys() for field in self.pred_fields]):
raise IOError('Please input a valid proposal file.')
# Read predictions.
video_lst, t_start_lst, t_end_lst = [], [], []
score_lst = []
for videoid, v in data['results'].items():
if videoid in self.blocked_videos:
continue
for result in v:
video_lst.append(videoid)
t_start_lst.append(result['segment'][0])
t_end_lst.append(result['segment'][1])
score_lst.append(result['score'])
proposal = pd.DataFrame({'video-id': video_lst,
't-start': t_start_lst,
't-end': t_end_lst,
'score': score_lst})
return proposal
def evaluate(self):
"""Evaluates a proposal file. To measure the performance of a
method for the proposal task, we computes the area under the
average recall vs average number of proposals per video curve.
"""
recall, avg_recall, proposals_per_video = average_recall_vs_avg_nr_proposals(
self.ground_truth, self.proposal,
max_avg_nr_proposals=self.max_avg_nr_proposals,
tiou_thresholds=self.tiou_thresholds)
area_under_curve = np.trapz(avg_recall, proposals_per_video)
if self.verbose:
print('[RESULTS] Performance on ActivityNet proposal task.')
print('\tArea Under the AR vs AN curve: {}%'.format(100.*float(area_under_curve)/proposals_per_video[-1]))
self.recall = recall
self.avg_recall = avg_recall
self.proposals_per_video = proposals_per_video
def average_recall_vs_avg_nr_proposals(ground_truth, proposals,
max_avg_nr_proposals=None,
tiou_thresholds=np.linspace(0.5, 0.95, 10)):
""" Computes the average recall given an average number
of proposals per video.
Parameters
----------
ground_truth : df
Data frame containing the ground truth instances.
Required fields: ['video-id', 't-start', 't-end']
proposal : df
Data frame containing the proposal instances.
Required fields: ['video-id, 't-start', 't-end', 'score']
tiou_thresholds : 1darray, optional
array with tiou thresholds.
Outputs
-------
recall : 2darray
recall[i,j] is recall at ith tiou threshold at the jth average number of average number of proposals per video.
average_recall : 1darray
recall averaged over a list of tiou threshold. This is equivalent to recall.mean(axis=0).
proposals_per_video : 1darray
average number of proposals per video.
"""
# Get list of videos.
video_lst = ground_truth['video-id'].unique()
if not max_avg_nr_proposals:
max_avg_nr_proposals = float(proposals.shape[0])/video_lst.shape[0]
ratio = max_avg_nr_proposals*float(video_lst.shape[0])/proposals.shape[0]
# Adaptation to query faster
ground_truth_gbvn = ground_truth.groupby('video-id')
proposals_gbvn = proposals.groupby('video-id')
# For each video, computes tiou scores among the retrieved proposals.
score_lst = []
total_nr_proposals = 0
for videoid in video_lst:
# Get proposals for this video.
# try:
proposals_videoid = proposals_gbvn.get_group(videoid)
# except:
# continue
this_video_proposals = proposals_videoid.loc[:, ['t-start', 't-end']].values
# Sort proposals by score.
sort_idx = proposals_videoid['score'].argsort()[::-1]
this_video_proposals = this_video_proposals[sort_idx, :]
# Get ground-truth instances associated to this video.
ground_truth_videoid = ground_truth_gbvn.get_group(videoid)
this_video_ground_truth = ground_truth_videoid.loc[:,['t-start', 't-end']].values
if this_video_proposals.shape[0] == 0:
n = this_video_ground_truth.shape[0]
score_lst.append(np.zeros((n, 1)))
continue
if this_video_proposals.ndim != 2:
this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
if this_video_ground_truth.ndim != 2:
this_video_ground_truth = np.expand_dims(this_video_ground_truth, axis=0)
nr_proposals = np.minimum(int(this_video_proposals.shape[0] * ratio), this_video_proposals.shape[0])
total_nr_proposals += nr_proposals
this_video_proposals = this_video_proposals[:nr_proposals, :]
# Compute tiou scores.
tiou = wrapper_segment_iou(this_video_proposals, this_video_ground_truth)
score_lst.append(tiou)
# Given that the length of the videos is really varied, we
# compute the number of proposals in terms of a ratio of the total
# proposals retrieved, i.e. average recall at a percentage of proposals
# retrieved per video.
# Computes average recall.
pcn_lst = np.arange(1, 101) / 100.0 *(max_avg_nr_proposals*float(video_lst.shape[0])/total_nr_proposals)
matches = np.empty((video_lst.shape[0], pcn_lst.shape[0]))
positives = np.empty(video_lst.shape[0])
recall = np.empty((tiou_thresholds.shape[0], pcn_lst.shape[0]))
# Iterates over each tiou threshold.
for ridx, tiou in enumerate(tiou_thresholds):
# Inspect positives retrieved per video at different
# number of proposals (percentage of the total retrieved).
for i, score in enumerate(score_lst):
# Total positives per video.
positives[i] = score.shape[0]
# Find proposals that satisfies minimum tiou threshold.
true_positives_tiou = score >= tiou
# Get number of proposals as a percentage of total retrieved.
            pcn_proposals = np.minimum((score.shape[1] * pcn_lst).astype(int), score.shape[1])  # np.int was removed in recent NumPy
for j, nr_proposals in enumerate(pcn_proposals):
# Compute the number of matches for each percentage of the proposals
matches[i, j] = np.count_nonzero((true_positives_tiou[:, :nr_proposals]).sum(axis=1))
# Computes recall given the set of matches per video.
recall[ridx, :] = matches.sum(axis=0) / positives.sum()
# Recall is averaged.
avg_recall = recall.mean(axis=0)
# Get the average number of proposals per video.
proposals_per_video = pcn_lst * (float(total_nr_proposals) / video_lst.shape[0])
return recall, avg_recall, proposals_per_video
| 13,318 | 38.877246 | 119 | py |
SSTAP | SSTAP-main/Evaluation/utils.py | import json
import urllib.request  # Python 3 replacement for the original "import urllib2"
import numpy as np
API = 'http://ec2-52-11-11-89.us-west-2.compute.amazonaws.com/challenge16/api.py'
def get_blocked_videos(api=API):
api_url = '{}?action=get_blocked'.format(api)
    req = urllib.request.Request(api_url)
    response = urllib.request.urlopen(req)
return json.loads(response.read())
def interpolated_prec_rec(prec, rec):
"""Interpolated AP - VOCdevkit from VOC 2011.
"""
mprec = np.hstack([[0], prec, [0]])
mrec = np.hstack([[0], rec, [1]])
for i in range(len(mprec) - 1)[::-1]:
mprec[i] = max(mprec[i], mprec[i + 1])
idx = np.where(mrec[1::] != mrec[0:-1])[0] + 1
ap = np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
return ap
def segment_iou(target_segment, candidate_segments):
"""Compute the temporal intersection over union between a
target segment and all the test segments.
Parameters
----------
target_segment : 1d array
Temporal target segment containing [starting, ending] times.
candidate_segments : 2d array
Temporal candidate segments containing N x [starting, ending] times.
Outputs
-------
tiou : 1d array
Temporal intersection over union score of the N's candidate segments.
"""
tt1 = np.maximum(target_segment[0], candidate_segments[:, 0])
tt2 = np.minimum(target_segment[1], candidate_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = (candidate_segments[:, 1] - candidate_segments[:, 0]) \
+ (target_segment[1] - target_segment[0]) - segments_intersection
# Compute overlap as the ratio of the intersection
# over union of two segments.
tIoU = segments_intersection.astype(float) / segments_union
return tIoU
def wrapper_segment_iou(target_segments, candidate_segments):
"""Compute intersection over union btw segments
Parameters
----------
target_segments : ndarray
2-dim array in format [m x 2:=[init, end]]
candidate_segments : ndarray
2-dim array in format [n x 2:=[init, end]]
Outputs
-------
tiou : ndarray
2-dim array [n x m] with IOU ratio.
Note: It assumes that candidate-segments are more scarce that target-segments
"""
if candidate_segments.ndim != 2 or target_segments.ndim != 2:
raise ValueError('Dimension of arguments is incorrect')
n, m = candidate_segments.shape[0], target_segments.shape[0]
tiou = np.empty((n, m))
    for i in range(m):  # xrange is Python 2 only
tiou[:, i] = segment_iou(target_segments[i,:], candidate_segments)
return tiou
| 2,648 | 33.855263 | 81 | py |
xSLHA | xSLHA-master/setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="xslha",
version="1.0.2",
author="Florian Staub",
author_email="florian.staub@gmail.com",
description="A python package to read (big/many) SLHA files",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/fstaub/xSLHA",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS"
],
)
| 710 | 28.625 | 65 | py |
xSLHA | xSLHA-master/xslha/main.py | import subprocess
import os
from six import string_types
# SLHA parser
# SLHA Class
class SLHA():
def __init__(self):
self.blocks = {}
self.br = {}
self.widths = {}
self.br1L = {}
self.widths1L = {}
self.xsections = {}
self.block_name = None
self.entries = {}
self.reading_block = False
self.reading_decay = False
self.reading_xsection = False
self.reading_hb_fermion = False
self.reading_hb_boson = False
self.decay1L = False
self.decay_part = 0
# return wdith and BR
def BR(self, init, final):
        # sort the final-state PDG codes so the look-up does not depend on their order
return self.br[init][tuple(sorted(final))]
def Width(self, pdg):
return self.widths[pdg]
def Value(self, block, number):
'''return value of a parameter defined by block and entry
or the width or an BR'''
if block == 'WIDTH':
return self.widths[number]
elif block == 'BR':
return self.br[number[0]][tuple(sorted(number[1]))]
elif block == 'WIDTH1L':
return self.widths1L[number]
elif block == 'BR1L':
return self.br1L[number[0]][tuple(sorted(number[1]))]
elif block == 'XSECTION':
xs = self.xsections[tuple(number)]
return [[x, xs[x]] for x in xs.keys()]
else:
return self.blocks[block.upper()][
str(number)[1:-1].replace(" ", "")]
def start_decay(self, li):
parsed = list(filter(None, li.split(' ')))
self.decay1L = li.upper().startswith("DECAY1L")
self.decay_part = int(parsed[1])
if self.decay1L:
self.widths1L[self.decay_part] = float(parsed[2])
else:
self.widths[self.decay_part] = float(parsed[2])
self.entries = {}
self.reading_block, self.reading_decay, self.reading_xsection \
= False, True, False
def start_block(self, li):
self.block_name = (list(filter(None, li.split(' ')))[1]).upper()
self.entries = {}
self.reading_block, self.reading_decay, self.reading_xsection \
= True, False, False
self.reading_hb_boson = \
self.block_name in ["HIGGSBOUNDSINPUTHIGGSCOUPLINGSBOSONS",
"HIGGSCOUPLINGSBOSONS"]
self.reading_hb_fermion = \
self.block_name in ["HIGGSBOUNDSINPUTHIGGSCOUPLINGSFERMIONS",
"HIGGSCOUPLINGSFERMIONS"]
def start_xsection(self, li):
parsed = list(filter(None, li.split(' ')))
if "#" in parsed:
parsed = parsed[:parsed.index("#")] # remove comments
self.xs_head = tuple(
[float(parsed[1]),
tuple([int(parsed[2]), int(parsed[3])]),
tuple([int(parsed[-2]), int(parsed[-1])])
])
self.entries = {}
self.reading_block, self.reading_decay, self.reading_xsection \
= False, False, True
def flush(self):
'''store the information once a block is completely parsed'''
if len(self.entries) > 0:
if self.reading_block:
self.blocks[self.block_name] = self.entries
if self.reading_decay:
if self.decay1L:
self.br1L[self.decay_part] = self.entries
else:
self.br[self.decay_part] = self.entries
if self.reading_xsection:
self.xsections[self.xs_head] = self.entries
# Reading
# now the main function to read the SLHA file
def read(file, separator=None, verbose=False):
spc = SLHA()
if separator is not None:
all_files = []
count = 1
with open(file) as infile:
for line in infile:
li = line.strip().upper()
if li.startswith("#") or len(li) < 1:
continue
if separator is not None:
if li.startswith(separator):
spc.flush()
if max(len(spc.blocks.keys()),len(spc.widths.keys())) > 0:
all_files.append(spc)
# start next point
spc = SLHA()
count = count + 1
if verbose:
print("Read spc file:", count)
continue
# New block started
if li.startswith("BLOCK"):
spc.flush() # store information which was read
spc.start_block(li)
elif li.startswith("DECAY"):
spc.flush() # store information which was read
spc.start_decay(li)
elif li.startswith("XSECTION"):
spc.flush() # store information which was read
spc.start_xsection(li)
# Reading and parsing values
else:
parsed = list(filter(None, li.split(' ')))
if "#" in parsed:
parsed = parsed[:parsed.index("#")] # remove comments
if spc.reading_block:
if spc.reading_hb_fermion:
spc.entries[",".join(parsed[3:])] = \
[float(parsed[0]), float(parsed[1])]
elif spc.reading_hb_boson:
spc.entries[",".join(parsed[2:])] = \
float(parsed[0])
else:
# Value might be a string like in SPINFO block
try:
value = float(parsed[-1])
except:
value = parsed[-2]
spc.entries[",".join(parsed[0:-1])] = value
if spc.reading_decay:
spc.entries[
tuple(sorted(eval("[" + ",".join(parsed[2:]) + "]")))
] = float(parsed[0])
if spc.reading_xsection:
spc.entries[
tuple(eval("[" + ",".join(parsed[0:-2]) + "]"))
] = float(parsed[-2])
spc.flush() # save the very last block in the file
if verbose:
print("Read %i blocks and %i decays" % (len(spc.blocks), len(spc.br)))
if separator is None:
return spc
else:
if len(spc.entries) > 0:
all_files.append(spc)
return all_files
# wrapper for faster read-in of multiple files
# squeeze the file (just keeping the necessary entries) to make the reading more efficient
# example: read_small(filename, ["# m0", "# m12", "# relic"], sep="ENDOF")
def read_small(file, entries, sep):
if entries is None:
out = read(file, separator=sep)
else:
string = "--regexp=\"" + sep + "\" --regexp=\"Block\" "
for i in entries:
string = string + "--regexp=\"" + i + "\" "
if os.path.isfile("temp.spc"):
subprocess.call("rm temp.spc", shell=True)
subprocess.call("cat " + file + " | grep -i " + string
+ " > temp_read_small.spc", shell=True)
out = read("temp_read_small.spc", separator=sep)
subprocess.call("rm temp_read_small.spc", shell=True)
return out
def read_dir(dir, entries=None):
if os.path.isfile("temp_read_dir.spc"):
subprocess.call("rm temp_read_dir.spc", shell=True)
# subprocess.check_call("cat "+dir+"/* > temp_read_dir.spc",shell=True)
subprocess.check_call("tail -n+1 " + dir + "/* > temp_read_dir.spc",
shell=True)
out = read_small("temp_read_dir.spc", entries, "==>")
subprocess.call("rm temp_read_dir.spc", shell=True)
return out
#def read_dir(dir,entries=None):
#subprocess.call("rm temp_read_dir.spc",shell=True)
#subprocess.check_call("cat "+dir+"/* > temp_read_dir.spc",shell=True)
#with open("temp_read_dir.spc") as infile:
#for line in infile:
#li=line.strip().upper()
#if li.startswith("#") or len(li)<1:
#continue
#else:
#file_sep=li[:li.index("#")]
#break
#out=read_small("temp_read_dir.spc",entries,file_sep)
#subprocess.call("rm temp_read_dir.spc",shell=True)
#return out
# Writing
def write(blocks, file):
with open(file, 'w+') as f:
for b in blocks:
write_block_head(b, f)
write_block_entries(blocks[b], f)
def write_block_entries(values, file):
for v in values.keys():
file.write(' %s %10.4e # \n' % (v, float(values[v])))
def write_les_houches(block, values, point, file):
write_block_head(block, file)
write_block_numbers(block, values, point, file)
def write_block_head(name, file):
file.write("Block " + name.upper() + " # \n")
def write_block_numbers(name, values, Variable, file):
for v in values.keys():
# if type(values[v]) is string_types:
if isinstance(values[v], string_types): # to be 2 and 3 compatible
if str(eval(values[v]))==values[v]:
file.write(' %s %s # %s \n'
% (v, values[v],
name.upper() + "[" + str(v) + "]"))
else:
file.write(' %s %10.4e # %s \n'
% (v, float(eval(values[v])),
name.upper() + "[" + str(v) + "]"))
elif isinstance(values[v], int):
file.write(' %s %i # %s \n'
% (v, (values[v]), name.upper() + "[" + str(v) + "]"))
else:
file.write(' %s %10.4e # %s \n'
% (v, float(values[v]),
name.upper() + "[" + str(v) + "]"))
| 10,207 | 34.817544 | 90 | py |
enterprise_extensions | enterprise_extensions-master/setup.py |
"""The setup script."""
from setuptools import setup
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = [
"numpy>=1.16.3",
"scipy>=1.2.0",
"ephem>=3.7.6.0",
"healpy>=1.14.0",
"scikit-sparse>=0.4.5",
"pint-pulsar>=0.8.2",
"libstempo>=2.4.0",
"enterprise-pulsar>=3.3.0",
"scikit-learn>=0.24",
"emcee",
"ptmcmcsampler",
]
test_requirements = []
# Extract version
def get_version():
with open("enterprise_extensions/__init__.py") as f:
for line in f.readlines():
if "__version__" in line:
return line.split('"')[1]
setup(
name="enterprise_extensions",
version=get_version(),
description="Extensions, model shortcuts, and utilities for the enterprise PTA analysis framework.",
long_description=readme + "\n\n" + history,
long_description_content_type='text/x-rst',
classifiers=[
"Topic :: Scientific/Engineering :: Astronomy",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Mathematics",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="gravitational-wave, black-hole binary, pulsar-timing arrays",
url="https://github.com/stevertaylor/enterprise_extensions",
author="Stephen R. Taylor, Paul T. Baker, Jeffrey S. Hazboun, Sarah Vigeland",
author_email="jeffrey.hazboun@gmail.com",
license="MIT",
packages=[
"enterprise_extensions",
"enterprise_extensions.frequentist",
"enterprise_extensions.chromatic",
],
package_data={
"enterprise_extensions.chromatic": [
"ACE_SWEPAM_daily_proton_density_1998_2018_MJD_cm-3.txt"
]
},
test_suite="tests",
tests_require=test_requirements,
install_requires=requirements,
zip_safe=False,
)
| 2,094 | 27.310811 | 104 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/hypermodel.py |
import os
import numpy as np
import scipy.linalg as sl
from enterprise import constants as const
from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc
from .sampler import JumpProposal, get_parameter_groups, save_runtime_info
class HyperModel(object):
"""
Class to define hyper-model that is the concatenation of all models.
"""
def __init__(self, models, log_weights=None):
self.models = models
self.num_models = len(self.models)
self.log_weights = log_weights
self.param_names, ind = np.unique(np.concatenate([p.param_names
for p in self.models.values()]),
return_index=True)
self.param_names = self.param_names[np.argsort(ind)]
self.param_names = np.append(self.param_names, 'nmodel').tolist()
self.pulsars = np.unique(np.concatenate([p.pulsars
for p in self.models.values()]))
self.pulsars = np.sort(self.pulsars)
self.params = [p for p in self.models[0].params] # start of param list
uniq_params = [str(p) for p in self.models[0].params] # which params are unique
for model in self.models.values():
# find differences between next model and concatenation of previous
param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)
mask = np.array([str(p) in param_diffs for p in model.params])
# concatenate for next loop iteration
uniq_params = np.union1d([str(p) for p in model.params], uniq_params)
# extend list of unique parameters
self.params.extend([pp for pp in np.array(model.params)[mask]])
# get signal collections
self.snames = dict.fromkeys(np.unique(sum(sum([[[qq.signal_name for qq in pp._signals]
for pp in self.models[mm]._signalcollections]
for mm in self.models], []), [])))
for key in self.snames:
self.snames[key] = []
for mm in self.models:
for sc in self.models[mm]._signalcollections:
for signal in sc._signals:
self.snames[signal.signal_name].extend(signal.params)
for key in self.snames:
self.snames[key] = list(set(self.snames[key]))
for key in self.snames:
uniq_params, ind = np.unique([p.name for p in self.snames[key]],
return_index=True)
uniq_params = uniq_params[np.argsort(ind)].tolist()
all_params = [p.name for p in self.snames[key]]
self.snames[key] = np.array(self.snames[key])[[all_params.index(q)
for q in uniq_params]].tolist()
def get_lnlikelihood(self, x):
# find model index variable
idx = list(self.param_names).index('nmodel')
nmodel = int(np.rint(x[idx]))
# find parameters of active model
q = []
for par in self.models[nmodel].param_names:
idx = self.param_names.index(par)
q.append(x[idx])
# only active parameters enter likelihood
active_lnlike = self.models[nmodel].get_lnlikelihood(q)
if self.log_weights is not None:
active_lnlike += self.log_weights[nmodel]
return active_lnlike
def get_lnprior(self, x):
# find model index variable
idx = list(self.param_names).index('nmodel')
nmodel = int(np.rint(x[idx]))
if nmodel not in self.models.keys():
return -np.inf
else:
lnP = 0
for p in self.models.values():
q = []
for par in p.param_names:
idx = self.param_names.index(par)
q.append(x[idx])
lnP += p.get_lnprior(np.array(q))
return lnP
def get_parameter_groups(self):
unique_groups = []
for p in self.models.values():
groups = get_parameter_groups(p)
# check for any duplicate groups
# e.g. the GWB may have different indices in model 1 and model 2
for group in groups:
check_group = []
for idx in group:
param_name = p.param_names[idx]
check_group.append(self.param_names.index(param_name))
if check_group not in unique_groups:
unique_groups.append(check_group)
unique_groups.extend([[len(self.param_names) - 1]])
return unique_groups
def initial_sample(self):
"""
Draw an initial sample from within the hyper-model prior space.
"""
x0 = [np.array(p.sample()).ravel().tolist() for p in self.models[0].params]
uniq_params = [str(p) for p in self.models[0].params]
for model in self.models.values():
param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)
mask = np.array([str(p) in param_diffs for p in model.params])
x0.extend([np.array(pp.sample()).ravel().tolist() for pp in np.array(model.params)[mask]])
uniq_params = np.union1d([str(p) for p in model.params], uniq_params)
x0.extend([[0.1]])
return np.array([p for sublist in x0 for p in sublist])
def draw_from_nmodel_prior(self, x, iter, beta):
"""
Model-index uniform distribution prior draw.
"""
q = x.copy()
idx = list(self.param_names).index('nmodel')
q[idx] = np.random.uniform(-0.5, self.num_models-0.5)
lqxy = 0
return q, float(lqxy)
def setup_sampler(self, outdir='chains', resume=False, sample_nmodel=True,
empirical_distr=None, groups=None, human=None,
loglkwargs={}, logpkwargs={}):
"""
Sets up an instance of PTMCMC sampler.
        We initialize the sampler with the likelihood and prior functions
from the PTA object. We set up an initial jump covariance matrix
with fairly small jumps as this will be adapted as the MCMC runs.
We will setup an output directory in `outdir` that will contain
the chain (first n columns are the samples for the n parameters
and last 4 are log-posterior, log-likelihood, acceptance rate, and
an indicator variable for parallel tempering but it doesn't matter
because we aren't using parallel tempering).
We then add several custom jump proposals to the mix based on
whether or not certain parameters are in the model. These are
all either draws from the prior distribution of parameters or
draws from uniform distributions.
"""
# dimension of parameter space
ndim = len(self.param_names)
# initial jump covariance matrix
if os.path.exists(outdir+'/cov.npy') and resume:
cov = np.load(outdir+'/cov.npy')
# check that the one we load is the same shape as our data
cov_new = np.diag(np.ones(ndim) * 1.0**2)
if cov.shape != cov_new.shape:
msg = 'The covariance matrix (cov.npy) in the output folder is '
msg += 'the wrong shape for the parameters given. '
msg += 'Start with a different output directory or '
msg += 'change resume to False to overwrite the run that exists.'
raise ValueError(msg)
else:
cov = np.diag(np.ones(ndim) * 1.0**2) # used to be 0.1
# parameter groupings
if groups is None:
groups = self.get_parameter_groups()
sampler = ptmcmc(ndim, self.get_lnlikelihood, self.get_lnprior, cov,
groups=groups, outDir=outdir, resume=resume,
loglkwargs=loglkwargs, logpkwargs=logpkwargs)
save_runtime_info(self, sampler.outDir, human)
# additional jump proposals
jp = JumpProposal(self, self.snames, empirical_distr=empirical_distr)
sampler.jp = jp
# always add draw from prior
sampler.addProposalToCycle(jp.draw_from_prior, 5)
# try adding empirical proposals
if empirical_distr is not None:
print('Adding empirical proposals...\n')
sampler.addProposalToCycle(jp.draw_from_empirical_distr, 25)
# Red noise prior draw
if 'red noise' in self.snames:
print('Adding red noise prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_red_prior, 10)
# DM GP noise prior draw
if 'dm_gp' in self.snames:
print('Adding DM GP noise prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm_gp_prior, 10)
# DM annual prior draw
if 'dm_s1yr' in jp.snames:
print('Adding DM annual prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm1yr_prior, 10)
# DM dip prior draw
if 'dmexp' in '\t'.join(jp.snames):
print('Adding DM exponential dip prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmexpdip_prior, 10)
# DM cusp prior draw
if 'dm_cusp' in jp.snames:
print('Adding DM exponential cusp prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmexpcusp_prior, 10)
# DMX prior draw
if 'dmx_signal' in jp.snames:
print('Adding DMX prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmx_prior, 10)
# Chromatic GP noise prior draw
if 'chrom_gp' in self.snames:
print('Adding Chromatic GP noise prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_chrom_gp_prior, 10)
# SW prior draw
if 'gp_sw' in jp.snames:
print('Adding Solar Wind DM GP prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm_sw_prior, 10)
# Ephemeris prior draw
if 'd_jupiter_mass' in self.param_names:
print('Adding ephemeris model prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_ephem_prior, 10)
# GWB uniform distribution draw
if np.any([('gw' in par and 'log10_A' in par) for par in self.param_names]):
print('Adding GWB uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_gwb_log_uniform_distribution, 10)
# Dipole uniform distribution draw
if 'dipole_log10_A' in self.param_names:
print('Adding dipole uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_dipole_log_uniform_distribution, 10)
# Monopole uniform distribution draw
if 'monopole_log10_A' in self.param_names:
print('Adding monopole uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_monopole_log_uniform_distribution, 10)
# BWM prior draw
if 'bwm_log10_A' in self.param_names:
print('Adding BWM prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_bwm_prior, 10)
# FDM prior draw
if 'fdm_log10_A' in self.param_names:
print('Adding FDM prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_fdm_prior, 10)
# CW prior draw
if 'cw_log10_h' in self.param_names:
print('Adding CW prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_cw_log_uniform_distribution, 10)
# free spectrum prior draw
if np.any(['log10_rho' in par for par in self.param_names]):
print('Adding free spectrum prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_gw_rho_prior, 25)
# Prior distribution draw for parameters named GW
if any([str(p).split(':')[0] for p in list(self.params) if 'gw' in str(p)]):
print('Adding gw param prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_par_prior(
par_names=[str(p).split(':')[0] for
p in list(self.params)
if 'gw' in str(p)]), 10)
# Model index distribution draw
if sample_nmodel:
if 'nmodel' in self.param_names:
print('Adding nmodel uniform distribution draws...\n')
sampler.addProposalToCycle(self.draw_from_nmodel_prior, 25)
return sampler
def get_process_timeseries(self, psr, chain, burn, comp='DM',
mle=False, model=0):
"""
Construct a time series realization of various constrained processes.
:param psr: enterprise pulsar object
:param chain: MCMC chain from sampling all models
:param burn: desired number of initial samples to discard
:param comp: which process to reconstruct? (red noise or DM) [default=DM]
:param mle: create time series from ML of GP hyper-parameters? [default=False]
:param model: which sub-model within the super-model to reconstruct from? [default=0]
:return ret: time-series of the reconstructed process
"""
wave = 0
pta = self.models[model]
model_chain = chain[np.rint(chain[:, -5])==model, :]
# get parameter dictionary
if mle:
ind = np.argmax(model_chain[:, -4])
else:
ind = np.random.randint(burn, model_chain.shape[0])
params = {par: model_chain[ind, ct]
for ct, par in enumerate(self.param_names)
if par in pta.param_names}
# deterministic signal part
wave += pta.get_delay(params=params)[0]
# get linear parameters
# Nvec = pta.get_ndiag(params)[0] # Not currently used in code
phiinv = pta.get_phiinv(params, logdet=False)[0]
T = pta.get_basis(params)[0]
d = pta.get_TNr(params)[0]
TNT = pta.get_TNT(params)[0]
# Red noise piece
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
u, s, _ = sl.svd(Sigma)
mn = np.dot(u, np.dot(u.T, d)/s)
Li = u * np.sqrt(1/s)
except np.linalg.LinAlgError:
Q, R = sl.qr(Sigma)
Sigi = sl.solve(R, Q.T)
mn = np.dot(Sigi, d)
u, s, _ = sl.svd(Sigi)
Li = u * np.sqrt(1/s)
b = mn + np.dot(Li, np.random.randn(Li.shape[0]))
# find basis indices
pardict = {}
for sc in pta._signalcollections:
ntot = 0
for sig in sc._signals:
if sig.signal_type == 'basis':
basis = sig.get_basis(params=params)
nb = basis.shape[1]
pardict[sig.signal_name] = np.arange(ntot, nb+ntot)
ntot += nb
# DM quadratic + GP
if comp == 'DM':
idx = pardict['dm_gp']
wave += np.dot(T[:, idx], b[idx])
ret = wave * (psr.freqs**2 * const.DM_K * 1e12)
elif comp == 'scattering':
idx = pardict['scattering_gp']
wave += np.dot(T[:, idx], b[idx])
ret = wave * (psr.freqs**4) # * const.DM_K * 1e12)
elif comp == 'red':
idx = pardict['red noise']
wave += np.dot(T[:, idx], b[idx])
ret = wave
elif comp == 'FD':
idx = pardict['FD']
wave += np.dot(T[:, idx], b[idx])
ret = wave
elif comp == 'all':
wave += np.dot(T, b)
ret = wave
else:
ret = wave
return ret
def summary(self, to_stdout=False):
"""generate summary string for HyperModel, including all PTAs
:param to_stdout: [bool]
print summary to `stdout` instead of returning it
:return: [string]
"""
summary = ""
for ii, pta in self.models.items():
summary += "model " + str(ii) + "\n"
summary += "=" * 9 + "\n\n"
summary += pta.summary()
summary += "=" * 90 + "\n\n"
if to_stdout:
print(summary)
else:
return summary
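# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream module). A typical driver for
# the HyperModel class above looks roughly like the commented lines below;
# `pta0` and `pta1` stand for two enterprise PTA objects built elsewhere and
# are assumptions of this sketch, not objects defined in this file:
#
#   super_model = HyperModel({0: pta0, 1: pta1})
#   sampler = super_model.setup_sampler(outdir='chains/model_selection')
#   sampler.sample(super_model.initial_sample(), int(1e6))
#
# Afterwards the odds between the two models can be read off the trailing
# 'nmodel' column of the chain; as in get_process_timeseries above, that
# column sits 5 places from the end of each chain row. The chain file name
# below is a placeholder for the PTMCMCSampler output.
if __name__ == "__main__":
    chain = np.loadtxt('chains/model_selection/chain_1.txt')
    burn = int(0.25 * chain.shape[0])
    nmodel = chain[burn:, -5]
    odds = np.sum(nmodel >= 0.5) / max(np.sum(nmodel < 0.5), 1)
    print('posterior odds, model 1 vs model 0:', odds)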
| 16,731 | 37.200913 | 102 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/gp_kernels.py |
import numpy as np
from enterprise.signals import signal_base, utils
__all__ = ['linear_interp_basis_dm',
'linear_interp_basis_freq',
'dmx_ridge_prior',
'periodic_kernel',
'se_kernel',
'se_dm_kernel',
'get_tf_quantization_matrix',
'tf_kernel',
'sf_kernel',
]
# linear interpolation basis in time with nu^-2 scaling
@signal_base.function
def linear_interp_basis_dm(toas, freqs, dt=30*86400):
# get linear interpolation basis in time
U, avetoas = utils.linear_interp_basis(toas, dt=dt)
# scale with radio frequency
Dm = (1400/freqs)**2
return U * Dm[:, None], avetoas
@signal_base.function
def linear_interp_basis_chromatic(toas, freqs, dt=30*86400, idx=4):
"""Linear interpolation basis in time with nu^-4 scaling"""
# get linear interpolation basis in time
U, avetoas = utils.linear_interp_basis(toas, dt=dt)
# scale with radio frequency
Dm = (1400/freqs)**idx
return U * Dm[:, None], avetoas
@signal_base.function
def linear_interp_basis_freq(freqs, df=64):
"""Linear interpolation in radio frequency"""
return utils.linear_interp_basis(freqs, dt=df)
@signal_base.function
def dmx_ridge_prior(avetoas, log10_sigma=-7):
"""DMX-like signal with Gaussian prior"""
sigma = 10**log10_sigma
return sigma**2 * np.ones_like(avetoas)
@signal_base.function
def periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2,
log10_gam_p=0, log10_p=0):
"""Quasi-periodic kernel for DM"""
r = np.abs(avetoas[None, :] - avetoas[:, None])
# convert units to seconds
sigma = 10**log10_sigma
l = 10**log10_ell * 86400
p = 10**log10_p * 3.16e7
gam_p = 10**log10_gam_p
d = np.eye(r.shape[0]) * (sigma/500)**2
K = sigma**2 * np.exp(-r**2/2/l**2 - gam_p*np.sin(np.pi*r/p)**2) + d
return K
@signal_base.function
def se_kernel(avefreqs, log10_sigma=-7, log10_lam=3):
"""Squared-exponential kernel for FD"""
tm = np.abs(avefreqs[None, :] - avefreqs[:, None])
lam = 10**log10_lam
sigma = 10**log10_sigma
d = np.eye(tm.shape[0]) * (sigma/500)**2
return sigma**2 * np.exp(-tm**2/2/lam) + d
@signal_base.function
def se_dm_kernel(avetoas, log10_sigma=-7, log10_ell=2):
"""Squared-exponential kernel for DM"""
r = np.abs(avetoas[None, :] - avetoas[:, None])
# Convert everything into seconds
l = 10**log10_ell * 86400
sigma = 10**log10_sigma
d = np.eye(r.shape[0]) * (sigma/500)**2
K = sigma**2 * np.exp(-r**2/2/l**2) + d
return K
@signal_base.function
def get_tf_quantization_matrix(toas, freqs, dt=30*86400, df=None, dm=False, dm_idx=2):
"""
Quantization matrix in time and radio frequency to cut down on the kernel
size.
"""
if df is None:
dfs = [(600, 1000), (1000, 1900), (1900, 3000), (3000, 5000)]
else:
fmin = freqs.min()
fmax = freqs.max()
fs = np.arange(fmin, fmax+df, df)
dfs = [(fs[ii], fs[ii+1]) for ii in range(len(fs)-1)]
Us, avetoas, avefreqs, masks = [], [], [], []
for rng in dfs:
mask = np.logical_and(freqs>=rng[0], freqs<rng[1])
if any(mask):
masks.append(mask)
U, _ = utils.create_quantization_matrix(toas[mask],
dt=dt, nmin=1)
avetoa = np.array([toas[mask][idx.astype(bool)].mean()
for idx in U.T])
avefreq = np.array([freqs[mask][idx.astype(bool)].mean()
for idx in U.T])
Us.append(U)
avetoas.append(avetoa)
avefreqs.append(avefreq)
    nc = sum(U.shape[1] for U in Us)
U = np.zeros((len(toas), nc))
avetoas = np.concatenate(avetoas)
idx = np.argsort(avetoas)
avefreqs = np.concatenate(avefreqs)
nctot = 0
for ct, mask in enumerate(masks):
Umat = Us[ct]
nn = Umat.shape[1]
U[mask, nctot:nn+nctot] = Umat
nctot += nn
if dm:
weights = (1400/freqs)**dm_idx
else:
weights = np.ones_like(freqs)
return U[:, idx] * weights[:, None], {'avetoas': avetoas[idx],
'avefreqs': avefreqs[idx]}
@signal_base.function
def tf_kernel(labels, log10_sigma=-7, log10_ell=2, log10_gam_p=0,
log10_p=0, log10_ell2=4, log10_alpha_wgt=0):
"""
The product of a quasi-periodic time kernel and
a rational-quadratic frequency kernel.
"""
avetoas = labels['avetoas']
avefreqs = labels['avefreqs']
r = np.abs(avetoas[None, :] - avetoas[:, None])
r2 = np.abs(avefreqs[None, :] - avefreqs[:, None])
# convert units to seconds
sigma = 10**log10_sigma
l = 10**log10_ell * 86400
l2 = 10**log10_ell2
p = 10**log10_p * 3.16e7
gam_p = 10**log10_gam_p
alpha_wgt = 10**log10_alpha_wgt
d = np.eye(r.shape[0]) * (sigma/500)**2
Kt = sigma**2 * np.exp(-r**2/2/l**2 - gam_p*np.sin(np.pi*r/p)**2)
Kv = (1+r2**2/2/alpha_wgt/l2**2)**(-alpha_wgt)
return Kt * Kv + d
@signal_base.function
def sf_kernel(labels, log10_sigma=-7, log10_ell=2,
log10_ell2=4, log10_alpha_wgt=0):
"""
The product of a squared-exponential time kernel and
a rational-quadratic frequency kernel.
"""
avetoas = labels['avetoas']
avefreqs = labels['avefreqs']
r = np.abs(avetoas[None, :] - avetoas[:, None])
r2 = np.abs(avefreqs[None, :] - avefreqs[:, None])
# Convert everything into seconds
l = 10**log10_ell * 86400
sigma = 10**log10_sigma
l2 = 10**log10_ell2
alpha_wgt = 10**log10_alpha_wgt
d = np.eye(r.shape[0]) * (sigma/500)**2
Kt = sigma**2 * np.exp(-r**2/2/l**2)
Kv = (1+r2**2/2/alpha_wgt/l2**2)**(-alpha_wgt)
return Kt * Kv + d
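# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream module): evaluates one of the
# kernels above on a toy grid of averaged TOAs. Calling the decorated function
# with concrete values is assumed to fall through to a plain evaluation,
# mirroring how get_tf_quantization_matrix above already calls enterprise's
# decorated utils functions with plain arrays. In a timing model these kernels
# are normally paired with one of the interpolation bases above through
# enterprise's gp_signals.BasisGP(prior, basis, name='dm_gp').
if __name__ == "__main__":
    avetoas = np.arange(10) * 30 * 86400.0  # ten 30-day coarse-grained epochs [s]
    K = se_dm_kernel(avetoas, log10_sigma=-7, log10_ell=2)
    print(K.shape)  # (10, 10) covariance matrix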
| 5,878 | 28.691919 | 86 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/deterministic.py |
import numpy as np
from enterprise import constants as const
from enterprise.signals import (deterministic_signals, parameter, signal_base,
utils)
def fdm_block(Tmin, Tmax, amp_prior='log-uniform', name='fdm',
amp_lower=-18, amp_upper=-11,
freq_lower=-9, freq_upper=-7,
use_fixed_freq=False, fixed_freq=-8):
"""
Returns deterministic fuzzy dark matter model:
1. FDM parameterized by frequency, phase,
and amplitude (mass and DM energy density).
:param Tmin:
Min time to search, probably first TOA (MJD).
:param Tmax:
Max time to search, probably last TOA (MJD).
:param amp_prior:
Prior on log10_A.
:param name:
Name of FDM signal.
:param amp_upper, amp_lower, freq_upper, freq_lower:
The log-space bounds on the amplitude and frequency priors.
:param use_fixed_freq:
Whether to do a fixed-frequency run and not search over the frequency.
:param fixed_freq:
The frequency value to do a fixed-frequency run with.
"""
# BWM parameters
amp_name = '{}_log10_A'.format(name)
log10_A_fdm = parameter.Uniform(amp_lower, amp_upper)(amp_name)
if use_fixed_freq is True:
log10_f_fdm = fixed_freq
if use_fixed_freq is False:
freq_name = '{}_log10_f'.format(name)
log10_f_fdm = parameter.Uniform(freq_lower, freq_upper)(freq_name)
phase_e_name = '{}_phase_e'.format(name)
phase_e_fdm = parameter.Uniform(0, 2*np.pi)(phase_e_name)
phase_p = parameter.Uniform(0, 2*np.pi)
fdm_wf = fdm_delay(log10_A=log10_A_fdm, log10_f=log10_f_fdm,
phase_e=phase_e_fdm, phase_p=phase_p)
fdm = deterministic_signals.Deterministic(fdm_wf, name=name)
return fdm
def cw_block_circ(amp_prior='log-uniform', dist_prior=None,
skyloc=None, log10_fgw=None,
psrTerm=False, tref=0, name='cw'):
"""
    Returns deterministic, circular orbit continuous GW model:
:param amp_prior:
Prior on log10_h. Default is "log-uniform."
Use "uniform" for upper limits, or "None" to search over
log10_dist instead.
:param dist_prior:
Prior on log10_dist. Default is "None," meaning that the
search is over log10_h instead of log10_dist. Use "log-uniform"
to search over log10_h with a log-uniform prior.
:param skyloc:
Fixed sky location of CW signal search as [cos(theta), phi].
Search over sky location if ``None`` given.
:param log10_fgw:
Fixed log10 GW frequency of CW signal search.
Search over GW frequency if ``None`` given.
:param psrTerm:
Boolean for whether to include the pulsar term. Default is False.
:param name:
Name of CW signal.
"""
if dist_prior is None:
log10_dist = None
if amp_prior == 'uniform':
log10_h = parameter.LinearExp(-18.0, -11.0)('{}_log10_h'.format(name))
elif amp_prior == 'log-uniform':
log10_h = parameter.Uniform(-18.0, -11.0)('{}_log10_h'.format(name))
elif dist_prior == 'log-uniform':
log10_dist = parameter.Uniform(-2.0, 4.0)('{}_log10_dL'.format(name))
log10_h = None
# chirp mass [Msol]
log10_Mc = parameter.Uniform(6.0, 10.0)('{}_log10_Mc'.format(name))
# GW frequency [Hz]
if log10_fgw is None:
log10_fgw = parameter.Uniform(-9.0, -7.0)('{}_log10_fgw'.format(name))
else:
log10_fgw = parameter.Constant(log10_fgw)('{}_log10_fgw'.format(name))
# orbital inclination angle [radians]
cosinc = parameter.Uniform(-1.0, 1.0)('{}_cosinc'.format(name))
# initial GW phase [radians]
phase0 = parameter.Uniform(0.0, 2*np.pi)('{}_phase0'.format(name))
# polarization
psi_name = '{}_psi'.format(name)
psi = parameter.Uniform(0, np.pi)(psi_name)
# sky location
costh_name = '{}_costheta'.format(name)
phi_name = '{}_phi'.format(name)
if skyloc is None:
costh = parameter.Uniform(-1, 1)(costh_name)
phi = parameter.Uniform(0, 2*np.pi)(phi_name)
else:
costh = parameter.Constant(skyloc[0])(costh_name)
phi = parameter.Constant(skyloc[1])(phi_name)
if psrTerm:
# orbital phase
p_phase = parameter.Uniform(0, np.pi)
p_dist = parameter.Normal(0, 1)
else:
p_phase = None
p_dist = 0
# continuous wave signal
wf = cw_delay(cos_gwtheta=costh, gwphi=phi, cos_inc=cosinc,
log10_mc=log10_Mc, log10_fgw=log10_fgw,
log10_h=log10_h, log10_dist=log10_dist,
phase0=phase0, psi=psi,
psrTerm=True, p_dist=p_dist, p_phase=p_phase,
phase_approx=True, check=False,
tref=tref)
cw = CWSignal(wf, ecc=False, psrTerm=psrTerm)
return cw
def cw_block_ecc(amp_prior='log-uniform', skyloc=None, log10_F=None,
ecc=None, psrTerm=False, tref=0, name='cw'):
"""
Returns deterministic, eccentric orbit continuous GW model:
:param amp_prior:
Prior on log10_h and log10_Mc/log10_dL. Default is "log-uniform" with
log10_Mc and log10_dL searched over. Use "uniform" for upper limits,
log10_h searched over.
:param skyloc:
Fixed sky location of CW signal search as [cos(theta), phi].
Search over sky location if ``None`` given.
:param log10_F:
Fixed log-10 orbital frequency of CW signal search.
Search over orbital frequency if ``None`` given.
:param ecc:
Fixed eccentricity of SMBHB search.
Search over eccentricity if ``None`` given.
:param psrTerm:
Boolean for whether to include the pulsar term. Default is False.
:param name:
Name of CW signal.
"""
if amp_prior == 'uniform':
log10_h = parameter.LinearExp(-18.0, -11.0)('{}_log10_h'.format(name))
elif amp_prior == 'log-uniform':
log10_h = None
# chirp mass [Msol]
log10_Mc = parameter.Uniform(6.0, 10.0)('{}_log10_Mc'.format(name))
# luminosity distance [Mpc]
log10_dL = parameter.Uniform(-2.0, 4.0)('{}_log10_dL'.format(name))
# orbital frequency [Hz]
if log10_F is None:
log10_Forb = parameter.Uniform(-9.0, -7.0)('{}_log10_Forb'.format(name))
else:
log10_Forb = parameter.Constant(log10_F)('{}_log10_Forb'.format(name))
# orbital inclination angle [radians]
cosinc = parameter.Uniform(-1.0, 1.0)('{}_cosinc'.format(name))
# periapsis position angle [radians]
gamma_0 = parameter.Uniform(0.0, np.pi)('{}_gamma0'.format(name))
# Earth-term eccentricity
if ecc is None:
e_0 = parameter.Uniform(0.0, 0.99)('{}_e0'.format(name))
else:
e_0 = parameter.Constant(ecc)('{}_e0'.format(name))
# initial mean anomaly [radians]
l_0 = parameter.Uniform(0.0, 2.0*np.pi)('{}_l0'.format(name))
# mass ratio = M_2/M_1
q = parameter.Constant(1.0)('{}_q'.format(name))
# polarization
pol_name = '{}_pol'.format(name)
pol = parameter.Uniform(0, np.pi)(pol_name)
# sky location
costh_name = '{}_costheta'.format(name)
phi_name = '{}_phi'.format(name)
if skyloc is None:
costh = parameter.Uniform(-1, 1)(costh_name)
phi = parameter.Uniform(0, 2*np.pi)(phi_name)
else:
costh = parameter.Constant(skyloc[0])(costh_name)
phi = parameter.Constant(skyloc[1])(phi_name)
# continuous wave signal
wf = compute_eccentric_residuals(cos_gwtheta=costh, gwphi=phi,
log10_mc=log10_Mc, log10_dist=log10_dL,
log10_h=log10_h, log10_F=log10_Forb,
cos_inc=cosinc, psi=pol, gamma0=gamma_0,
e0=e_0, l0=l_0, q=q, nmax=400,
pdist=None, pphase=None, pgam=None,
tref=tref, check=False)
cw = CWSignal(wf, ecc=True, psrTerm=psrTerm)
return cw
@signal_base.function
def cw_delay(toas, pos, pdist,
cos_gwtheta=0, gwphi=0, cos_inc=0,
log10_mc=9, log10_fgw=-8, log10_dist=None, log10_h=None,
phase0=0, psi=0,
psrTerm=False, p_dist=1, p_phase=None,
evolve=False, phase_approx=False, check=False,
tref=0):
"""
    Function to create GW induced residuals from a SMBHB as
    defined in Ellis et al. 2012, 2013.
:param toas:
Pular toas in seconds
:param pos:
Unit vector from the Earth to the pulsar
:param pdist:
Pulsar distance (mean and uncertainty) [kpc]
:param cos_gwtheta:
Cosine of Polar angle of GW source in celestial coords [radians]
:param gwphi:
Azimuthal angle of GW source in celestial coords [radians]
:param cos_inc:
cosine of Inclination of GW source [radians]
:param log10_mc:
        log10 of Chirp mass of SMBHB [solar masses]
:param log10_fgw:
log10 of Frequency of GW (twice the orbital frequency) [Hz]
:param log10_dist:
        log10 of Luminosity distance to SMBHB [Mpc],
used to compute strain, if not None
:param log10_h:
log10 of GW strain,
used to compute distance, if not None
:param phase0:
Initial GW phase of source [radians]
:param psi:
Polarization angle of GW source [radians]
:param psrTerm:
Option to include pulsar term [boolean]
:param p_dist:
Pulsar distance parameter
:param p_phase:
Use pulsar phase to determine distance [radian]
:param evolve:
Option to include/exclude full evolution [boolean]
:param phase_approx:
Option to include/exclude phase evolution across observation time
[boolean]
:param check:
Check if frequency evolves significantly over obs. time [boolean]
:param tref:
Reference time for phase and frequency [s]
:return: Vector of induced residuals
"""
# convert units to time
mc = 10**log10_mc * const.Tsun
fgw = 10**log10_fgw
gwtheta = np.arccos(cos_gwtheta)
inc = np.arccos(cos_inc)
p_dist = (pdist[0] + pdist[1]*p_dist)*const.kpc/const.c
if log10_h is None and log10_dist is None:
raise ValueError("one of log10_dist or log10_h must be non-None")
elif log10_h is not None and log10_dist is not None:
raise ValueError("only one of log10_dist or log10_h can be non-None")
elif log10_h is None:
dist = 10**log10_dist * const.Mpc / const.c
else:
dist = 2 * mc**(5/3) * (np.pi*fgw)**(2/3) / 10**log10_h
if check:
# check that frequency is not evolving significantly over obs. time
fstart = fgw * (1 - 256/5 * mc**(5/3) * fgw**(8/3) * toas[0])**(-3/8)
fend = fgw * (1 - 256/5 * mc**(5/3) * fgw**(8/3) * toas[-1])**(-3/8)
df = fend - fstart
# observation time
Tobs = toas.max()-toas.min()
fbin = 1/Tobs
if np.abs(df) > fbin:
print('WARNING: Frequency is evolving over more than one '
'frequency bin.')
print('f0 = {0}, f1 = {1}, df = {2}, fbin = {3}'.format(fstart, fend, df, fbin))
return np.ones(len(toas)) * np.nan
# get antenna pattern funcs and cosMu
# write function to get pos from theta,phi
fplus, fcross, cosMu = utils.create_gw_antenna_pattern(pos, gwtheta, gwphi)
# get pulsar time
toas -= tref
if p_dist > 0:
tp = toas-p_dist*(1-cosMu)
else:
tp = toas
# orbital frequency
w0 = np.pi * fgw
phase0 /= 2 # convert GW to orbital phase
# omegadot = 96/5 * mc**(5/3) * w0**(11/3) # Not currently used in code
# evolution
if evolve:
# calculate time dependent frequency at earth and pulsar
omega = w0 * (1 - 256/5 * mc**(5/3) * w0**(8/3) * toas)**(-3/8)
omega_p = w0 * (1 - 256/5 * mc**(5/3) * w0**(8/3) * tp)**(-3/8)
if p_dist > 0:
omega_p0 = w0 * (1 + 256/5
* mc**(5/3) * w0**(8/3) * p_dist*(1-cosMu))**(-3/8)
else:
omega_p0 = w0
# calculate time dependent phase
phase = phase0 + 1/32/mc**(5/3) * (w0**(-5/3) - omega**(-5/3))
if p_phase is None:
phase_p = phase0 + 1/32/mc**(5/3) * (w0**(-5/3) - omega_p**(-5/3))
else:
phase_p = (phase0 + p_phase
+ 1/32*mc**(-5/3) * (omega_p0**(-5/3) - omega_p**(-5/3)))
elif phase_approx:
# monochromatic
omega = w0
if p_dist > 0:
omega_p = w0 * (1 + 256/5
* mc**(5/3) * w0**(8/3) * p_dist*(1-cosMu))**(-3/8)
else:
omega_p = w0
# phases
phase = phase0 + omega * toas
if p_phase is not None:
phase_p = phase0 + p_phase + omega_p*toas
else:
phase_p = (phase0 + omega_p*toas
+ 1/32/mc**(5/3) * (w0**(-5/3) - omega_p**(-5/3)))
# no evolution
else:
# monochromatic
omega = np.pi*fgw
omega_p = omega
# phases
phase = phase0 + omega * toas
phase_p = phase0 + omega * tp
# define time dependent coefficients
At = -0.5*np.sin(2*phase)*(3+np.cos(2*inc))
Bt = 2*np.cos(2*phase)*np.cos(inc)
At_p = -0.5*np.sin(2*phase_p)*(3+np.cos(2*inc))
Bt_p = 2*np.cos(2*phase_p)*np.cos(inc)
# now define time dependent amplitudes
alpha = mc**(5./3.)/(dist*omega**(1./3.))
alpha_p = mc**(5./3.)/(dist*omega_p**(1./3.))
# define rplus and rcross
rplus = alpha*(-At*np.cos(2*psi)+Bt*np.sin(2*psi))
rcross = alpha*(At*np.sin(2*psi)+Bt*np.cos(2*psi))
rplus_p = alpha_p*(-At_p*np.cos(2*psi)+Bt_p*np.sin(2*psi))
rcross_p = alpha_p*(At_p*np.sin(2*psi)+Bt_p*np.cos(2*psi))
# residuals
if psrTerm:
res = fplus*(rplus_p-rplus)+fcross*(rcross_p-rcross)
else:
res = -fplus*rplus - fcross*rcross
return res
@signal_base.function
def bwm_delay(toas, pos, log10_h=-14.0, cos_gwtheta=0.0, gwphi=0.0, gwpol=0.0, t0=55000,
antenna_pattern_fn=None):
"""
Function that calculates the earth-term gravitational-wave
burst-with-memory signal, as described in:
    Seto et al., van Haasteren and Levin, Pshirkov et al., Cordes and Jenet.
This version uses the F+/Fx polarization modes, as verified with the
Continuous Wave and Anisotropy papers.
:param toas: Time-of-arrival measurements [s]
:param pos: Unit vector from Earth to pulsar
:param log10_h: log10 of GW strain
:param cos_gwtheta: Cosine of GW polar angle
:param gwphi: GW azimuthal polar angle [rad]
:param gwpol: GW polarization angle
:param t0: Burst central time [day]
:param antenna_pattern_fn:
User defined function that takes `pos`, `gwtheta`, `gwphi` as
arguments and returns (fplus, fcross)
:return: the waveform as induced timing residuals (seconds)
"""
# convert
h = 10 ** log10_h
gwtheta = np.arccos(cos_gwtheta)
t0 *= const.day
# antenna patterns
if antenna_pattern_fn is None:
apc = utils.create_gw_antenna_pattern(pos, gwtheta, gwphi)
else:
apc = antenna_pattern_fn(pos, gwtheta, gwphi)
# grab fplus, fcross
fp, fc = apc[0], apc[1]
# combined polarization
pol = np.cos(2 * gwpol) * fp + np.sin(2 * gwpol) * fc
# Return the time-series for the pulsar
return pol * h * np.heaviside(toas - t0, 0.5) * (toas - t0)
@signal_base.function
def bwm_sglpsr_delay(toas, sign, log10_A=-15, t0=55000):
"""
Function that calculates the earth-term gravitational-wave
burst-with-memory signal for an optimally oriented source in a single pulsar
:param toas: Time-of-arrival measurements [s]
:param log10_A: log10 of the amplitude of the ramp (delta_f/f)
:param t0: Burst central time [day]
:return: the waveform as induced timing residuals (seconds)
"""
A = 10 ** log10_A
t0 *= const.day
# Return the time-series for the pulsar
def heaviside(x):
return 0.5 * (np.sign(x) + 1)
# return 0 #Fix the return to 0 in order to test what the heck is wrong with red noise detection in bwm
return A * np.sign(sign) * heaviside(toas - t0) * (toas - t0)
@signal_base.function
def compute_eccentric_residuals(toas, theta, phi, cos_gwtheta, gwphi,
log10_mc, log10_dist, log10_h, log10_F, cos_inc,
psi, gamma0, e0, l0, q, nmax=400, pdist=1.0,
pphase=None, pgam=None, psrTerm=False,
tref=0, check=False):
"""
Simulate GW from eccentric SMBHB. Waveform models from
Taylor et al. (2015) and Barack and Cutler (2004).
WARNING: This residual waveform is only accurate if the
GW frequency is not significantly evolving over the
observation time of the pulsar.
    :param toas: pulsar observation times
:param theta: polar coordinate of pulsar
:param phi: azimuthal coordinate of pulsar
:param gwtheta: Polar angle of GW source in celestial coords [radians]
:param gwphi: Azimuthal angle of GW source in celestial coords [radians]
    :param log10_mc: Base-10 log of chirp mass of SMBHB [solar masses]
    :param log10_dist: Base-10 luminosity distance to SMBHB [Mpc]
:param log10_F: base-10 orbital frequency of SMBHB [Hz]
:param inc: Inclination of GW source [radians]
:param psi: Polarization of GW source [radians]
:param gamma0: Initial angle of periastron [radians]
:param e0: Initial eccentricity of SMBHB
:param l0: Initial mean anomoly [radians]
:param q: Mass ratio of SMBHB
:param nmax: Number of harmonics to use in waveform decomposition
:param pdist: Pulsar distance [kpc]
:param pphase: Pulsar phase [rad]
:param pgam: Pulsar angle of periastron [rad]
:param psrTerm: Option to include pulsar term [boolean]
    :param tref: Fiducial time at which initial parameters are referenced [s]
:param check: Check if frequency evolves significantly over obs. time
:returns: Vector of induced residuals
"""
# convert from sampling
F = 10.0**log10_F
mc = 10.0**log10_mc
dist = 10.0**log10_dist
if log10_h is not None:
h0 = 10.0**log10_h
else:
h0 = None
inc = np.arccos(cos_inc)
gwtheta = np.arccos(cos_gwtheta)
# define variable for later use
cosgwtheta, cosgwphi = np.cos(gwtheta), np.cos(gwphi)
singwtheta, singwphi = np.sin(gwtheta), np.sin(gwphi)
sin2psi, cos2psi = np.sin(2*psi), np.cos(2*psi)
# unit vectors to GW source
m = np.array([singwphi, -cosgwphi, 0.0])
n = np.array([-cosgwtheta*cosgwphi, -cosgwtheta*singwphi, singwtheta])
omhat = np.array([-singwtheta*cosgwphi, -singwtheta*singwphi, -cosgwtheta])
# pulsar position vector
phat = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi),
np.cos(theta)])
fplus = 0.5 * (np.dot(m, phat)**2 - np.dot(n, phat)**2) / (1+np.dot(omhat, phat))
fcross = (np.dot(m, phat)*np.dot(n, phat)) / (1 + np.dot(omhat, phat))
cosMu = -np.dot(omhat, phat)
# get values from pulsar object
toas = toas.copy() - tref
if check:
# check that frequency is not evolving significantly over obs. time
y = utils.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc, q,
np.array([0.0, toas.max()]))
# initial and final values over observation time
Fc0, ec0, gc0, phic0 = y[0, :]
Fc1, ec1, gc1, phic1 = y[-1, :]
        # observation time and frequency resolution
        Tobs = toas.max() - toas.min()
        fbin = 1 / Tobs
        if np.abs(Fc0 - Fc1) > fbin:
            print('WARNING: Frequency is evolving over more than one frequency bin.')
            print('F0 = {0}, F1 = {1}, fbin = {2}'.format(Fc0, Fc1, fbin))
return np.ones(len(toas)) * np.nan
# get gammadot for earth term
gammadot = utils.get_gammadot(F, mc, q, e0)
# get number of harmonics to use
if not isinstance(nmax, int):
if e0 < 0.999 and e0 > 0.001:
nharm = int(nmax(e0))
elif e0 < 0.001:
nharm = 2
else:
nharm = int(nmax(0.999))
else:
nharm = nmax
# no more than 100 harmonics
nharm = min(nharm, 100)
splus, scross = utils.calculate_splus_scross(nmax=nharm, mc=mc, dl=dist,
h0=h0, F=F, e=e0, t=toas.copy(),
l0=l0, gamma=gamma0,
gammadot=gammadot, inc=inc)
if psrTerm:
# pulsar distance
pd = pdist
# convert units
pd *= const.kpc / const.c
# get pulsar time
tp = toas.copy() - pd * (1-cosMu)
# solve coupled system of equations to get pulsar term values
y = utils.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc,
q, np.array([0.0, tp.min()]))
# get pulsar term values
if np.any(y):
Fp, ep, gp, phip = y[-1, :]
# get gammadot at pulsar term
gammadotp = utils.get_gammadot(Fp, mc, q, ep)
# get phase at pulsar
if pphase is None:
lp = phip
else:
lp = pphase
# get angle of periastron at pulsar
if pgam is None:
gp = gp
else:
gp = pgam
# get number of harmonics to use
if not isinstance(nmax, int):
if e0 < 0.999 and e0 > 0.001:
nharm = int(nmax(e0))
elif e0 < 0.001:
nharm = 2
else:
nharm = int(nmax(0.999))
else:
nharm = nmax
            # no more than 100 harmonics
nharm = min(nharm, 100)
splusp, scrossp = utils.calculate_splus_scross(nmax=nharm, mc=mc,
dl=dist, h0=h0,
F=Fp, e=ep,
t=toas.copy(),
l0=lp, gamma=gp,
gammadot=gammadotp,
inc=inc)
rr = (fplus*cos2psi - fcross*sin2psi) * (splusp - splus) + \
(fplus*sin2psi + fcross*cos2psi) * (scrossp - scross)
else:
rr = np.ones(len(toas)) * np.nan
else:
rr = - (fplus*cos2psi - fcross*sin2psi) * splus - \
(fplus*sin2psi + fcross*cos2psi) * scross
return rr
def CWSignal(cw_wf, ecc=False, psrTerm=False, name='cw'):
BaseClass = deterministic_signals.Deterministic(cw_wf, name=name)
class CWSignal(BaseClass):
def __init__(self, psr):
super(CWSignal, self).__init__(psr)
self._wf[''].add_kwarg(psrTerm=psrTerm)
if ecc:
pgam = parameter.Uniform(0, 2*np.pi)('_'.join([psr.name,
'pgam',
name]))
self._params['pgam'] = pgam
self._wf['']._params['pgam'] = pgam
return CWSignal
@signal_base.function
def generalized_gwpol_psd(f, log10_A_tt=-15, log10_A_st=-15,
log10_A_vl=-15, log10_A_sl=-15,
kappa=10/3, p_dist=1.0):
"""
PSD for a generalized mixture of scalar+vector dipole radiation
and tensorial quadrupole radiation from SMBHBs.
"""
df = np.diff(np.concatenate((np.array([0]), f[::2])))
euler_e = 0.5772156649
pdist = p_dist * const.kpc / const.c
orf_aa_tt = (2/3) * np.ones(len(f))
orf_aa_st = (2/3) * np.ones(len(f))
orf_aa_vl = 2*np.log(4*np.pi*f*pdist) - 14/3 + 2*euler_e
orf_aa_sl = np.pi**2*f*pdist/4 - \
np.log(4*np.pi*f*pdist) + 37/24 - euler_e
prefactor = (1 + kappa**2) / (1 + kappa**2 * (f / const.fyr)**(-2/3))
gwpol_amps = 10**(2*np.array([log10_A_tt, log10_A_st,
log10_A_vl, log10_A_sl]))
gwpol_factors = np.array([orf_aa_tt*gwpol_amps[0],
orf_aa_st*gwpol_amps[1],
orf_aa_vl*gwpol_amps[2],
orf_aa_sl*gwpol_amps[3]])
S_psd = prefactor * (gwpol_factors[0, :] * (f / const.fyr)**(-4/3) +
np.sum(gwpol_factors[1:, :], axis=0) *
(f / const.fyr)**(-2)) / \
(8*np.pi**2*f**3)
return S_psd * np.repeat(df, 2)
@signal_base.function
def fdm_delay(toas, log10_A, log10_f, phase_e, phase_p):
"""
Function that calculates the earth-term gravitational-wave
fuzzy dark matter signal, as described in:
Kato et al. (2020).
:param toas: Time-of-arrival measurements [s]
:param log10_A: log10 of GW strain
:param log10_f: log10 of GW frequency
:param phase_e: The Earth-term phase of the GW
:param phase_p: The Pulsar-term phase of the GW
:return: the waveform as induced timing residuals (seconds)
"""
# convert
A = 10 ** log10_A
f = 10 ** log10_f
# Return the time-series for the pulsar
return - A / (2 * np.pi * f) * (np.sin(2 * np.pi * f * toas + phase_e) - np.sin(2 * np.pi * f * toas + phase_p))
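# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream module): evaluates the
# single-pulsar burst-with-memory ramp defined above on fabricated TOAs. The
# epoch range and amplitude are illustrative only, and the decorated waveform
# is assumed to evaluate directly when all arguments are concrete values. The
# *_block factories above (fdm_block, cw_block_circ, ...) are instead meant to
# be added to an enterprise signal model rather than called on data directly.
if __name__ == "__main__":
    toas = np.linspace(53000.0, 57000.0, 200) * const.day  # ~11 yr of fake TOAs [s]
    res = bwm_sglpsr_delay(toas, sign=1.0, log10_A=-14, t0=55000)
    print(res.min(), res.max())  # zero before t0, then a linear ramp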
| 26,111 | 34.334235 | 116 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/sampler.py |
import glob
import os
import pickle
import platform
import healpy as hp
import numpy as np
from PTMCMCSampler import __version__ as __vPTMCMC__
from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc
from enterprise_extensions import __version__
from enterprise_extensions.empirical_distr import (EmpiricalDistribution1D,
EmpiricalDistribution1DKDE,
EmpiricalDistribution2D,
EmpiricalDistribution2DKDE)
def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outdir='./chains'):
new_emp_dists = []
modified = False # check if anything was changed
for emp_dist in emp_dists:
if isinstance(emp_dist, EmpiricalDistribution2D) or isinstance(emp_dist, EmpiricalDistribution2DKDE):
# check if we need to extend the distribution
prior_ok=True
for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)):
param_names = [par.name for par in pta.params]
if param not in param_names: # skip if one of the parameters isn't in our PTA object
short_par = '_'.join(param.split('_')[:-1]) # make sure we aren't skipping priors with size!=None
if short_par in param_names:
param = short_par
else:
continue
# check 2 conditions on both params to make sure that they cover their priors
# skip if emp dist already covers the prior
param_idx = param_names.index(param)
if pta.params[param_idx].type not in ['uniform', 'normal']:
msg = '{} cannot be covered automatically by the empirical distribution\n'.format(pta.params[param_idx].prior)
msg += 'Please check that your prior is covered by the empirical distribution.\n'
print(msg)
continue
elif pta.params[param_idx].type == 'uniform':
prior_min = pta.params[param_idx].prior._defaults['pmin']
prior_max = pta.params[param_idx].prior._defaults['pmax']
elif pta.params[param_idx].type == 'normal':
prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma']
prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma']
# no need to extend if histogram edges are already prior min/max
if isinstance(emp_dist, EmpiricalDistribution2D):
if not (emp_dist._edges[ii][0] == prior_min and emp_dist._edges[ii][-1] == prior_max):
prior_ok = False
continue
elif isinstance(emp_dist, EmpiricalDistribution2DKDE):
if not (emp_dist.minvals[ii] == prior_min and emp_dist.maxvals[ii] == prior_max):
prior_ok=False
continue
if prior_ok:
new_emp_dists.append(emp_dist)
continue
modified = True
samples = np.zeros((npoints, emp_dist.draw().shape[0]))
for ii in range(npoints): # generate samples from old emp dist
samples[ii] = emp_dist.draw()
new_bins = []
minvals = []
maxvals = []
idxs_to_remove = []
for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)):
param_idx = param_names.index(param)
if pta.params[param_idx].type == 'uniform':
prior_min = pta.params[param_idx].prior._defaults['pmin']
prior_max = pta.params[param_idx].prior._defaults['pmax']
elif pta.params[param_idx].type == 'normal':
prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma']
prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma']
# drop samples that are outside the prior range (in case prior is smaller than samples)
if isinstance(emp_dist, EmpiricalDistribution2D):
samples[(samples[:, ii] < prior_min) | (samples[:, ii] > prior_max), ii] = -np.inf
elif isinstance(emp_dist, EmpiricalDistribution2DKDE):
idxs_to_remove.extend(np.arange(npoints)[(samples[:, ii] < prior_min) | (samples[:, ii] > prior_max)])
minvals.append(prior_min)
maxvals.append(prior_max)
# new distribution with more bins this time to extend it all the way out in same style as above.
new_bins.append(np.linspace(prior_min, prior_max, nbins + 40))
samples = np.delete(samples, idxs_to_remove, axis=0)
if isinstance(emp_dist, EmpiricalDistribution2D):
new_emp = EmpiricalDistribution2D(emp_dist.param_names, samples.T, new_bins)
elif isinstance(emp_dist, EmpiricalDistribution2DKDE):
# new distribution with more bins this time to extend it all the way out in same style as above.
new_emp = EmpiricalDistribution2DKDE(emp_dist.param_names, samples.T, minvals=minvals, maxvals=maxvals, nbins=nbins+40, bandwidth=emp_dist.bandwidth)
new_emp_dists.append(new_emp)
elif isinstance(emp_dist, EmpiricalDistribution1D) or isinstance(emp_dist, EmpiricalDistribution1DKDE):
param_names = [par.name for par in pta.params]
if emp_dist.param_name not in param_names: # skip if one of the parameters isn't in our PTA object
short_par = '_'.join(emp_dist.param_name.split('_')[:-1]) # make sure we aren't skipping priors with size!=None
if short_par in param_names:
param = short_par
else:
continue
else:
param = emp_dist.param_name
param_idx = param_names.index(param)
if pta.params[param_idx].type not in ['uniform', 'normal']:
msg = 'This prior cannot be covered automatically by the empirical distribution\n'
msg += 'Please check that your prior is covered by the empirical distribution.\n'
print(msg)
continue
if pta.params[param_idx].type == 'uniform':
prior_min = pta.params[param_idx].prior._defaults['pmin']
prior_max = pta.params[param_idx].prior._defaults['pmax']
            elif pta.params[param_idx].type == 'normal':
prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma']
prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma']
# check 2 conditions on param to make sure that it covers the prior
# skip if emp dist already covers the prior
if isinstance(emp_dist, EmpiricalDistribution1D):
if emp_dist._edges[0] == prior_min and emp_dist._edges[-1] == prior_max:
new_emp_dists.append(emp_dist)
continue
elif isinstance(emp_dist, EmpiricalDistribution1DKDE):
if emp_dist.minval == prior_min and emp_dist.maxval == prior_max:
new_emp_dists.append(emp_dist)
continue
modified = True
samples = np.zeros((npoints, 1))
for ii in range(npoints): # generate samples from old emp dist
samples[ii] = emp_dist.draw()
new_bins = []
idxs_to_remove = []
# drop samples that are outside the prior range (in case prior is smaller than samples)
if isinstance(emp_dist, EmpiricalDistribution1D):
samples[(samples < prior_min) | (samples > prior_max)] = -np.inf
elif isinstance(emp_dist, EmpiricalDistribution1DKDE):
idxs_to_remove.extend(np.arange(npoints)[(samples.squeeze() < prior_min) | (samples.squeeze() > prior_max)])
samples = np.delete(samples, idxs_to_remove, axis=0)
new_bins = np.linspace(prior_min, prior_max, emp_dist._Nbins + 40)
if isinstance(emp_dist, EmpiricalDistribution1D):
new_emp = EmpiricalDistribution1D(emp_dist.param_name, samples, new_bins)
elif isinstance(emp_dist, EmpiricalDistribution1DKDE):
new_emp = EmpiricalDistribution1DKDE(emp_dist.param_name, samples,
minval=prior_min, maxval=prior_max,
bandwidth=emp_dist.bandwidth)
new_emp_dists.append(new_emp)
else:
print('Unable to extend class of unknown type to the edges of the priors.')
new_emp_dists.append(emp_dist)
continue
if save_ext_dists and modified: # if user wants to save them, and they have been modified...
with open(outdir + '/new_emp_dists.pkl', 'wb') as f:
pickle.dump(new_emp_dists, f)
return new_emp_dists
class JumpProposal(object):
def __init__(self, pta, snames=None, empirical_distr=None, f_stat_file=None, save_ext_dists=False, outdir='./chains'):
"""Set up some custom jump proposals"""
self.params = pta.params
self.pnames = pta.param_names
self.psrnames = pta.pulsars
self.ndim = sum(p.size or 1 for p in pta.params)
self.plist = [p.name for p in pta.params]
# parameter map
self.pmap = {}
ct = 0
for p in pta.params:
size = p.size or 1
self.pmap[str(p)] = slice(ct, ct+size)
ct += size
# parameter indices map
self.pimap = {}
for ct, p in enumerate(pta.param_names):
self.pimap[p] = ct
# collecting signal parameters across pta
if snames is None:
allsigs = np.hstack([[qq.signal_name for qq in pp._signals]
for pp in pta._signalcollections])
self.snames = dict.fromkeys(np.unique(allsigs))
for key in self.snames:
self.snames[key] = []
for sc in pta._signalcollections:
for signal in sc._signals:
self.snames[signal.signal_name].extend(signal.params)
for key in self.snames:
self.snames[key] = list(set(self.snames[key]))
else:
self.snames = snames
# empirical distributions
if isinstance(empirical_distr, list):
# check if a list of emp dists is provided
self.empirical_distr = empirical_distr
# check if a directory of empirical dist pkl files are provided
elif empirical_distr is not None and os.path.isdir(empirical_distr):
dir_files = glob.glob(empirical_distr+'*.pkl') # search for pkls
pickled_distr = np.array([])
for idx, emp_file in enumerate(dir_files):
try:
with open(emp_file, 'rb') as f:
pickled_distr = np.append(pickled_distr, pickle.load(f))
except:
try:
with open(emp_file, 'rb') as f:
pickled_distr = np.append(pickled_distr, pickle.load(f))
except:
print(f'\nI can\'t open the empirical distribution pickle file at location {idx} in list!')
print("Empirical distributions set to 'None'")
pickled_distr = None
break
self.empirical_distr = pickled_distr
# check if single pkl file provided
elif empirical_distr is not None and os.path.isfile(empirical_distr): # checking for single file
try:
# try opening the file
with open(empirical_distr, 'rb') as f:
pickled_distr = pickle.load(f)
except:
# second attempt at opening the file
try:
with open(empirical_distr, 'rb') as f:
pickled_distr = pickle.load(f)
# if the second attempt fails...
except:
print('\nI can\'t open the empirical distribution pickle file!')
pickled_distr = None
self.empirical_distr = pickled_distr
# all other cases - emp dists set to None
else:
self.empirical_distr = None
if self.empirical_distr is not None:
# only save the empirical distributions for parameters that are in the model
mask = []
for idx, d in enumerate(self.empirical_distr):
if d.ndim == 1:
if d.param_name in pta.param_names:
mask.append(idx)
else:
if d.param_names[0] in pta.param_names and d.param_names[1] in pta.param_names:
mask.append(idx)
if len(mask) >= 1:
self.empirical_distr = [self.empirical_distr[m] for m in mask]
# extend empirical_distr here:
print('Extending empirical distributions to priors...\n')
self.empirical_distr = extend_emp_dists(pta, self.empirical_distr, npoints=100_000,
save_ext_dists=save_ext_dists, outdir=outdir)
else:
self.empirical_distr = None
if empirical_distr is not None and self.empirical_distr is None:
# if an emp dist path is provided, but fails the code, this helpful msg is provided
print("Adding empirical distributions failed!! Empirical distributions set to 'None'\n")
# F-statistic map
if f_stat_file is not None and os.path.isfile(f_stat_file):
npzfile = np.load(f_stat_file)
self.fe_freqs = npzfile['freqs']
self.fe = npzfile['fe']
def draw_from_prior(self, x, iter, beta):
"""Prior draw.
The function signature is specific to PTMCMCSampler.
"""
q = x.copy()
lqxy = 0
# randomly choose parameter
param = np.random.choice(self.params)
# if vector parameter jump in random component
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_red_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'red noise'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_empirical_distr(self, x, iter, beta):
q = x.copy()
lqxy = 0
if self.empirical_distr is not None:
# randomly choose one of the empirical distributions
distr_idx = np.random.randint(0, len(self.empirical_distr))
if self.empirical_distr[distr_idx].ndim == 1:
idx = self.pnames.index(self.empirical_distr[distr_idx].param_name)
q[idx] = self.empirical_distr[distr_idx].draw()
lqxy = (self.empirical_distr[distr_idx].logprob(x[idx]) -
self.empirical_distr[distr_idx].logprob(q[idx]))
dist = self.empirical_distr[distr_idx]
# if we fall outside the emp distr support, pull from prior instead
if x[idx] < dist._edges[0] or x[idx] > dist._edges[-1]:
q, lqxy = self.draw_from_prior(x, iter, beta)
else:
dist = self.empirical_distr[distr_idx]
oldsample = [x[self.pnames.index(p)] for p in dist.param_names]
newsample = dist.draw()
lqxy = (dist.logprob(oldsample) - dist.logprob(newsample))
for p, n in zip(dist.param_names, newsample):
q[self.pnames.index(p)] = n
# if we fall outside the emp distr support, pull from prior instead
for ii in range(len(oldsample)):
if oldsample[ii] < dist._edges[ii][0] or oldsample[ii] > dist._edges[ii][-1]:
q, lqxy = self.draw_from_prior(x, iter, beta)
return q, float(lqxy)
def draw_from_psr_empirical_distr(self, x, iter, beta):
q = x.copy()
lqxy = 0
if self.empirical_distr is not None:
# make list of empirical distributions with psr name
psr = np.random.choice(self.psrnames)
pnames = [ed.param_name if ed.ndim==1 else ed.param_names
for ed in self.empirical_distr]
# Retrieve indices of emp dists with pulsar pars.
idxs = []
for par in pnames:
if isinstance(par, str):
if psr in par:
idxs.append(pnames.index(par))
elif isinstance(par, list):
if any([psr in p for p in par]):
idxs.append(pnames.index(par))
for idx in idxs:
if self.empirical_distr[idx].ndim == 1:
pidx = self.pimap[self.empirical_distr[idx].param_name]
q[pidx] = self.empirical_distr[idx].draw()
lqxy += (self.empirical_distr[idx].logprob(x[pidx]) -
self.empirical_distr[idx].logprob(q[pidx]))
else:
oldsample = [x[self.pnames.index(p)]
for p in self.empirical_distr[idx].param_names]
newsample = self.empirical_distr[idx].draw()
for p, n in zip(self.empirical_distr[idx].param_names, newsample):
q[self.pnames.index(p)] = n
lqxy += (self.empirical_distr[idx].logprob(oldsample) -
self.empirical_distr[idx].logprob(newsample))
return q, float(lqxy)
def draw_from_dm_gp_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'dm_gp'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_dm1yr_prior(self, x, iter, beta):
q = x.copy()
dm1yr_names = [dmname for dmname in self.pnames if 'dm_s1yr' in dmname]
dmname = np.random.choice(dm1yr_names)
idx = self.pnames.index(dmname)
if 'log10_Amp' in dmname:
q[idx] = np.random.uniform(-10, -2)
elif 'phase' in dmname:
q[idx] = np.random.uniform(0, 2*np.pi)
return q, 0
def draw_from_dmexpdip_prior(self, x, iter, beta):
q = x.copy()
dmexp_names = [dmname for dmname in self.pnames if 'dmexp' in dmname]
dmname = np.random.choice(dmexp_names)
idx = self.pnames.index(dmname)
if 'log10_Amp' in dmname:
q[idx] = np.random.uniform(-10, -2)
elif 'log10_tau' in dmname:
q[idx] = np.random.uniform(0, 2.5)
elif 'sign_param' in dmname:
q[idx] = np.random.uniform(-1.0, 1.0)
return q, 0
def draw_from_dmexpcusp_prior(self, x, iter, beta):
q = x.copy()
dmexp_names = [dmname for dmname in self.pnames if 'dm_cusp' in dmname]
dmname = np.random.choice(dmexp_names)
idx = self.pnames.index(dmname)
if 'log10_Amp' in dmname:
q[idx] = np.random.uniform(-10, -2)
elif 'log10_tau' in dmname:
q[idx] = np.random.uniform(0, 2.5)
# elif 't0' in dmname:
# q[idx] = np.random.uniform(53393.0, 57388.0)
elif 'sign_param' in dmname:
q[idx] = np.random.uniform(-1.0, 1.0)
return q, 0
def draw_from_dmx_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'dmx_signal'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_chrom_gp_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'chrom_gp'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_gwb_log_uniform_distribution(self, x, iter, beta):
q = x.copy()
lqxy = 0
# draw parameter from signal model
signal_name = [par for par in self.pnames
if ('gw' in par and 'log10_A' in par)][0]
idx = list(self.pnames).index(signal_name)
param = self.params[idx]
q[self.pmap[str(param)]] = np.random.uniform(param.prior._defaults['pmin'], param.prior._defaults['pmax'])
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_dipole_log_uniform_distribution(self, x, iter, beta):
q = x.copy()
# draw parameter from signal model
idx = self.pnames.index('dipole_log10_A')
q[idx] = np.random.uniform(-18, -11)
return q, 0
def draw_from_monopole_log_uniform_distribution(self, x, iter, beta):
q = x.copy()
# draw parameter from signal model
idx = self.pnames.index('monopole_log10_A')
q[idx] = np.random.uniform(-18, -11)
return q, 0
def draw_from_altpol_log_uniform_distribution(self, x, iter, beta):
q = x.copy()
# draw parameter from signal model
polnames = [pol for pol in self.pnames if 'log10Apol' in pol]
if 'kappa' in self.pnames:
polnames.append('kappa')
pol = np.random.choice(polnames)
idx = self.pnames.index(pol)
if pol == 'log10Apol_tt':
q[idx] = np.random.uniform(-18, -12)
elif pol == 'log10Apol_st':
q[idx] = np.random.uniform(-18, -12)
elif pol == 'log10Apol_vl':
q[idx] = np.random.uniform(-18, -15)
elif pol == 'log10Apol_sl':
q[idx] = np.random.uniform(-18, -16)
elif pol == 'kappa':
q[idx] = np.random.uniform(0, 10)
return q, 0
def draw_from_ephem_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'phys_ephem'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_bwm_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'bwm'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_fdm_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'fdm'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_cw_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'cw'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_cw_log_uniform_distribution(self, x, iter, beta):
q = x.copy()
# draw parameter from signal model
idx = self.pnames.index('log10_h')
q[idx] = np.random.uniform(-18, -11)
return q, 0
def draw_from_dm_sw_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'gp_sw'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_gw_rho_prior(self, x, iter, beta):
"""
Jump proposals on free spec
"""
q = x.copy()
lqxy = 0
# draw parameter from signal model
parnames = [par.name for par in self.params]
pname = [pnm for pnm in parnames
if ('gw' in pnm and 'rho' in pnm)][0]
idx = parnames.index(pname)
param = self.params[idx]
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_signal_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
std = ['linear timing model',
'red noise',
'phys_ephem',
'gw',
'cw',
'bwm',
'fdm',
'gp_sw',
'ecorr_sherman-morrison',
'ecorr',
'efac',
'equad',
]
non_std = [nm for nm in self.snames.keys() if nm not in std]
# draw parameter from signal model
signal_name = np.random.choice(non_std)
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_par_prior(self, par_names):
# Preparing and comparing par_names with PTA parameters
par_names = np.atleast_1d(par_names)
par_list = []
name_list = []
for par_name in par_names:
pn_list = [n for n in self.plist if par_name in n]
if pn_list:
par_list.append(pn_list)
name_list.append(par_name)
if not par_list:
raise UserWarning("No parameter prior match found between {} and PTA.object."
.format(par_names))
par_list = np.concatenate(par_list, axis=None)
def draw(x, iter, beta):
"""Prior draw function generator for custom par_names.
par_names: list of strings
The function signature is specific to PTMCMCSampler.
"""
q = x.copy()
lqxy = 0
# randomly choose parameter
idx_name = np.random.choice(par_list)
idx = self.plist.index(idx_name)
# if vector parameter jump in random component
param = self.params[idx]
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
name_string = '_'.join(name_list)
draw.__name__ = 'draw_from_{}_prior'.format(name_string)
return draw
def draw_from_par_log_uniform(self, par_dict):
# Preparing and comparing par_dict.keys() with PTA parameters
par_list = []
name_list = []
for par_name in par_dict.keys():
pn_list = [n for n in self.plist if par_name in n and 'log' in n]
if pn_list:
par_list.append(pn_list)
name_list.append(par_name)
if not par_list:
raise UserWarning("No parameter dictionary match found between {} and PTA.object."
.format(par_dict.keys()))
par_list = np.concatenate(par_list, axis=None)
def draw(x, iter, beta):
"""log uniform prior draw function generator for custom par_names.
par_dict: dictionary with {"par_names":(lower bound,upper bound)}
{ "string":(float,float)}
The function signature is specific to PTMCMCSampler.
"""
q = x.copy()
# draw parameter from signal model
idx_name = np.random.choice(par_list)
idx = self.plist.index(idx_name)
            # look up the bounds belonging to whichever par_dict key matches this parameter
            key = [nm for nm in name_list if nm in idx_name][0]
            q[idx] = np.random.uniform(par_dict[key][0], par_dict[key][1])
return q, 0
name_string = '_'.join(name_list)
draw.__name__ = 'draw_from_{}_log_uniform'.format(name_string)
return draw
def draw_from_psr_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
# draw parameter from pulsar names
psr = np.random.choice(self.psrnames)
idxs = [self.pimap[par] for par in self.pnames if psr in par]
for idx in idxs:
q[idx] = self.params[idx].sample()
# forward-backward jump probability
first = np.sum([self.params[idx].get_logpdf(x[idx]) for idx in idxs])
last = np.sum([self.params[idx].get_logpdf(q[idx]) for idx in idxs])
lqxy = first - last
return q, float(lqxy)
def draw_from_signal(self, signal_names):
# Preparing and comparing signal_names with PTA signals
signal_names = np.atleast_1d(signal_names)
signal_list = []
name_list = []
for signal_name in signal_names:
try:
param_list = self.snames[signal_name]
signal_list.append(param_list)
name_list.append(signal_name)
            except KeyError:
                # signal_name is not among the PTA's signals; skip it
                pass
if not signal_list:
raise UserWarning("No signal match found between {} and PTA.object!"
.format(signal_names))
signal_list = np.concatenate(signal_list, axis=None)
def draw(x, iter, beta):
"""Signal draw function generator for custom signal_names.
signal_names: list of strings
The function signature is specific to PTMCMCSampler.
"""
q = x.copy()
lqxy = 0
# draw parameter from signal model
param = np.random.choice(signal_list)
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
name_string = '_'.join(name_list)
draw.__name__ = 'draw_from_{}_signal'.format(name_string)
return draw
def fe_jump(self, x, iter, beta):
q = x.copy()
lqxy = 0
fe_limit = np.max(self.fe)
# draw skylocation and frequency from f-stat map
accepted = False
while accepted is False:
log_f_new = self.params[self.pimap['log10_fgw']].sample()
f_idx = (np.abs(np.log10(self.fe_freqs) - log_f_new)).argmin()
gw_theta = np.arccos(self.params[self.pimap['cos_gwtheta']].sample())
gw_phi = self.params[self.pimap['gwphi']].sample()
hp_idx = hp.ang2pix(hp.get_nside(self.fe), gw_theta, gw_phi)
fe_new_point = self.fe[f_idx, hp_idx]
if np.random.uniform()<(fe_new_point/fe_limit):
accepted = True
# draw other parameters from prior
cos_inc = self.params[self.pimap['cos_inc']].sample()
psi = self.params[self.pimap['psi']].sample()
phase0 = self.params[self.pimap['phase0']].sample()
log10_h = self.params[self.pimap['log10_h']].sample()
# put new parameters into q
for param_name, new_param in zip(['log10_fgw', 'gwphi', 'cos_gwtheta', 'cos_inc', 'psi', 'phase0', 'log10_h'],
[log_f_new, gw_phi, np.cos(gw_theta), cos_inc, psi, phase0, log10_h]):
q[self.pimap[param_name]] = new_param
# calculate Hastings ratio
log_f_old = x[self.pimap['log10_fgw']]
f_idx_old = (np.abs(np.log10(self.fe_freqs) - log_f_old)).argmin()
gw_theta_old = np.arccos(x[self.pimap['cos_gwtheta']])
gw_phi_old = x[self.pimap['gwphi']]
hp_idx_old = hp.ang2pix(hp.get_nside(self.fe), gw_theta_old, gw_phi_old)
fe_old_point = self.fe[f_idx_old, hp_idx_old]
if fe_old_point>fe_limit:
fe_old_point = fe_limit
log10_h_old = x[self.pimap['log10_h']]
phase0_old = x[self.pimap['phase0']]
psi_old = x[self.pimap['psi']]
cos_inc_old = x[self.pimap['cos_inc']]
        hastings_extra_factor = self.params[self.pimap['log10_h']].get_pdf(log10_h_old)
        hastings_extra_factor *= 1/self.params[self.pimap['log10_h']].get_pdf(log10_h)
        hastings_extra_factor *= self.params[self.pimap['phase0']].get_pdf(phase0_old)
        hastings_extra_factor *= 1/self.params[self.pimap['phase0']].get_pdf(phase0)
        hastings_extra_factor *= self.params[self.pimap['psi']].get_pdf(psi_old)
        hastings_extra_factor *= 1/self.params[self.pimap['psi']].get_pdf(psi)
        hastings_extra_factor *= self.params[self.pimap['cos_inc']].get_pdf(cos_inc_old)
        hastings_extra_factor *= 1/self.params[self.pimap['cos_inc']].get_pdf(cos_inc)
lqxy = np.log(fe_old_point/fe_new_point * hastings_extra_factor)
return q, float(lqxy)
def get_global_parameters(pta):
"""Utility function for finding global parameters."""
pars = []
for sc in pta._signalcollections:
pars.extend(sc.param_names)
gpars = list(set(par for par in pars if pars.count(par) > 1))
ipars = [par for par in pars if par not in gpars]
# gpars = np.unique(list(filter(lambda x: pars.count(x)>1, pars)))
# ipars = np.array([p for p in pars if p not in gpars])
return np.array(gpars), np.array(ipars)
def get_parameter_groups(pta):
"""Utility function to get parameter groupings for sampling."""
params = pta.param_names
ndim = len(params)
groups = [list(np.arange(0, ndim))]
# get global and individual parameters
gpars, ipars = get_global_parameters(pta)
if gpars.size:
# add a group of all global parameters
groups.append([params.index(gp) for gp in gpars])
# make a group for each signal, with all non-global parameters
for sc in pta._signalcollections:
for signal in sc._signals:
ind = [params.index(p) for p in signal.param_names if not gpars.size or p not in gpars]
if ind:
groups.append(ind)
return groups
def get_psr_groups(pta):
groups = []
for psr in pta.pulsars:
grp = [pta.param_names.index(par)
for par in pta.param_names if psr in par]
groups.append(grp)
return groups
def get_cw_groups(pta):
"""Utility function to get parameter groups for CW sampling.
These groups should be appended to the usual get_parameter_groups()
output.
"""
ang_pars = ['costheta', 'phi', 'cosinc', 'phase0', 'psi']
mfdh_pars = ['log10_Mc', 'log10_fgw', 'log10_dL', 'log10_h']
freq_pars = ['log10_Mc', 'log10_fgw', 'pdist', 'pphase']
groups = []
for pars in [ang_pars, mfdh_pars, freq_pars]:
groups.append(group_from_params(pta, pars))
return groups
def group_from_params(pta, params):
gr = []
for p in params:
for q in pta.param_names:
if p in q:
gr.append(pta.param_names.index(q))
return gr
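# Illustrative sketch (an addition, not original enterprise_extensions code): how the
# grouping utilities above are typically combined before being handed to setup_sampler
# via its `groups` argument. `pta` is assumed to be an enterprise PTA object; adding
# per-pulsar and CW groups on top of the defaults is a common but optional choice.
def _example_build_groups(pta, include_psr_groups=False, include_cw_groups=False):
    """Hypothetical helper returning a combined list of parameter-index groups."""
    groups = get_parameter_groups(pta)
    if include_psr_groups:
        groups.extend(get_psr_groups(pta))
    if include_cw_groups:
        groups.extend(get_cw_groups(pta))
    return groups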
def save_runtime_info(pta, outdir='chains', human=None):
"""save system info, enterprise PTA.summary, and other metadata to file
"""
# save system info and enterprise PTA.summary to single file
sysinfo = {}
if human is not None:
sysinfo.update({"human": human})
sysinfo.update(platform.uname()._asdict())
with open(os.path.join(outdir, "runtime_info.txt"), "w") as fout:
for field, data in sysinfo.items():
fout.write(field + " : " + data + "\n")
fout.write("\n")
fout.write("enterprise_extensions v" + __version__ +"\n")
fout.write("PTMCMCSampler v" + __vPTMCMC__ +"\n")
fout.write(pta.summary())
    # save parameter list
with open(os.path.join(outdir, "pars.txt"), "w") as fout:
for pname in pta.param_names:
fout.write(pname + "\n")
# save list of priors
with open(os.path.join(outdir, "priors.txt"), "w") as fout:
for pp in pta.params:
fout.write(pp.__repr__() + "\n")
def setup_sampler(pta, outdir='chains', resume=False,
empirical_distr=None, groups=None, human=None,
save_ext_dists=False, loglkwargs={}, logpkwargs={}):
"""
Sets up an instance of PTMCMC sampler.
We initialize the sampler the likelihood and prior function
from the PTA object. We set up an initial jump covariance matrix
with fairly small jumps as this will be adapted as the MCMC runs.
We will setup an output directory in `outdir` that will contain
the chain (first n columns are the samples for the n parameters
and last 4 are log-posterior, log-likelihood, acceptance rate, and
an indicator variable for parallel tempering but it doesn't matter
because we aren't using parallel tempering).
We then add several custom jump proposals to the mix based on
whether or not certain parameters are in the model. These are
all either draws from the prior distribution of parameters or
draws from uniform distributions.
save_ext_dists: saves distributions that have been extended to
cover priors as a pickle to the outdir folder. These can then
be loaded later as distributions to save a minute at the start
of the run.
"""
# dimension of parameter space
params = pta.param_names
ndim = len(params)
# initial jump covariance matrix
if os.path.exists(outdir+'/cov.npy') and resume:
cov = np.load(outdir+'/cov.npy')
# check that the one we load is the same shape as our data
cov_new = np.diag(np.ones(ndim) * 0.1**2)
if cov.shape != cov_new.shape:
msg = 'The covariance matrix (cov.npy) in the output folder is '
msg += 'the wrong shape for the parameters given. '
msg += 'Start with a different output directory or '
msg += 'change resume to False to overwrite the run that exists.'
raise ValueError(msg)
else:
cov = np.diag(np.ones(ndim) * 0.1**2)
# parameter groupings
if groups is None:
groups = get_parameter_groups(pta)
sampler = ptmcmc(ndim, pta.get_lnlikelihood, pta.get_lnprior, cov, groups=groups,
outDir=outdir, resume=resume, loglkwargs=loglkwargs,
logpkwargs=logpkwargs)
save_runtime_info(pta, sampler.outDir, human)
# additional jump proposals
jp = JumpProposal(pta, empirical_distr=empirical_distr, save_ext_dists=save_ext_dists, outdir=outdir)
sampler.jp = jp
# always add draw from prior
sampler.addProposalToCycle(jp.draw_from_prior, 5)
# try adding empirical proposals
if empirical_distr is not None:
print('Attempting to add empirical proposals...\n')
sampler.addProposalToCycle(jp.draw_from_empirical_distr, 10)
# Red noise prior draw
if 'red noise' in jp.snames:
print('Adding red noise prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_red_prior, 10)
# DM GP noise prior draw
if 'dm_gp' in jp.snames:
print('Adding DM GP noise prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm_gp_prior, 10)
# DM annual prior draw
if 'dm_s1yr' in jp.snames:
print('Adding DM annual prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dm1yr_prior, 10)
# DM dip prior draw
if 'dmexp' in jp.snames:
print('Adding DM exponential dip prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmexpdip_prior, 10)
# DM cusp prior draw
if 'dm_cusp' in jp.snames:
print('Adding DM exponential cusp prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmexpcusp_prior, 10)
# DMX prior draw
if 'dmx_signal' in jp.snames:
print('Adding DMX prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_dmx_prior, 10)
# Ephemeris prior draw
if 'd_jupiter_mass' in pta.param_names:
print('Adding ephemeris model prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_ephem_prior, 10)
# GWB uniform distribution draw
if np.any([('gw' in par and 'log10_A' in par) for par in pta.param_names]):
print('Adding GWB uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_gwb_log_uniform_distribution, 10)
# Dipole uniform distribution draw
if 'dipole_log10_A' in pta.param_names:
print('Adding dipole uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_dipole_log_uniform_distribution, 10)
# Monopole uniform distribution draw
if 'monopole_log10_A' in pta.param_names:
print('Adding monopole uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_monopole_log_uniform_distribution, 10)
# Altpol uniform distribution draw
if 'log10Apol_tt' in pta.param_names:
print('Adding alternative GW-polarization uniform distribution draws...\n')
sampler.addProposalToCycle(jp.draw_from_altpol_log_uniform_distribution, 10)
# BWM prior draw
if 'bwm_log10_A' in pta.param_names:
print('Adding BWM prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_bwm_prior, 10)
# FDM prior draw
if 'fdm_log10_A' in pta.param_names:
print('Adding FDM prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_fdm_prior, 10)
# CW prior draw
if 'cw_log10_h' in pta.param_names:
print('Adding CW strain prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_cw_log_uniform_distribution, 10)
if 'cw_log10_Mc' in pta.param_names:
print('Adding CW prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_cw_prior, 10)
# free spectrum prior draw
if np.any(['log10_rho' in par for par in pta.param_names]):
print('Adding free spectrum prior draws...\n')
sampler.addProposalToCycle(jp.draw_from_gw_rho_prior, 25)
return sampler
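# Illustrative usage sketch (an addition, not original enterprise_extensions code).
# It assumes `pta` is an enterprise PTA object; the PTMCMCSampler weights passed to
# sample() are common choices rather than required values, and niter is arbitrary.
def _example_run_sampler(pta, outdir='chains', niter=1_000_000):
    """Hypothetical end-to-end call: build the sampler and run a chain."""
    sampler = setup_sampler(pta, outdir=outdir, resume=False)
    x0 = np.hstack([p.sample() for p in pta.params])  # random starting point from the priors
    sampler.sample(x0, niter, SCAMweight=30, AMweight=15, DEweight=50)
    return sampler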
| 47,414 | 37.330639 | 165 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/model_utils.py |
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as scistats
try:
    import acor
except ImportError:
    from emcee.autocorr import integrated_time
    class acor:  # minimal shim so the acor.acor(chain) calls below keep working
        @staticmethod
        def acor(chain):
            return np.atleast_1d(integrated_time(chain))
from enterprise_extensions import models
# Log-spaced frequencies
def linBinning(T, logmode, f_min, nlin, nlog):
"""
Get the frequency binning for the low-rank approximations, including
log-spaced low-frequency coverage.
Credit: van Haasteren & Vallisneri, MNRAS, Vol. 446, Iss. 2 (2015)
    :param T: Duration of the experiment
:param logmode: From which linear mode to switch to log
:param f_min: Down to which frequency we'll sample
:param nlin: How many linear frequencies we'll use
:param nlog: How many log frequencies we'll use
"""
if logmode < 0:
raise ValueError("Cannot do log-spacing when all frequencies are"
"linearly sampled")
# First the linear spacing and weights
df_lin = 1.0 / T
f_min_lin = (1.0 + logmode) / T
f_lin = np.linspace(f_min_lin, f_min_lin + (nlin-1)*df_lin, nlin)
w_lin = np.sqrt(df_lin * np.ones(nlin))
if nlog > 0:
# Now the log-spacing, and weights
f_min_log = np.log(f_min)
f_max_log = np.log((logmode+0.5)/T)
df_log = (f_max_log - f_min_log) / (nlog)
f_log = np.exp(np.linspace(f_min_log+0.5*df_log,
f_max_log-0.5*df_log, nlog))
w_log = np.sqrt(df_log * f_log)
return np.append(f_log, f_lin), np.append(w_log, w_lin)
else:
return f_lin, w_lin
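# Illustrative sketch (an addition): a hypothetical binning for a ~15-yr data set that
# keeps 30 linear modes but switches to log spacing below the third linear mode,
# extending coverage down to 1e-10 Hz. The numbers only show the calling pattern.
def _example_binning():
    """Return combined log+linear frequencies and weights for a toy baseline."""
    T = 15 * 365.25 * 86400  # observation span in seconds
    freqs, weights = linBinning(T, logmode=2, f_min=1e-10, nlin=30, nlog=10)
    return freqs, weights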
# New filter for different cadences
def cadence_filter(psr, start_time=None, end_time=None, cadence=None):
""" Filter data for coarser cadences. """
if start_time is None and end_time is None and cadence is None:
mask = np.ones(psr._toas.shape, dtype=bool)
else:
# find start and end indices of cadence filtering
start_idx = (np.abs((psr._toas / 86400) - start_time)).argmin()
end_idx = (np.abs((psr._toas / 86400) - end_time)).argmin()
# make a safe copy of sliced toas
tmp_toas = psr._toas[start_idx:end_idx+1].copy()
# cumulative sum of time differences
cumsum = np.cumsum(np.diff(tmp_toas / 86400))
tspan = (tmp_toas.max() - tmp_toas.min()) / 86400
# find closest indices of sliced toas to desired cadence
mask = []
for ii in np.arange(1.0, tspan, cadence):
idx = (np.abs(cumsum - ii)).argmin()
mask.append(idx)
        # append start and end segments with cadence-sliced toas
mask = np.append(np.arange(start_idx),
np.array(mask) + start_idx)
mask = np.append(mask, np.arange(end_idx, len(psr._toas)))
psr._toas = psr._toas[mask]
psr._toaerrs = psr._toaerrs[mask]
psr._residuals = psr._residuals[mask]
psr._ssbfreqs = psr._ssbfreqs[mask]
psr._designmatrix = psr._designmatrix[mask, :]
dmx_mask = np.sum(psr._designmatrix, axis=0) != 0.0
psr._designmatrix = psr._designmatrix[:, dmx_mask]
for key in psr._flags:
psr._flags[key] = psr._flags[key][mask]
if psr._planetssb is not None:
psr._planetssb = psr.planetssb[mask, :, :]
psr.sort_data()
def get_tspan(psrs):
""" Returns maximum time span for all pulsars.
:param psrs: List of pulsar objects
"""
tmin = np.min([p.toas.min() for p in psrs])
tmax = np.max([p.toas.max() for p in psrs])
return tmax - tmin
class PostProcessing(object):
def __init__(self, chain, pars, burn_percentage=0.25):
burn = int(burn_percentage*chain.shape[0])
self.chain = chain[burn:]
self.pars = pars
def plot_trace(self, plot_kwargs={}):
ndim = len(self.pars)
if ndim > 1:
ncols = 4
nrows = int(np.ceil(ndim/ncols))
else:
ncols, nrows = 1, 1
plt.figure(figsize=(15, 2*nrows))
for ii in range(ndim):
plt.subplot(nrows, ncols, ii+1)
plt.plot(self.chain[:, ii], **plot_kwargs)
plt.title(self.pars[ii], fontsize=8)
plt.tight_layout()
    def plot_hist(self, hist_kwargs={'bins': 50, 'density': True}):
ndim = len(self.pars)
if ndim > 1:
ncols = 4
nrows = int(np.ceil(ndim/ncols))
else:
ncols, nrows = 1, 1
plt.figure(figsize=(15, 2*nrows))
for ii in range(ndim):
plt.subplot(nrows, ncols, ii+1)
plt.hist(self.chain[:, ii], **hist_kwargs)
plt.title(self.pars[ii], fontsize=8)
plt.tight_layout()
def ul(chain, q=95.0):
"""
Computes upper limit and associated uncertainty.
:param chain: MCMC samples of GWB (or common red noise) amplitude
:param q: desired percentile of upper-limit value [out of 100, default=95]
:returns: (upper limit, uncertainty on upper limit)
"""
hist = np.histogram(10.0**chain, bins=100)
hist_dist = scistats.rv_histogram(hist)
A_ul = 10**np.percentile(chain, q=q)
p_ul = hist_dist.pdf(A_ul)
Aul_error = np.sqrt((q/100.) * (1.0 - (q/100.0)) /
(chain.shape[0]/acor.acor(chain)[0])) / p_ul
return A_ul, Aul_error
def bayes_fac(samples, ntol=200, logAmin=-18, logAmax=-14):
"""
Computes the Savage Dickey Bayes Factor and uncertainty.
    :param samples: MCMC samples of GWB (or common red noise) amplitude
    :param ntol: Tolerance on number of samples in bin
    :param logAmin: lower bound of the log10-amplitude prior
    :param logAmax: upper bound of the log10-amplitude prior
    :returns: (bayes factor, 1-sigma bayes factor uncertainty)
"""
prior = 1 / (logAmax - logAmin)
dA = np.linspace(0.01, 0.1, 100)
bf = []
bf_err = []
mask = [] # selecting bins with more than 200 samples
for ii, delta in enumerate(dA):
n = np.sum(samples <= (logAmin + delta))
N = len(samples)
post = n / N / delta
bf.append(prior/post)
bf_err.append(bf[ii]/np.sqrt(n))
if n > ntol:
mask.append(ii)
return np.mean(np.array(bf)[mask]), np.std(np.array(bf)[mask])
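# Illustrative sketch (an addition): the upper-limit and Savage-Dickey utilities above
# applied to a synthetic log10-amplitude chain. The chain here is fake and only shows
# the calling pattern; real use would slice the amplitude column from an MCMC chain.
def _example_amplitude_statistics():
    """Call ul() and bayes_fac() on a toy log10-amplitude chain."""
    fake_chain = np.random.uniform(-18.0, -14.0, size=20000)
    upper_limit, upper_limit_err = ul(fake_chain, q=95.0)
    bf, bf_err = bayes_fac(fake_chain, ntol=200, logAmin=-18, logAmax=-14)
    return upper_limit, upper_limit_err, bf, bf_err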
def odds_ratio(chain, models=[0, 1], uncertainty=True, thin=False):
if thin:
indep_samples = np.rint(chain.shape[0] / acor.acor(chain)[0])
samples = np.random.choice(chain.copy(), int(indep_samples))
else:
samples = chain.copy()
mask_top = np.rint(samples) == max(models)
mask_bot = np.rint(samples) == min(models)
top = float(np.sum(mask_top))
bot = float(np.sum(mask_bot))
if top == 0.0 and bot != 0.0:
bf = 1.0 / bot
elif bot == 0.0 and top != 0.0:
bf = top
else:
bf = top / bot
if uncertainty:
if bot == 0. or top == 0.:
sigma = 0.0
else:
            # Counting transitions from model 1 to model 2
ct_tb = 0
for ii in range(len(mask_top)-1):
if mask_top[ii]:
if not mask_top[ii+1]:
ct_tb += 1
# Counting transitions from model 2 to model 1
ct_bt = 0
for ii in range(len(mask_bot)-1):
if mask_bot[ii]:
if not mask_bot[ii+1]:
ct_bt += 1
try:
sigma = bf * np.sqrt((float(top) - float(ct_tb))/(float(top)*float(ct_tb)) +
(float(bot) - float(ct_bt))/(float(bot)*float(ct_bt)))
except ZeroDivisionError:
sigma = 0.0
return bf, sigma
elif not uncertainty:
return bf
def bic(chain, nobs, log_evidence=False):
"""
Computes the Bayesian Information Criterion.
:param chain: MCMC samples of all parameters, plus meta-data
:param nobs: Number of observations in data
:param evidence: return evidence estimate too?
:returns: (bic, evidence)
"""
nparams = chain.shape[1] - 4 # removing 4 aux columns
maxlnlike = chain[:, -4].max()
bic = np.log(nobs)*nparams - 2.0*maxlnlike
if log_evidence:
return (bic, -0.5*bic)
else:
return bic
def mask_filter(psr, mask):
"""filter given pulsar data by user defined mask"""
psr._toas = psr._toas[mask]
psr._toaerrs = psr._toaerrs[mask]
psr._residuals = psr._residuals[mask]
psr._ssbfreqs = psr._ssbfreqs[mask]
psr._designmatrix = psr._designmatrix[mask, :]
dmx_mask = np.sum(psr._designmatrix, axis=0) != 0.0
psr._designmatrix = psr._designmatrix[:, dmx_mask]
for key in psr._flags:
psr._flags[key] = psr._flags[key][mask]
if psr._planetssb is not None:
psr._planetssb = psr.planetssb[mask, :, :]
psr.sort_data()
class CompareTimingModels():
"""
    Compare the difference between the usual and marginalized timing models.
    After instantiating, the __call__() method can be used to evaluate any number of points.
    To see the results, use the results() method.
    :param psrs: list of enterprise Pulsar objects used to build the model
:param model_name: String name of model to test. Model must be defined in enterprise_extensions.models.
:param abs_tol: absolute tolerance for error between timing models (default 1e-3), set to None to bypass errors
:param rel_tol: relative tolerance for error between timing models (default 1e-6), set to None to bypass errors
:param dense: use the dense cholesky algorithm over sparse
"""
def __init__(self, psrs, model_name='model_1', abs_tol=1e-3, rel_tol=1e-6, dense=True, **kwargs):
model = getattr(models, model_name)
self.abs_tol = abs_tol
self.rel_tol = rel_tol
if dense:
self.pta_marg = model(psrs, tm_marg=True, dense_like=True, **kwargs) # marginalized model
else:
self.pta_marg = model(psrs, tm_marg=True, **kwargs) # marginalized model
self.pta_norm = model(psrs, **kwargs) # normal model
self.tm_correction = 0
for psr in psrs:
self.tm_correction -= 0.5 * np.log(1e40) * psr.Mmat.shape[1]
self.abs_err = []
self.rel_err = []
self.count = 0
def check_timing(self, number=10_000):
print('Timing sample creation...')
start = time.time()
for __ in range(number):
x0 = np.hstack([p.sample() for p in self.pta_marg.params])
end = time.time()
sample_time = end - start
print('Sampling {0} points took {1} seconds.'.format(number, sample_time))
print('Timing MarginalizedTimingModel...')
start = time.time()
for __ in range(number):
x0 = np.hstack([p.sample() for p in self.pta_marg.params])
self.pta_marg.get_lnlikelihood(x0)
end = time.time()
time_marg = end - start - sample_time # remove sampling time from total time taken
        print('Evaluating {0} MarginalizedTimingModel likelihoods took {1} seconds.'.format(number, time_marg))
print('Timing TimingModel...')
start = time.time()
for __ in range(number):
x0 = np.hstack([p.sample() for p in self.pta_marg.params])
self.pta_norm.get_lnlikelihood(x0)
end = time.time()
time_norm = end - start - sample_time # remove sampling time from total time taken
        print('Evaluating {0} TimingModel likelihoods took {1} seconds.'.format(number, time_norm))
res = time_norm / time_marg
print('MarginalizedTimingModel is {0} times faster than TimingModel after {1} points.'.format(res, number))
return res
def get_sample_point(self):
x0 = np.hstack([p.sample() for p in self.pta_marg.params])
return x0
def __call__(self, x0):
res_norm = self.pta_norm.get_lnlikelihood(x0)
res_marg = self.pta_marg.get_lnlikelihood(x0)
abs_err = np.abs(res_marg - res_norm)
rel_err = abs_err / res_norm
self.abs_err.append(abs_err)
self.rel_err.append(rel_err)
self.count += 1
if self.abs_tol is not None and abs_err > self.abs_tol:
abs_raise = 'Absolute error is {0} at {1} which is larger than abs_tol of {2}.'.format(
abs_err, x0, self.abs_tol)
raise ValueError(abs_raise)
elif self.rel_tol is not None and rel_err > self.rel_tol:
rel_raise = 'Relative error is {0} at {1} which is larger than rel_tol of {2}.'.format(
rel_err, x0, self.rel_tol)
raise ValueError(rel_raise)
return res_norm
def results(self):
print('Number of points evaluated:', self.count)
print('Maximum absolute error:', np.max(self.abs_err))
print('Maximum relative error:', np.max(self.rel_err))
return self.abs_err, self.rel_err
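# Illustrative usage sketch (an addition): compare the marginalized and full timing
# models on a handful of random points. `psrs` is assumed to be a list of enterprise
# Pulsar objects compatible with the chosen model in enterprise_extensions.models.
def _example_compare_timing_models(psrs, npoints=100):
    """Check that MarginalizedTimingModel likelihoods agree with the full model."""
    compare = CompareTimingModels(psrs, model_name='model_1')
    for _ in range(npoints):
        compare(compare.get_sample_point())  # raises if a tolerance is exceeded
    return compare.results()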
| 12,737 | 31.914729 | 115 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/empirical_distr.py |
import logging
import pickle
import numpy as np
try:
from sklearn.neighbors import KernelDensity
sklearn_available=True
except ModuleNotFoundError:
sklearn_available=False
from scipy.interpolate import interp1d, interp2d
logger = logging.getLogger(__name__)
class EmpiricalDistribution1D(object):
"""
Class used to define a 1D empirical distribution
based on posterior from another MCMC.
:param samples: samples for hist
:param bins: edges to use for hist (left and right) make sure bins
cover whole prior!
"""
def __init__(self, param_name, samples, bins):
self.ndim = 1
self.param_name = param_name
self._Nbins = len(bins)-1
hist, x_bins = np.histogram(samples, bins=bins)
self._edges = x_bins
self._wids = np.diff(x_bins)
hist += 1 # add a sample to every bin
counts = np.sum(hist)
self._pdf = hist / float(counts) / self._wids
self._cdf = np.cumsum((self._pdf*self._wids).ravel())
self._logpdf = np.log(self._pdf)
def draw(self):
draw = np.random.rand()
draw_bin = np.searchsorted(self._cdf, draw, side='right')
idx = np.unravel_index(draw_bin, self._Nbins)[0]
samp = self._edges[idx] + self._wids[idx]*np.random.rand()
return np.array(samp)
def prob(self, params):
ix = np.searchsorted(self._edges, params) - 1
return self._pdf[ix]
def logprob(self, params):
ix = np.searchsorted(self._edges, params) - 1
return self._logpdf[ix]
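# Illustrative sketch (an addition, not original code): building and using a 1D
# empirical distribution from posterior samples of a single parameter. The parameter
# name, prior range, and samples below are hypothetical stand-ins.
def _example_1d_distribution():
    """Construct a toy EmpiricalDistribution1D, draw from it, and evaluate logprob."""
    samples = np.random.uniform(-18.0, -14.0, size=10000)  # stand-in posterior samples
    bins = np.linspace(-18.0, -14.0, 41)  # bin edges should span the full prior
    distr = EmpiricalDistribution1D('gw_log10_A', samples, bins)
    new_point = distr.draw()
    return new_point, distr.logprob(new_point)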
class EmpiricalDistribution1DKDE(object):
def __init__(self, param_name, samples, minval=None, maxval=None, bandwidth=0.1, nbins=40):
"""
Minvals and maxvals should specify priors for these. Should make these required.
"""
self.ndim = 1
self.param_name = param_name
self.bandwidth = bandwidth
# code below relies on samples axes being swapped. but we
# want to keep inputs the same
# create a 2D KDE from which to evaluate
self.kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(samples.reshape((samples.size, 1)))
if minval is None:
# msg = "minvals for KDE empirical distribution were not supplied. Resulting distribution may not have support over full prior"
# logger.warning(msg)
# widen these to add support
minval = min(samples)
maxval = max(samples)
# significantly faster probability estimation using interpolation
# instead of evaluating KDE every time
self.minval = minval
self.maxval = maxval
xvals = np.linspace(minval, maxval, num=nbins)
self._Nbins = nbins
scores = np.array([self.kde.score(np.atleast_2d(xval)) for xval in xvals])
# interpolate within prior
self._logpdf = interp1d(xvals, scores, kind='linear', fill_value=-1000)
def draw(self):
params = self.kde.sample(1).T
return params.squeeze()
# class used to define a 2D empirical distribution
# based on posteriors from another MCMC
class EmpiricalDistribution2D(object):
"""
Class used to define a 1D empirical distribution
based on posterior from another MCMC.
:param samples: samples for hist
:param bins: edges to use for hist (left and right)
make sure bins cover whole prior!
"""
def __init__(self, param_names, samples, bins):
self.ndim = 2
self.param_names = param_names
self._Nbins = [len(b)-1 for b in bins]
hist, x_bins, y_bins = np.histogram2d(*samples, bins=bins)
self._edges = np.array([x_bins, y_bins])
self._wids = np.diff([x_bins, y_bins])
area = np.outer(*self._wids)
hist += 1 # add a sample to every bin
counts = np.sum(hist)
self._pdf = hist / counts / area
self._cdf = np.cumsum((self._pdf*area).ravel())
self._logpdf = np.log(self._pdf)
def draw(self):
draw = np.random.rand()
draw_bin = np.searchsorted(self._cdf, draw)
idx = np.unravel_index(draw_bin, self._Nbins)
samp = [self._edges[ii, idx[ii]] + self._wids[ii, idx[ii]]*np.random.rand()
for ii in range(2)]
return np.array(samp)
def prob(self, params):
ix, iy = [np.searchsorted(self._edges[ii], params[ii]) - 1 for ii in range(2)]
return self._pdf[ix, iy]
def logprob(self, params):
ix, iy = [np.searchsorted(self._edges[ii], params[ii]) - 1 for ii in range(2)]
return self._logpdf[ix, iy]
class EmpiricalDistribution2DKDE(object):
def __init__(self, param_names, samples, minvals=None, maxvals=None, bandwidth=0.1, nbins=40):
"""
Minvals and maxvals should specify priors for these. Should make these required.
:param param_names: 2-element list of parameter names
:param samples: samples, with dimension (2 x Nsamples)
:return distr: list of empirical distributions
"""
self.ndim = 2
self.param_names = param_names
self.bandwidth = bandwidth
# code below relies on samples axes being swapped. but we
# want to keep inputs the same
# create a 2D KDE from which to evaluate
self.kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(samples.T)
if minvals is None:
msg = "minvals for KDE empirical distribution were not supplied. Resulting distribution may not have support over full prior"
logger.warning(msg)
# widen these to add support
minvals = (min(samples[0, :]), min(samples[1, :]))
maxvals = (max(samples[0, :]), max(samples[1, :]))
# significantly faster probability estimation using interpolation
# instead of evaluating KDE every time
self.minvals = minvals
self.maxvals = maxvals
xvals = np.linspace(minvals[0], maxvals[0], num=nbins)
yvals = np.linspace(minvals[1], maxvals[1], num=nbins)
self._Nbins = [yvals.size for ii in range(xvals.size)]
scores = np.array([self.kde.score(np.array([xvals[ii], yvals[jj]]).reshape((1, 2))) for ii in range(xvals.size) for jj in range(yvals.size)])
# interpolate within prior
self._logpdf = interp2d(xvals, yvals, scores, kind='linear', fill_value=-1000)
def draw(self):
params = self.kde.sample(1).T
return params.squeeze()
def prob(self, params):
# just in case...make sure to make this zero outside of our prior ranges
param1_out = params[0] < self.minvals[0] or params[0] > self.maxvals[0]
param2_out = params[1] < self.minvals[1] or params[1] > self.maxvals[1]
if param1_out or param2_out:
# essentially zero
return -1000
else:
return np.exp(self._logpdf(*params))[0]
def logprob(self, params):
return self._logpdf(*params)[0]
def make_empirical_distributions(pta, paramlist, params, chain,
burn=0, nbins=81, filename='distr.pkl',
return_distribution=True,
save_dists=True):
"""
Utility function to construct empirical distributions.
:param pta: the pta object used to generate the posteriors
:param paramlist: a list of parameter names,
either single parameters or pairs of parameters
:param chain: MCMC chain from a previous run
:param burn: desired number of initial samples to discard
:param nbins: number of bins to use for the empirical distributions
:return distr: list of empirical distributions
"""
distr = []
if not save_dists and not return_distribution:
msg = "no distribution returned or saved, are you sure??"
logger.info(msg)
for pl in paramlist:
if type(pl) is not list:
pl = [pl]
if len(pl) == 1:
idx = pta.param_names.index(pl[0])
prior_min = pta.params[idx].prior._defaults['pmin']
prior_max = pta.params[idx].prior._defaults['pmax']
# get the bins for the histogram
bins = np.linspace(prior_min, prior_max, nbins)
new_distr = EmpiricalDistribution1D(pl[0], chain[burn:, idx], bins)
distr.append(new_distr)
elif len(pl) == 2:
# get the parameter indices
idx = [pta.param_names.index(pl1) for pl1 in pl]
# get the bins for the histogram
bins = [np.linspace(pta.params[i].prior._defaults['pmin'],
pta.params[i].prior._defaults['pmax'], nbins) for i in idx]
new_distr = EmpiricalDistribution2D(pl, chain[burn:, idx].T, bins)
distr.append(new_distr)
else:
msg = 'WARNING: only 1D and 2D empirical distributions are currently allowed.'
logger.warning(msg)
# save the list of empirical distributions as a pickle file
if save_dists:
if len(distr) > 0:
with open(filename, 'wb') as f:
pickle.dump(distr, f)
msg = 'The empirical distributions have been pickled to {0}.'.format(filename)
logger.info(msg)
else:
msg = 'WARNING: No empirical distributions were made!'
logger.warning(msg)
if return_distribution:
return distr
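# Illustrative usage sketch (an addition): building empirical distributions from an
# earlier chain so they can be fed to the sampler's empirical-distribution proposals.
# The parameter names and output file name are hypothetical.
def _example_make_distributions(pta, chain):
    """Make a 1D distribution for one parameter and a 2D one for a correlated pair."""
    paramlist = ['gw_log10_A', ['gw_log10_A', 'gw_gamma']]
    burn = int(0.25 * chain.shape[0])  # discard the first quarter of the chain
    return make_empirical_distributions(pta, paramlist, pta.param_names, chain,
                                        burn=burn, filename='distr.pkl')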
def make_empirical_distributions_KDE(pta, paramlist, params, chain,
burn=0, nbins=41, filename='distr.pkl',
bandwidth=0.1,
return_distribution=True,
save_dists=True):
"""
Utility function to construct empirical distributions.
:param paramlist: a list of parameter names,
either single parameters or pairs of parameters
:param params: list of all parameter names for the MCMC chain
:param chain: MCMC chain from a previous run, has dimensions Nsamples x Nparams
:param burn: desired number of initial samples to discard
:param nbins: number of bins to use for the empirical distributions
:return distr: list of empirical distributions
"""
distr = []
if not save_dists and not return_distribution:
msg = "no distribution returned or saved, are you sure??"
logger.info(msg)
for pl in paramlist:
if type(pl) is not list:
pl = [pl]
if len(pl) == 1:
# get the parameter index
idx = pta.param_names.index(pl[0])
prior_min = pta.params[idx].prior._defaults['pmin']
prior_max = pta.params[idx].prior._defaults['pmax']
# get the bins for the histogram
new_distr = EmpiricalDistribution1DKDE(pl[0], chain[burn:, idx], bandwidth=bandwidth, minval=prior_min, maxval=prior_max)
distr.append(new_distr)
elif len(pl) == 2:
# get the parameter indices
idx = [pta.param_names.index(pl1) for pl1 in pl]
# get the bins for the histogram
bins = [np.linspace(pta.params[i].prior._defaults['pmin'],
pta.params[i].prior._defaults['pmax'], nbins) for i in idx]
            minvals = [pta.params[idx[0]].prior._defaults['pmin'], pta.params[idx[1]].prior._defaults['pmin']]
            maxvals = [pta.params[idx[0]].prior._defaults['pmax'], pta.params[idx[1]].prior._defaults['pmax']]
# get the bins for the histogram
if sklearn_available:
new_distr = EmpiricalDistribution2DKDE(pl, chain[burn:, idx].T, bandwidth=bandwidth, minvals=minvals, maxvals=maxvals)
else:
                logger.warning('`sklearn` package not available. Falling back to using histograms for the empirical distribution')
new_distr = EmpiricalDistribution2D(pl, chain[burn:, idx].T, bins)
distr.append(new_distr)
else:
msg = 'WARNING: only 1D and 2D empirical distributions are currently allowed.'
logger.warning(msg)
# save the list of empirical distributions as a pickle file
if save_dists:
if len(distr) > 0:
with open(filename, 'wb') as f:
pickle.dump(distr, f)
msg = 'The empirical distributions have been pickled to {0}.'.format(filename)
logger.info(msg)
else:
msg = 'WARNING: No empirical distributions were made!'
logger.warning(msg)
if return_distribution:
return distr
| 12,755 | 35.033898 | 149 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/sky_scrambles.py |
import pickle
import sys
import time
import numpy as np
from enterprise.signals import utils
def compute_match(orf1, orf1_mag, orf2, orf2_mag):
"""Computes the match between two different ORFs."""
match = np.abs(np.dot(orf1, orf2))/(orf1_mag*orf2_mag)
return match
def make_true_orf(psrs):
"""Computes the ORF by looping over pulsar pairs"""
npsr = len(psrs)
orf = np.zeros(int(npsr*(npsr-1)/2))
idx = 0
for i in range(npsr):
for j in range(i+1, npsr):
orf[idx] = utils.hd_orf(psrs[i].pos, psrs[j].pos)
idx += 1
return orf
def compute_orf(ptheta, pphi):
"""
Computes the ORF coefficient. Takes different input than utils.hd_orf().
:param ptheta: Array of values of pulsar positions theta
:param pphi: Array of values of pulsar positions phi
:returns:
orf: ORF for the given positions
orf_mag: Magnitude of the ORF
"""
npsr = len(ptheta)
pos = [np.array([np.cos(phi)*np.sin(theta),
np.sin(phi)*np.sin(theta),
np.cos(theta)]) for phi, theta in zip(pphi, ptheta)]
x = []
for i in range(npsr):
for j in range(i+1, npsr):
x.append(np.dot(pos[i], pos[j]))
x = np.array(x)
orf = 1.5*(1./3. + (1.-x)/2.*(np.log((1.-x)/2.)-1./6.))
return orf, np.sqrt(np.dot(orf, orf))
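# Added note (not original code): with k = (1 - x)/2 the expression above is the
# Hellings-Downs curve 0.5 + 1.5*k*ln(k) - 0.25*k, so a nearly coincident pulsar pair
# (x -> 1, k -> 0) has an ORF value approaching 0.5. A quick numerical check:
def _example_orf_check():
    """Evaluate the ORF for a nearly coincident pair and an antipodal pair."""
    ptheta = np.array([0.5, 0.5 + 1e-6, np.pi - 0.5])
    pphi = np.array([1.0, 1.0, 1.0 + np.pi])
    orf, orf_mag = compute_orf(ptheta, pphi)
    return orf, orf_mag  # orf[0] (coincident pair) is close to 0.5; orf[1] is an antipodal pair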
def get_scrambles(psrs, N=500, Nmax=10000, thresh=0.1,
filename='sky_scrambles.npz', resume=False):
"""
Get sky scramble ORFs and matches.
:param psrs: List of pulsar objects
:param N: Number of desired sky scrambles
:param Nmax: Maximum number of tries to get independent scrambles
:param thresh: Threshold value for match statistic.
:param filename: Name of the file where the sky scrambles should be saved.
Sky scrambles should be saved in `npz` file.
:param resume: Whether to resume from an earlier run.
"""
# compute the unscrambled ORF
orf_true = make_true_orf(psrs)
orf_true_mag = np.sqrt(np.dot(orf_true, orf_true))
npsr = len(psrs)
print('Generating {0} sky scrambles from {1} attempts with threshold {2}...'.format(N, Nmax, thresh))
orf_mags = []
if resume:
print('Resuming from earlier run... loading sky scrambles from file {0}'.format(filename))
npzfile = np.load(filename)
matches, orfs = npzfile['matches'], npzfile['orfs']
thetas, phis = npzfile['thetas'], npzfile['phis']
print('{0} sky scrambles have already been generated.'.format(len(matches)))
for o in orfs:
orf_mags.append(np.sqrt(np.dot(o, o)))
else:
matches, orfs, thetas, phis = [], [], [], []
ct = 0
tstart = time.time()
while ct <= Nmax and len(matches) <= N:
ptheta = np.arccos(np.random.uniform(-1, 1, npsr))
pphi = np.random.uniform(0, 2*np.pi, npsr)
orf_s, orf_s_mag = compute_orf(ptheta, pphi)
match = compute_match(orf_true, orf_true_mag, orf_s, orf_s_mag)
if thresh == 1.0:
if ct == 0:
print('There is no threshold! Keep all the sky scrambles')
if len(orfs) == 0:
orfs.append(orf_s)
matches.append(match)
orfs = np.array(orfs)
matches = np.array(matches)
thetas = ptheta[np.newaxis, ...]
phis = pphi[np.newaxis, ...]
orf_mags.append(np.sqrt(np.dot(orf_s, orf_s)))
else:
matches = np.append(matches, match)
orf_reshape = np.vstack(orf_s).T
orfs = np.append(orfs, orf_reshape, axis=0)
orf_mags.append(orf_s_mag)
thetas = np.concatenate((thetas, [ptheta]))
phis = np.concatenate((phis, [pphi]))
elif thresh < 1.0 and match <= thresh:
if len(orfs) == 0:
orfs.append(orf_s)
matches.append(match)
orfs = np.array(orfs)
matches = np.array(matches)
thetas = ptheta[np.newaxis, ...]
phis = pphi[np.newaxis, ...]
orf_mags.append(np.sqrt(np.dot(orf_s, orf_s)))
else:
                check = all(compute_match(orf_s, orf_s_mag, o, m) <= thresh
                            for o, m in zip(orfs, orf_mags))
if check:
matches = np.append(matches, match)
orf_reshape = np.vstack(orf_s).T
orfs = np.append(orfs, orf_reshape, axis=0)
orf_mags.append(orf_s_mag)
thetas = np.concatenate((thetas, [ptheta]))
phis = np.concatenate((phis, [pphi]))
ct += 1
if ct % 1000 == 0:
sys.stdout.write('\r')
sys.stdout.write('Finished %2.1f percent in %f min'
% (float(ct)/N*100, (time.time() - tstart)/60.))
sys.stdout.flush()
if len(matches) < N:
print('\nGenerated {0} matches rather than the desired {1} matches'.format(len(matches), N))
else:
print('\nGenerated the desired {0} matches in {1} attempts'.format(len(matches), ct))
print('Total runtime: {0:.1f} min'.format((time.time()-tstart)/60.))
np.savez(filename, matches=matches, orfs=orfs, thetas=thetas, phis=phis)
return (matches, orfs, thetas, phis)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--picklefile',
help='pickle file for the pulsars')
parser.add_argument('--threshold', default=0.1,
help='threshold for sky scrambles (DEFAULT 0.1)')
parser.add_argument('--nscrambles', default=1000,
help='number of sky scrambles to generate (DEFAULT 1000)')
parser.add_argument('--nmax', default=1000,
help='maximum number of attempts (DEFAULT 1000)')
parser.add_argument('--savefile', default='../data/scrambles_nano.npz',
help='outputfile name')
parser.add_argument('--resume', action='store_true',
help='resume from existing savefile?')
args = parser.parse_args()
with open(args.picklefile, 'rb') as f:
psrs = pickle.load(f)
get_scrambles(psrs, N=int(args.nscrambles), Nmax=int(args.nmax), thresh=float(args.threshold),
filename=args.savefile, resume=args.resume)
| 6,532 | 33.75 | 111 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/timing.py |
from collections import OrderedDict
import numpy as np
from enterprise.signals import deterministic_signals, parameter, signal_base
# timing model delay
@signal_base.function
def tm_delay(residuals, t2pulsar, tmparams_orig, tmparams, which='all'):
"""
Compute difference in residuals due to perturbed timing model.
:param residuals: original pulsar residuals from Pulsar object
:param t2pulsar: libstempo pulsar object
:param tmparams_orig: dictionary of TM parameter tuples, (val, err)
:param tmparams: new timing model parameters, rescaled to be in sigmas
:param which: option to have all or only named TM parameters varied
:return: difference between new and old residuals in seconds
"""
if which == 'all':
keys = tmparams_orig.keys()
else:
keys = which
# grab original timing model parameters and errors in dictionary
orig_params = np.array([tmparams_orig[key] for key in keys])
# put varying parameters into dictionary
tmparams_rescaled = np.atleast_1d(np.double(orig_params[:, 0] +
tmparams * orig_params[:, 1]))
tmparams_vary = OrderedDict(zip(keys, tmparams_rescaled))
# set to new values
t2pulsar.vals(tmparams_vary)
new_res = np.double(t2pulsar.residuals().copy())
# remember to set values back to originals
t2pulsar.vals(OrderedDict(zip(keys,
np.atleast_1d(np.double(orig_params[:, 0])))))
# Sort the residuals
isort = np.argsort(t2pulsar.toas(), kind='mergesort')
return residuals[isort] - new_res[isort]
# Model component building blocks #
def timing_block(tmparam_list=['RAJ', 'DECJ', 'F0', 'F1',
'PMRA', 'PMDEC', 'PX']):
"""
Returns the timing model block of the model
:param tmparam_list: a list of parameters to vary in the model
"""
# default 5-sigma prior above and below the parfile mean
tm_params = parameter.Uniform(-5.0, 5.0, size=len(tmparam_list))
# timing model
tm_func = tm_delay(tmparams=tm_params, which=tmparam_list)
tm = deterministic_signals.Deterministic(tm_func, name='timing model')
return tm
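# Illustrative sketch (an addition, not original code): the block above samples each
# timing parameter in units of its parfile 1-sigma uncertainty, i.e. it perturbs
# new_value = val + x * err with x ~ Uniform(-5, 5). The reduced parameter list below
# is hypothetical.
def _example_timing_block():
    """Hypothetical timing block varying only position and spin parameters."""
    return timing_block(tmparam_list=['RAJ', 'DECJ', 'F0', 'F1'])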
| 2,236 | 31.897059 | 80 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/dropout.py |
import enterprise
import numpy as np
from enterprise import constants as const
from enterprise.signals import (deterministic_signals,
parameter,
signal_base,
utils)
@signal_base.function
def dropout_powerlaw(f, name, log10_A=-16, gamma=5,
dropout_psr='B1855+09', k_drop=0.5, k_threshold=0.5):
"""
Dropout powerlaw for a stochastic process. Switches a stochastic
process on or off in a single pulsar depending on whether k_drop exceeds
k_threshold.
:param dropout_psr: Which pulsar to use a dropout switch on. The value 'all'
will use the method on all pulsars.
"""
df = np.diff(np.concatenate((np.array([0]), f[::2])))
if name == 'all':
if k_drop >= k_threshold:
k_switch = 1.0
elif k_drop < k_threshold:
k_switch = 0.0
return k_switch * ((10**log10_A)**2 / 12.0 / np.pi**2 *
const.fyr**(gamma-3) * f**(-gamma) * np.repeat(df, 2))
elif name == dropout_psr:
if k_drop >= k_threshold:
k_switch = 1.0
elif k_drop < k_threshold:
k_switch = 0.0
return k_switch * ((10**log10_A)**2 / 12.0 / np.pi**2 *
const.fyr**(gamma-3) * f**(-gamma) * np.repeat(df, 2))
else:
return ((10**log10_A)**2 / 12.0 / np.pi**2 *
const.fyr**(gamma-3) * f**(-gamma) * np.repeat(df, 2))
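# Illustrative sketch (an addition, not original code): with k_drop ~ Uniform(0, 1)
# and k_threshold = 0.5 the process above is switched on for half of the prior mass,
# so the posterior fraction of k_drop samples above threshold, divided by the prior
# odds, gives a rough on/off (dropout) odds estimate for the signal in that pulsar.
def _example_dropout_odds(k_drop_samples, k_threshold=0.5):
    """Estimate on/off odds from posterior samples of a dropout parameter."""
    k_drop_samples = np.asarray(k_drop_samples)
    n_on = np.sum(k_drop_samples >= k_threshold)
    n_off = np.sum(k_drop_samples < k_threshold)
    prior_odds = (1.0 - k_threshold) / k_threshold
    return (n_on / max(n_off, 1)) / prior_odds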
@signal_base.function
def dropout_physical_ephem_delay(toas, planetssb, pos_t, frame_drift_rate=0,
d_jupiter_mass=0, d_saturn_mass=0, d_uranus_mass=0,
d_neptune_mass=0, jup_orb_elements=np.zeros(6),
sat_orb_elements=np.zeros(6), inc_jupiter_orb=False,
jup_orbelxyz=None, jup_mjd=None, inc_saturn_orb=False,
sat_orbelxyz=None, sat_mjd=None, equatorial=True,
k_drop=0.5, k_threshold=0.5):
"""
Dropout BayesEphem model. Switches BayesEphem on or off depending on
whether k_drop exceeds k_threshold.
"""
# get dropout switch
if k_drop >= k_threshold:
k_switch = 1.0
elif k_drop < k_threshold:
k_switch = 0.0
# convert toas to MJD
mjd = toas / 86400
# grab planet-to-SSB vectors
earth = planetssb[:, 2, :3]
jupiter = planetssb[:, 4, :3]
saturn = planetssb[:, 5, :3]
uranus = planetssb[:, 6, :3]
neptune = planetssb[:, 7, :3]
# do frame rotation
earth = utils.ss_framerotate(mjd, earth, 0.0, 0.0, 0.0, frame_drift_rate,
offset=None, equatorial=equatorial)
# mass perturbations
mpert = [(jupiter, d_jupiter_mass), (saturn, d_saturn_mass),
(uranus, d_uranus_mass), (neptune, d_neptune_mass)]
for planet, dm in mpert:
earth += utils.dmass(planet, dm)
# jupter orbital element perturbations
if inc_jupiter_orb:
jup_perturb_tmp = 0.0009547918983127075 * np.einsum(
'i,ijk->jk', jup_orb_elements, jup_orbelxyz)
earth += np.array([np.interp(mjd, jup_mjd, jup_perturb_tmp[:, aa])
for aa in range(3)]).T
# saturn orbital element perturbations
if inc_saturn_orb:
sat_perturb_tmp = 0.00028588567008942334 * np.einsum(
'i,ijk->jk', sat_orb_elements, sat_orbelxyz)
earth += np.array([np.interp(mjd, sat_mjd, sat_perturb_tmp[:, aa])
for aa in range(3)]).T
    # construct the true geocenter-to-barycenter Roemer delay
tmp_roemer = np.einsum('ij,ij->i', planetssb[:, 2, :3], pos_t)
# create the delay
delay = tmp_roemer - np.einsum('ij,ij->i', earth, pos_t)
return k_switch * delay
def Dropout_PhysicalEphemerisSignal(
frame_drift_rate=parameter.Uniform(-1e-9, 1e-9)('frame_drift_rate'),
d_jupiter_mass=parameter.Normal(0, 1.54976690e-11)('d_jupiter_mass'),
d_saturn_mass=parameter.Normal(0, 8.17306184e-12)('d_saturn_mass'),
d_uranus_mass=parameter.Normal(0, 5.71923361e-11)('d_uranus_mass'),
d_neptune_mass=parameter.Normal(0, 7.96103855e-11)('d_neptune_mass'),
jup_orb_elements=parameter.Uniform(-0.05, 0.05, size=6)('jup_orb_elements'),
sat_orb_elements=parameter.Uniform(-0.5, 0.5, size=6)('sat_orb_elements'),
inc_jupiter_orb=True, inc_saturn_orb=False, use_epoch_toas=True,
k_drop=parameter.Uniform(0.0, 1.0), k_threshold=0.5, name=''):
""" Class factory for dropout physical ephemeris model signal."""
# turn off saturn orbital element parameters if not including in signal
if not inc_saturn_orb:
sat_orb_elements = np.zeros(6)
# define waveform
jup_mjd, jup_orbelxyz, sat_mjd, sat_orbelxyz = (
utils.get_planet_orbital_elements())
wf = dropout_physical_ephem_delay(frame_drift_rate=frame_drift_rate,
d_jupiter_mass=d_jupiter_mass,
d_saturn_mass=d_saturn_mass,
d_uranus_mass=d_uranus_mass,
d_neptune_mass=d_neptune_mass,
jup_orb_elements=jup_orb_elements,
sat_orb_elements=sat_orb_elements,
inc_jupiter_orb=inc_jupiter_orb,
jup_orbelxyz=jup_orbelxyz,
jup_mjd=jup_mjd,
inc_saturn_orb=inc_saturn_orb,
sat_orbelxyz=sat_orbelxyz,
sat_mjd=sat_mjd,
k_drop=k_drop, k_threshold=k_threshold)
BaseClass = deterministic_signals.Deterministic(wf, name=name)
class Dropout_PhysicalEphemerisSignal(BaseClass):
signal_name = 'phys_ephem'
signal_id = 'phys_ephem_' + name if name else 'phys_ephem'
def __init__(self, psr):
# not available for PINT yet
if isinstance(psr, enterprise.pulsar.PintPulsar):
msg = 'Physical Ephemeris model is not compatible with PINT '
msg += 'at this time.'
raise NotImplementedError(msg)
super(Dropout_PhysicalEphemerisSignal, self).__init__(psr)
if use_epoch_toas:
# get quantization matrix and calculate daily average TOAs
U, _ = utils.create_quantization_matrix(psr.toas, nmin=1)
self.uinds = utils.quant2ind(U)
avetoas = np.array([psr.toas[sc].mean() for sc in self.uinds])
self._wf[''].add_kwarg(toas=avetoas)
# interpolate ssb planet position vectors to avetoas
planetssb = np.zeros((len(avetoas), 9, 3))
for jj in range(9):
planetssb[:, jj, :] = np.array([
np.interp(avetoas, psr.toas, psr.planetssb[:, jj, aa])
for aa in range(3)]).T
self._wf[''].add_kwarg(planetssb=planetssb)
                # Interpolate the pulsar position vectors onto epoch TOAs
pos_t = np.array([np.interp(avetoas, psr.toas, psr.pos_t[:, aa])
for aa in range(3)]).T
self._wf[''].add_kwarg(pos_t=pos_t)
# initialize delay
self._delay = np.zeros(len(psr.toas))
@signal_base.cache_call('delay_params')
def get_delay(self, params):
delay = self._wf[''](params=params)
if use_epoch_toas:
for slc, val in zip(self.uinds, delay):
self._delay[slc] = val
return self._delay
else:
return delay
return Dropout_PhysicalEphemerisSignal
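# --- Usage sketch (illustrative, not part of the original module) -----------
# Shows how the class factory above is typically combined with a timing model.
# Assumes `psr` is an enterprise Pulsar built with planetary ephemeris data
# (psr.planetssb populated) and backed by tempo2/libstempo rather than PINT;
# the k_threshold value is only an example.
def _example_dropout_ephem_model(psr):
    from enterprise.signals import gp_signals
    tm = gp_signals.TimingModel()
    eph = Dropout_PhysicalEphemerisSignal(use_epoch_toas=True, k_threshold=0.5)
    model = tm + eph         # combine the deterministic ephemeris signal as usual
    return model(psr)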
| 8,010 | 39.872449 | 87 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/models.py |
import functools
from collections import OrderedDict
import numpy as np
from enterprise import constants as const
from enterprise.signals import (deterministic_signals, gp_signals, parameter,
selections, signal_base, white_signals)
from enterprise.signals.signal_base import LogLikelihood
from enterprise_extensions import chromatic as chrom
from enterprise_extensions import deterministic
from enterprise_extensions import dropout as do
from enterprise_extensions import model_utils
from enterprise_extensions.blocks import (bwm_block, bwm_sglpsr_block,
chromatic_noise_block,
common_red_noise_block,
dm_noise_block, red_noise_block,
white_noise_block)
from enterprise_extensions.chromatic.solar_wind import solar_wind_block
from enterprise_extensions.timing import timing_block
# from enterprise.signals.signal_base import LookupLikelihood
def model_singlepsr_noise(psr, tm_var=False, tm_linear=False,
tmparam_list=None,
red_var=True, psd='powerlaw', red_select=None,
noisedict=None, tm_svd=False, tm_norm=True,
white_vary=True, components=30, upper_limit=False,
is_wideband=False, use_dmdata=False, tnequad=False,
dmjump_var=False, gamma_val=None, dm_var=False,
dm_type='gp', dmgp_kernel='diag', dm_psd='powerlaw',
dm_nondiag_kernel='periodic', dmx_data=None,
dm_annual=False, gamma_dm_val=None,
dm_dt=15, dm_df=200,
chrom_gp=False, chrom_gp_kernel='nondiag',
chrom_psd='powerlaw', chrom_idx=4, chrom_quad=False,
chrom_kernel='periodic',
chrom_dt=15, chrom_df=200,
dm_expdip=False, dmexp_sign='negative',
dm_expdip_idx=2,
dm_expdip_tmin=None, dm_expdip_tmax=None,
num_dmdips=1, dmdip_seqname=None,
dm_cusp=False, dm_cusp_sign='negative',
dm_cusp_idx=2, dm_cusp_sym=False,
dm_cusp_tmin=None, dm_cusp_tmax=None,
num_dm_cusps=1, dm_cusp_seqname=None,
dm_dual_cusp=False, dm_dual_cusp_tmin=None,
dm_dual_cusp_tmax=None, dm_dual_cusp_sym=False,
dm_dual_cusp_idx1=2, dm_dual_cusp_idx2=4,
dm_dual_cusp_sign='negative', num_dm_dual_cusps=1,
dm_dual_cusp_seqname=None,
dm_sw_deter=False, dm_sw_gp=False,
swgp_prior=None, swgp_basis=None,
coefficients=False, extra_sigs=None,
psr_model=False, factorized_like=False,
Tspan=None, fact_like_gamma=13./3, gw_components=10,
fact_like_logmin=None, fact_like_logmax=None,
select='backend', tm_marg=False, dense_like=False, ng_twg_setup=False, wb_efac_sigma=0.25):
"""
Single pulsar noise model.
:param psr: enterprise pulsar object
:param tm_var: explicitly vary the timing model parameters
:param tm_linear: vary the timing model in the linear approximation
:param tmparam_list: an explicit list of timing model parameters to vary
:param red_var: include red noise in the model
:param psd: red noise psd model
:param noisedict: dictionary of noise parameters
:param tm_svd: boolean for svd-stabilised timing model design matrix
:param tm_norm: normalize the timing model, or provide custom normalization
:param white_vary: boolean for varying white noise or keeping fixed
:param components: number of modes in Fourier domain processes
:param dm_components: number of modes in Fourier domain DM processes
:param upper_limit: whether to do an upper-limit analysis
:param is_wideband: whether input TOAs are wideband TOAs; will exclude
ecorr from the white noise model
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband
:param gamma_val: red noise spectral index to fix
:param dm_var: whether to explicitly model DM-variations
:param dm_type: gaussian process ('gp') or dmx ('dmx')
:param dmgp_kernel: diagonal in frequency or non-diagonal
:param dm_psd: power-spectral density of DM variations
:param dm_nondiag_kernel: type of time-domain DM GP kernel
:param dmx_data: supply the DMX data from par files
:param dm_annual: include an annual DM signal
:param gamma_dm_val: spectral index of power-law DM variations
:param dm_dt: time-scale for DM linear interpolation basis (days)
:param dm_df: frequency-scale for DM linear interpolation basis (MHz)
:param chrom_gp: include general chromatic noise
:param chrom_gp_kernel: GP kernel type to use in chrom ['diag','nondiag']
:param chrom_psd: power-spectral density of chromatic noise
['powerlaw','tprocess','free_spectrum']
:param chrom_idx: frequency scaling of chromatic noise
:param chrom_kernel: Type of 'nondiag' time-domain chrom GP kernel to use
['periodic', 'sq_exp','periodic_rfband', 'sq_exp_rfband']
:param chrom_quad: Whether to add a quadratic chromatic term. Boolean
:param chrom_dt: time-scale for chromatic linear interpolation basis (days)
:param chrom_df: frequency-scale for chromatic linear interpolation basis (MHz)
    :param dm_expdip: include a DM exponential dip
:param dmexp_sign: set the sign parameter for dip
:param dm_expdip_idx: chromatic index of exponential dip
:param dm_expdip_tmin: sampling minimum of DM dip epoch
:param dm_expdip_tmax: sampling maximum of DM dip epoch
:param num_dmdips: number of dm exponential dips
:param dmdip_seqname: name of dip sequence
:param dm_cusp: include a DM exponential cusp
:param dm_cusp_sign: set the sign parameter for cusp
:param dm_cusp_idx: chromatic index of exponential cusp
:param dm_cusp_tmin: sampling minimum of DM cusp epoch
:param dm_cusp_tmax: sampling maximum of DM cusp epoch
:param dm_cusp_sym: make exponential cusp symmetric
:param num_dm_cusps: number of dm exponential cusps
:param dm_cusp_seqname: name of cusp sequence
:param dm_dual_cusp: include a DM cusp with two chromatic indices
:param dm_dual_cusp_tmin: sampling minimum of DM dual cusp epoch
:param dm_dual_cusp_tmax: sampling maximum of DM dual cusp epoch
:param dm_dual_cusp_idx1: first chromatic index of DM dual cusp
:param dm_dual_cusp_idx2: second chromatic index of DM dual cusp
:param dm_dual_cusp_sym: make dual cusp symmetric
:param dm_dual_cusp_sign: set the sign parameter for dual cusp
:param num_dm_dual_cusps: number of DM dual cusps
:param dm_dual_cusp_seqname: name of dual cusp sequence
:param dm_scattering: whether to explicitly model DM scattering variations
:param dm_sw_deter: use the deterministic solar wind model
:param dm_sw_gp: add a Gaussian process perturbation to the deterministic
solar wind model.
:param swgp_prior: prior is currently set automatically
:param swgp_basis: ['powerlaw', 'periodic', 'sq_exp']
:param coefficients: explicitly include latent coefficients in model
:param psr_model: Return the enterprise model instantiated on the pulsar
rather than an instantiated PTA object, i.e. model(psr) rather than
PTA(model(psr)).
    :param factorized_like: Whether to run a factorized likelihood analysis. Boolean
:param gw_components: number of modes in Fourier domain for a common
process in a factorized likelihood calculation.
:param fact_like_gamma: fixed common process spectral index
:param fact_like_logmin: specify lower prior for common psd. This is a prior on log10_rho
if common_psd is 'spectrum', else it is a prior on log10 amplitude
:param fact_like_logmax: specify upper prior for common psd. This is a prior on log10_rho
if common_psd is 'spectrum', else it is a prior on log10 amplitude
:param Tspan: time baseline used to determine Fourier GP frequencies
:param extra_sigs: Any additional `enterprise` signals to be added to the
model.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:return s: single pulsar noise model
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# timing model
if not tm_var:
if (is_wideband and use_dmdata):
if dmjump_var:
dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmjump = parameter.Constant()
if white_vary:
if ng_twg_setup:
dmefac = parameter.Normal(1.0, wb_efac_sigma)
else:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(
selections.by_backend),
dmjump_selection=selections.Selection(
selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd, normed=tm_norm,
coefficients=coefficients)
else:
# create new attribute for enterprise pulsar object
psr.tmparams_orig = OrderedDict.fromkeys(psr.t2pulsar.pars())
for key in psr.tmparams_orig:
psr.tmparams_orig[key] = (psr.t2pulsar[key].val,
psr.t2pulsar[key].err)
if not tm_linear:
s = timing_block(tmparam_list=tmparam_list)
else:
pass
# red noise and common process
if factorized_like:
if Tspan is None:
            msg = 'Must provide a Tspan common to all pulsars when doing '
msg += 'a factorized likelihood analysis.'
raise ValueError(msg)
s += common_red_noise_block(psd=psd, prior=amp_prior,
Tspan=Tspan, components=gw_components,
gamma_val=fact_like_gamma, delta_val=None,
orf=None, name='gw',
coefficients=coefficients,
pshift=False, pseed=None,
logmin=fact_like_logmin, logmax=fact_like_logmax)
if red_var:
s += red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_val,
coefficients=coefficients, select=red_select)
# DM variations
if dm_var:
if dm_type == 'gp':
if dmgp_kernel == 'diag':
s += dm_noise_block(gp_kernel=dmgp_kernel, psd=dm_psd,
prior=amp_prior, components=components,
gamma_val=gamma_dm_val,
coefficients=coefficients)
elif dmgp_kernel == 'nondiag':
s += dm_noise_block(gp_kernel=dmgp_kernel,
nondiag_kernel=dm_nondiag_kernel,
dt=dm_dt, df=dm_df,
coefficients=coefficients)
elif dm_type == 'dmx':
s += chrom.dmx_signal(dmx_data=dmx_data[psr.name])
if dm_annual:
s += chrom.dm_annual_signal()
if chrom_gp:
s += chromatic_noise_block(gp_kernel=chrom_gp_kernel,
psd=chrom_psd, idx=chrom_idx,
components=components,
nondiag_kernel=chrom_kernel,
dt=chrom_dt, df=chrom_df,
include_quadratic=chrom_quad,
coefficients=coefficients)
if dm_expdip:
if dm_expdip_tmin is None and dm_expdip_tmax is None:
tmin = [psr.toas.min() / const.day for ii in range(num_dmdips)]
tmax = [psr.toas.max() / const.day for ii in range(num_dmdips)]
else:
tmin = (dm_expdip_tmin if isinstance(dm_expdip_tmin, list)
else [dm_expdip_tmin])
tmax = (dm_expdip_tmax if isinstance(dm_expdip_tmax, list)
else [dm_expdip_tmax])
if dmdip_seqname is not None:
dmdipname_base = (['dmexp_' + nm for nm in dmdip_seqname]
if isinstance(dmdip_seqname, list)
else ['dmexp_' + dmdip_seqname])
else:
dmdipname_base = ['dmexp_{0}'.format(ii+1)
for ii in range(num_dmdips)]
dm_expdip_idx = (dm_expdip_idx if isinstance(dm_expdip_idx, list)
else [dm_expdip_idx])
for dd in range(num_dmdips):
s += chrom.dm_exponential_dip(tmin=tmin[dd], tmax=tmax[dd],
idx=dm_expdip_idx[dd],
sign=dmexp_sign,
name=dmdipname_base[dd])
if dm_cusp:
if dm_cusp_tmin is None and dm_cusp_tmax is None:
tmin = [psr.toas.min() / const.day for ii in range(num_dm_cusps)]
tmax = [psr.toas.max() / const.day for ii in range(num_dm_cusps)]
else:
tmin = (dm_cusp_tmin if isinstance(dm_cusp_tmin, list)
else [dm_cusp_tmin])
tmax = (dm_cusp_tmax if isinstance(dm_cusp_tmax, list)
else [dm_cusp_tmax])
if dm_cusp_seqname is not None:
cusp_name_base = 'dm_cusp_'+dm_cusp_seqname+'_'
else:
cusp_name_base = 'dm_cusp_'
dm_cusp_idx = (dm_cusp_idx if isinstance(dm_cusp_idx, list)
else [dm_cusp_idx])
dm_cusp_sign = (dm_cusp_sign if isinstance(dm_cusp_sign, list)
else [dm_cusp_sign])
for dd in range(1, num_dm_cusps+1):
s += chrom.dm_exponential_cusp(tmin=tmin[dd-1],
tmax=tmax[dd-1],
idx=dm_cusp_idx[dd-1],
sign=dm_cusp_sign[dd-1],
symmetric=dm_cusp_sym,
name=cusp_name_base+str(dd))
if dm_dual_cusp:
            if dm_dual_cusp_tmin is None and dm_dual_cusp_tmax is None:
tmin = psr.toas.min() / const.day
tmax = psr.toas.max() / const.day
else:
tmin = dm_dual_cusp_tmin
tmax = dm_dual_cusp_tmax
if dm_dual_cusp_seqname is not None:
                dual_cusp_name_base = 'dm_dual_cusp_'+dm_dual_cusp_seqname+'_'
else:
dual_cusp_name_base = 'dm_dual_cusp_'
for dd in range(1, num_dm_dual_cusps+1):
s += chrom.dm_dual_exp_cusp(tmin=tmin, tmax=tmax,
idx1=dm_dual_cusp_idx1,
idx2=dm_dual_cusp_idx2,
sign=dm_dual_cusp_sign,
symmetric=dm_dual_cusp_sym,
name=dual_cusp_name_base+str(dd))
if dm_sw_deter:
Tspan = psr.toas.max() - psr.toas.min()
s += solar_wind_block(ACE_prior=True, include_swgp=dm_sw_gp,
swgp_prior=swgp_prior, swgp_basis=swgp_basis,
Tspan=Tspan)
if extra_sigs is not None:
s += extra_sigs
# adding white-noise, and acting on psr objects
if ('NANOGrav' in psr.flags['pta'] or 'CHIME' in psr.flags['f']) and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
model = s2(psr)
if psr_model:
Model = s2
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select, ng_twg_setup=ng_twg_setup, wb_efac_sigma=wb_efac_sigma)
model = s3(psr)
if psr_model:
Model = s3
if psr_model:
return Model
else:
# set up PTA
if dense_like:
pta = signal_base.PTA([model], lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA([model])
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
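# --- Usage sketch (illustrative, not part of the original module) -----------
# A typical single-pulsar call.  Assumes `psr` is an enterprise Pulsar object and
# `noisedict` holds fixed white-noise values from a previous analysis; the option
# values are only examples.
def _example_single_pulsar_pta(psr, noisedict):
    pta = model_singlepsr_noise(psr, red_var=True, psd='powerlaw',
                                white_vary=False, noisedict=noisedict,
                                dm_var=True, dm_type='gp', dmgp_kernel='diag')
    x0 = np.hstack([p.sample() for p in pta.params])  # random draw from the prior
    return pta.get_lnlikelihood(x0)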
def model_1(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, upper_limit=False, bayesephem=False, tnequad=False,
be_type='orbel', is_wideband=False, use_dmdata=False, Tspan=None,
select='backend', tm_marg=False, dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with only white and red noise:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. Optional physical ephemeris modeling.
:param psd:
Choice of PSD function [e.g. powerlaw (default), turnover, tprocess]
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(psd=psd, prior=amp_prior,
Tspan=Tspan, components=components)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
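# --- Usage sketch (illustrative, not part of the original module) -----------
# model_1 with an explicit Tspan, mirroring what the function derives internally
# when Tspan is None.  Assumes `psrs` is a list of enterprise Pulsar objects and
# `noisedict` a dictionary of fixed white-noise values.
def _example_model_1_pta(psrs, noisedict):
    Tspan = model_utils.get_tspan(psrs)   # same helper used above
    return model_1(psrs, psd='powerlaw', noisedict=noisedict,
                   white_vary=False, bayesephem=False, Tspan=Tspan)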
def model_2a(psrs, psd='powerlaw', noisedict=None, components=30,
n_rnfreqs=None, n_gwbfreqs=None, gamma_common=None,
delta_common=None, upper_limit=False, bayesephem=False,
be_type='setIII', white_vary=False, is_wideband=False,
use_dmdata=False, Tspan=None, select='backend', tnequad=False,
pshift=False, pseed=None, psr_models=False,
tm_marg=False, dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 2A from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
        1. Common red noise modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover', 'spectrum']
        2. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param psr_models:
Return list of psr models rather than signal_base.PTA object.
:param n_rnfreqs:
Number of frequencies to use in achromatic rednoise model.
:param n_gwbfreqs:
Number of frequencies to use in the GWB model.
:param pshift:
Option to use a random phase shift in design matrix. For testing the
null hypothesis.
:param pseed:
Option to provide a seed for the random phase shift.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
if n_gwbfreqs is None:
n_gwbfreqs = components
if n_rnfreqs is None:
n_rnfreqs = components
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=n_rnfreqs)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=n_gwbfreqs, gamma_val=gamma_common,
delta_val=delta_common, name='gw',
pshift=pshift, pseed=pseed)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
if psr_models:
return models
else:
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
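# --- Usage sketch (illustrative, not part of the original module) -----------
# The psr_models=True path returns the instantiated per-pulsar models, which can
# then be assembled into a PTA by hand (e.g. after adding an extra signal for one
# pulsar).  Assumes `psrs` is a list of enterprise Pulsar objects; the frequency
# counts are only examples.
def _example_model_2a_psr_models(psrs):
    mods = model_2a(psrs, psd='powerlaw', n_rnfreqs=30, n_gwbfreqs=14,
                    white_vary=True, psr_models=True)
    return signal_base.PTA(mods)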
def model_general(psrs, tm_var=False, tm_linear=False, tmparam_list=None,
tm_svd=False, tm_norm=True, noisedict=None, white_vary=False,
Tspan=None, modes=None, wgts=None, logfreq=False, nmodes_log=10,
common_psd='powerlaw', common_components=30, tnequad=False,
log10_A_common=None, gamma_common=None,
common_logmin=None, common_logmax=None,
orf='crn', orf_names=None, orf_ifreq=0, leg_lmax=5,
upper_limit_common=None, upper_limit=False,
red_var=True, red_psd='powerlaw', red_components=30, upper_limit_red=None,
red_select=None, red_breakflat=False, red_breakflat_fq=None,
bayesephem=False, be_type='setIII_1980', is_wideband=False, use_dmdata=False,
dm_var=False, dm_type='gp', dm_psd='powerlaw', dm_components=30,
upper_limit_dm=None, dm_annual=False, dm_chrom=False, dmchrom_psd='powerlaw',
dmchrom_idx=4, gequad=False, coefficients=False, pshift=False,
select='backend', tm_marg=False, dense_like=False,
delta_common=None):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
object instantiated with user-supplied options.
:param tm_var: boolean to vary timing model coefficients.
[default = False]
:param tm_linear: boolean to vary timing model under linear approximation.
[default = False]
:param tmparam_list: list of timing model parameters to vary.
[default = None]
:param tm_svd: stabilize timing model designmatrix with SVD.
[default = False]
:param tm_norm: normalize the timing model design matrix, or provide custom
normalization. Alternative to 'tm_svd'.
[default = True]
:param noisedict: Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
[default = None]
:param white_vary: boolean for varying white noise or keeping fixed.
[default = False]
:param Tspan: timespan assumed for describing stochastic processes,
in units of seconds. If None provided will find span of pulsars.
[default = None]
:param modes: list of frequencies on which to describe red processes.
[default = None]
:param wgts: sqrt summation weights for each frequency bin, i.e. sqrt(delta f).
[default = None]
:param logfreq: boolean for including log-spaced bins.
[default = False]
:param nmodes_log: number of log-spaced bins below 1/T.
[default = 10]
:param common_psd: psd of common process.
['powerlaw', 'spectrum', 'turnover', 'turnover_knee,', 'broken_powerlaw']
[default = 'powerlaw']
:param common_components: number of frequencies starting at 1/T for common process.
[default = 30]
:param log10_A_common: value of fixed log10_A_common parameter for
fixed amplitude analyses.
[default = None]
:param gamma_common: fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
[default = None]
:param common_logmin: specify lower prior for common psd. This is a prior on log10_rho
if common_psd is 'spectrum', else it is a prior on log amplitude
:param common_logmax: specify upper prior for common psd. This is a prior on log10_rho
if common_psd is 'spectrum', else it is a prior on log amplitude
    :param orf: comma-delimited string of multiple common processes with different orfs.
[default = crn]
    :param orf_names: comma-delimited string of process names for different orfs. Manual
control of these names is useful for embedding model_general within a hypermodel
analysis for a process with and without hd correlations where we want to avoid
parameter duplication.
[default = None]
:param orf_ifreq:
Frequency bin at which to start the Hellings & Downs function with
numbering beginning at 0. Currently only works with freq_hd orf.
[default = 0]
:param leg_lmax:
Maximum multipole of a Legendre polynomial series representation
of the overlap reduction function.
[default = 5]
:param upper_limit_common: perform upper limit on common red noise amplitude. Note
        that when performing upper limits it is recommended that the spectral index also
be fixed to a specific value.
[default = False]
:param upper_limit: apply upper limit priors to all red processes.
[default = False]
:param red_var: boolean to switch on/off intrinsic red noise.
[default = True]
:param red_psd: psd of intrinsic red process.
['powerlaw', 'spectrum', 'turnover', 'tprocess', 'tprocess_adapt']
[default = 'powerlaw']
:param red_components: number of frequencies starting at 1/T for intrinsic red process.
[default = 30]
:param upper_limit_red: perform upper limit on intrinsic red noise amplitude. Note
        that when performing upper limits it is recommended that the spectral index also
be fixed to a specific value.
[default = False]
:param red_select: selection properties for intrinsic red noise.
['backend', 'band', 'band+', None]
[default = None]
:param red_breakflat: break red noise spectrum and make flat above certain frequency.
[default = False]
:param red_breakflat_fq: break frequency for 'red_breakflat'.
[default = None]
:param bayesephem: boolean to include BayesEphem model.
[default = False]
:param be_type: flavor of bayesephem model based on how partials are computed.
['orbel', 'orbel-v2', 'setIII', 'setIII_1980']
[default = 'setIII_1980']
:param is_wideband: boolean for whether input TOAs are wideband TOAs. Will exclude
ecorr from the white noise model.
[default = False]
:param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband.
[default = False]
:param dm_var: boolean for explicitly searching for DM variations.
[default = False]
:param dm_type: type of DM variations.
['gp', other choices selected with additional options; see below]
[default = 'gp']
:param dm_psd: psd of DM GP.
['powerlaw', 'spectrum', 'turnover', 'tprocess', 'tprocess_adapt']
[default = 'powerlaw']
:param dm_components: number of frequencies starting at 1/T for DM GP.
[default = 30]
    :param upper_limit_dm: perform upper limit on DM GP. Note that when performing
upper limits it is recommended that the spectral index also be
fixed to a specific value.
[default = False]
:param dm_annual: boolean to search for an annual DM trend.
[default = False]
:param dm_chrom: boolean to search for a generic chromatic GP.
[default = False]
:param dmchrom_psd: psd of generic chromatic GP.
['powerlaw', 'spectrum', 'turnover']
[default = 'powerlaw']
:param dmchrom_idx: spectral index of generic chromatic GP.
[default = 4]
:param gequad: boolean to search for a global EQUAD.
[default = False]
:param coefficients: boolean to form full hierarchical PTA object;
(no analytic latent-coefficient marginalization)
[default = False]
:param pshift: boolean to add random phase shift to red noise Fourier design
matrices for false alarm rate studies.
[default = False]
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
Default PTA object composition:
1. fixed EFAC per backend/receiver system (per pulsar)
2. fixed EQUAD per backend/receiver system (per pulsar)
3. fixed ECORR per backend/receiver system (per pulsar)
4. Red noise modeled as a power-law with 30 sampling frequencies
(per pulsar)
5. Linear timing model (per pulsar)
6. Common-spectrum uncorrelated process modeled as a power-law with
30 sampling frequencies. (global)
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
gp_priors = [upper_limit_red, upper_limit_dm, upper_limit_common]
if all(ii is None for ii in gp_priors):
amp_prior_red = amp_prior
amp_prior_dm = amp_prior
amp_prior_common = amp_prior
else:
amp_prior_red = 'uniform' if upper_limit_red else 'log-uniform'
amp_prior_dm = 'uniform' if upper_limit_dm else 'log-uniform'
amp_prior_common = 'uniform' if upper_limit_common else 'log-uniform'
# timing model
if not tm_var and not use_dmdata:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd, normed=tm_norm,
coefficients=coefficients)
elif not tm_var and use_dmdata:
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
# create new attribute for enterprise pulsar object
for p in psrs:
p.tmparams_orig = OrderedDict.fromkeys(p.t2pulsar.pars())
for key in p.tmparams_orig:
p.tmparams_orig[key] = (p.t2pulsar[key].val,
p.t2pulsar[key].err)
if not tm_linear:
s = timing_block(tmparam_list=tmparam_list)
else:
pass
# find the maximum time span to set GW frequency sampling
if Tspan is not None:
Tspan = Tspan
else:
Tspan = model_utils.get_tspan(psrs)
if logfreq:
fmin = 10.0
modes, wgts = model_utils.linBinning(Tspan, nmodes_log,
1.0 / fmin / Tspan,
common_components, nmodes_log)
wgts = wgts**2.0
# red noise
if red_var:
s += red_noise_block(psd=red_psd, prior=amp_prior_red, Tspan=Tspan,
components=red_components, modes=modes, wgts=wgts,
coefficients=coefficients,
select=red_select, break_flat=red_breakflat,
break_flat_fq=red_breakflat_fq)
# common red noise block
crn = []
if orf_names is None:
orf_names = orf
for elem, elem_name in zip(orf.split(','), orf_names.split(',')):
if elem == 'zero_diag_bin_orf' or elem == 'zero_diag_legendre_orf':
log10_A_val = log10_A_common
else:
log10_A_val = None
crn.append(common_red_noise_block(psd=common_psd, prior=amp_prior_common, Tspan=Tspan,
components=common_components,
log10_A_val=log10_A_val, gamma_val=gamma_common,
delta_val=None, orf=elem, name='gw_{}'.format(elem_name),
orf_ifreq=orf_ifreq, leg_lmax=leg_lmax,
coefficients=coefficients, pshift=pshift, pseed=None,
logmin=common_logmin, logmax=common_logmax))
# orf_ifreq only affects freq_hd model.
# leg_lmax only affects (zero_diag_)legendre_orf model.
crn = functools.reduce((lambda x, y: x+y), crn)
s += crn
# DM variations
if dm_var:
if dm_type == 'gp':
s += dm_noise_block(gp_kernel='diag', psd=dm_psd,
prior=amp_prior_dm,
components=dm_components, gamma_val=None,
coefficients=coefficients)
if dm_annual:
s += chrom.dm_annual_signal()
if dm_chrom:
s += chromatic_noise_block(psd=dmchrom_psd, idx=dmchrom_idx,
name='chromatic',
components=dm_components,
coefficients=coefficients)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
if gequad:
s2 += white_signals.EquadNoise(log10_equad=parameter.Uniform(-8.5, -5),
selection=selections.Selection(selections.no_selection),
name='gequad')
if '1713' in p.name and dm_var:
tmin = p.toas.min() / const.day
tmax = p.toas.max() / const.day
s3 = s2 + chrom.dm_exponential_dip(tmin=tmin, tmax=tmax, idx=2,
sign=False, name='dmexp')
models.append(s3(p))
else:
models.append(s2(p))
else:
s4 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
if gequad:
s4 += white_signals.TNEquadNoise(log10_tnequad=parameter.Uniform(-8.5, -5),
selection=selections.Selection(selections.no_selection),
name='gequad')
if '1713' in p.name and dm_var:
tmin = p.toas.min() / const.day
tmax = p.toas.max() / const.day
s5 = s4 + chrom.dm_exponential_dip(tmin=tmin, tmax=tmax, idx=2,
sign=False, name='dmexp')
models.append(s5(p))
else:
models.append(s4(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
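# --- Usage sketch (illustrative, not part of the original module) -----------
# A common process split into an uncorrelated ('crn') and an HD-correlated ('hd')
# component through the comma-delimited `orf` argument, with explicit orf_names so
# the two processes carry distinct parameter names.  Assumes `psrs` is a list of
# enterprise Pulsar objects and `noisedict` a dictionary of fixed white-noise values.
def _example_model_general_two_orfs(psrs, noisedict):
    return model_general(psrs, noisedict=noisedict, red_var=True,
                         common_psd='powerlaw', common_components=14,
                         orf='crn,hd', orf_names='crn,hd')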
def model_2b(psrs, psd='powerlaw', noisedict=None, white_vary=False,
bayesephem=False, be_type='orbel', is_wideband=False, components=30,
use_dmdata=False, Tspan=None, select='backend', pshift=False, tnequad=False,
tm_marg=False, dense_like=False, tm_svd=False, upper_limit=False,
gamma_common=None):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 2B from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. Dipole spatially correlated signal modeled with PSD.
Default PSD is powerlaw. Available options
['powerlaw', 'turnover', 'spectrum']
2. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# dipole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='dipole', name='dipole', pshift=pshift)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, gamma_common=None, upper_limit=False, tnequad=False,
bayesephem=False, be_type='orbel', is_wideband=False,
use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 2C from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. Dipole spatially correlated signal modeled with PSD.
Default PSD is powerlaw. Available options
['powerlaw', 'turnover', 'spectrum']
2. Monopole spatially correlated signal modeled with PSD.
Default PSD is powerlaw. Available options
['powerlaw', 'turnover', 'spectrum']
3. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# dipole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='dipole', name='dipole')
# monopole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='monopole', name='monopole')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, n_rnfreqs=None, n_gwbfreqs=None,
gamma_common=None, upper_limit=False, tnequad=False,
bayesephem=False, be_type='orbel', is_wideband=False,
use_dmdata=False, Tspan=None, select='backend', pshift=False,
tm_marg=False, dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 2D from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. Monopole spatially correlated signal modeled with PSD.
Default PSD is powerlaw. Available options
['powerlaw', 'turnover', 'spectrum']
2. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
if n_gwbfreqs is None:
n_gwbfreqs = components
if n_rnfreqs is None:
n_rnfreqs = components
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=n_rnfreqs)
# monopole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=n_gwbfreqs, gamma_val=gamma_common,
orf='monopole', name='monopole', pshift=pshift)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, n_rnfreqs=None, n_gwbfreqs=None,
gamma_common=None, delta_common=None, upper_limit=False,
bayesephem=False, be_type='setIII', is_wideband=False,
use_dmdata=False, Tspan=None, select='backend',
tnequad=False,
pshift=False, pseed=None, psr_models=False,
tm_marg=False, dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 3A from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. GWB with HD correlations modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover', 'spectrum']
        2. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param delta_common:
Fixed common red process spectral index value for higher frequencies in
broken power law model.
By default we vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param pshift:
Option to use a random phase shift in design matrix. For testing the
null hypothesis.
:param pseed:
Option to provide a seed for the random phase shift.
:param psr_models:
Return list of psr models rather than signal_base.PTA object.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
if n_gwbfreqs is None:
n_gwbfreqs = components
if n_rnfreqs is None:
n_rnfreqs = components
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(psd='powerlaw',
prior=amp_prior,
Tspan=Tspan, components=n_rnfreqs)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=n_gwbfreqs, gamma_val=gamma_common,
delta_val=delta_common,
orf='hd', name='gw', pshift=pshift, pseed=pseed)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
if psr_models:
return models
else:
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
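# --- Usage sketch (illustrative, not part of the original module) -----------
# An HD-correlated search with model_3a, plus a phase-shifted copy of the same
# model for building a null (background) distribution.  Assumes `psrs` and
# `noisedict` are available; the seed and frequency count are only examples.
def _example_model_3a_with_null(psrs, noisedict):
    pta_signal = model_3a(psrs, psd='powerlaw', noisedict=noisedict,
                          n_gwbfreqs=14, gamma_common=13.0 / 3.0)
    pta_null = model_3a(psrs, psd='powerlaw', noisedict=noisedict,
                        n_gwbfreqs=14, gamma_common=13.0 / 3.0,
                        pshift=True, pseed=42)
    return pta_signal, pta_null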
def model_3b(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, gamma_common=None, upper_limit=False, tnequad=False,
bayesephem=False, be_type='setIII', is_wideband=False,
use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 3B from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. GWB with HD correlations modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover', 'spectrum']
        2. Dipole signal modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover', 'spectrum']
3. Optional physical ephemeris modeling.
:param psd:
PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='hd', name='gw')
# dipole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='dipole', name='dipole')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
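# Usage sketch (illustrative, not part of the library API): building a model
# 3B PTA. `psrs` is assumed to be a list of enterprise.Pulsar objects and
# `noise_params` a previously loaded noise dictionary; both names are
# placeholders.
#
#     pta = model_3b(psrs, psd='powerlaw', noisedict=noise_params,
#                    bayesephem=True, tm_marg=True)
#     print(pta.param_names)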
def model_3c(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, gamma_common=None, upper_limit=False, tnequad=False,
bayesephem=False, be_type='orbel', is_wideband=False,
use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 3C from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. GWB with HD correlations modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    2. Dipole signal modeled with user defined PSD with
       30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    3. Monopole signal modeled with user defined PSD with
       30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    4. Optional physical ephemeris modeling.
    :param psd:
       PSD to use for common red noise signal. Available options
       are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
       this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
# timing model
if is_wideband and use_dmdata:
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='hd', name='gw')
# dipole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='dipole', name='dipole')
# monopole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='monopole', name='monopole')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
def model_3d(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, gamma_common=None, upper_limit=False, tnequad=False,
bayesephem=False, be_type='orbel', is_wideband=False,
use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 3D from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. GWB with HD correlations modeled with user defined PSD with
30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    2. Monopole signal modeled with user defined PSD with
       30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    3. Optional physical ephemeris modeling.
    :param psd:
       PSD to use for common red noise signal. Available options
       are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
       this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param be_type:
orbel, orbel-v2, setIII
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param Tspan: time baseline used to determine Fourier GP frequencies;
derived from data if not specified
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
if Tspan is None:
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='hd', name='gw')
# monopole
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
orf='monopole', name='monopole')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
model=be_type)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
tnequad=tnequad, select=select)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
tnequad=tnequad, select=select)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
def model_2a_drop_be(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, gamma_common=None, upper_limit=False,
is_wideband=False, use_dmdata=False, k_threshold=0.5,
pshift=False, tm_marg=False, dense_like=False, tm_svd=False,
tnequad=False,):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 2A from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
    1. Common red noise modeled with user defined PSD with
       30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    2. Optional physical ephemeris modeling.
    :param psd:
       PSD to use for common red noise signal. Available options
       are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
       this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param k_threshold:
Define threshold for dropout parameter 'k'.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
name='gw', pshift=pshift)
# ephemeris model
s += do.Dropout_PhysicalEphemerisSignal(use_epoch_toas=True,
k_threshold=k_threshold)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True, tnequad=tnequad)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, tnequad=tnequad)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
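# Usage sketch (illustrative): model 2A with the BayesEphem dropout switch.
# `psrs` and `noise_params` are placeholder names for a list of
# enterprise.Pulsar objects and a noise dictionary.
#
#     pta = model_2a_drop_be(psrs, psd='powerlaw', noisedict=noise_params,
#                            k_threshold=0.5)
#     # posterior samples with the dropout parameter below/above k_threshold
#     # correspond to the ephemeris perturbation being switched off/on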
def model_2a_drop_crn(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, gamma_common=None, upper_limit=False,
bayesephem=False, is_wideband=False, use_dmdata=False,
k_threshold=0.5, pshift=False, tm_marg=False,
dense_like=False, tm_svd=False, tnequad=False,):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 2A from the analysis paper:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
    1. Common red noise modeled with user defined PSD with
       30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    2. Optional physical ephemeris modeling.
    :param psd:
       PSD to use for common red noise signal. Available options
       are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
       this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
amp_name = '{}_log10_A'.format('gw')
if amp_prior == 'uniform':
log10_Agw = parameter.LinearExp(-18, -11)(amp_name)
elif amp_prior == 'log-uniform' and gamma_common is not None:
if np.abs(gamma_common - 4.33) < 0.1:
log10_Agw = parameter.Uniform(-18, -14)(amp_name)
else:
log10_Agw = parameter.Uniform(-18, -11)(amp_name)
else:
log10_Agw = parameter.Uniform(-18, -11)(amp_name)
gam_name = '{}_gamma'.format('gw')
if gamma_common is not None:
gamma_gw = parameter.Constant(gamma_common)(gam_name)
else:
gamma_gw = parameter.Uniform(0, 7)(gam_name)
k_drop = parameter.Uniform(0.0, 1.0) # per-pulsar
drop_pl = do.dropout_powerlaw(log10_A=log10_Agw, gamma=gamma_gw,
k_drop=k_drop, k_threshold=k_threshold)
crn = gp_signals.FourierBasisGP(drop_pl, components=components,
Tspan=Tspan, name='gw', pshift=pshift)
s += crn
# ephemeris model
s += do.Dropout_PhysicalEphemerisSignal(use_epoch_toas=True)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True, tnequad=tnequad)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, tnequad=tnequad)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
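# Usage sketch (illustrative): model 2A with a dropout switch on the common
# red process; `psrs` and `noise_params` are placeholders as above. Each
# pulsar carries its own k_drop parameter that gates its CRN term.
#
#     pta = model_2a_drop_crn(psrs, psd='powerlaw', noisedict=noise_params,
#                             gamma_common=4.33, k_threshold=0.5)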
# Does not yet work with IPTA datasets due to white-noise modeling issues.
def model_chromatic(psrs, psd='powerlaw', noisedict=None, white_vary=False,
components=30, gamma_common=None, upper_limit=False,
bayesephem=False, is_wideband=False, use_dmdata=False,
pshift=False, idx=4, chromatic_psd='powerlaw',
c_psrs=['J1713+0747'], tm_marg=False, dense_like=False,
tm_svd=False, tnequad=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with model 2A from the analysis paper + additional
chromatic noise for given pulsars
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
6. Chromatic noise for given pulsar list
global:
    1. Common red noise modeled with user defined PSD with
       30 sampling frequencies. Available PSDs are
       ['powerlaw', 'turnover', 'spectrum']
    2. Optional physical ephemeris modeling.
    :param psd:
       PSD to use for common red noise signal. Available options
       are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
       this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param idx:
       Index of chromatic process (i.e. DM is 2, scattering would be 4). If
       set to `vary` then it will vary from 0 to 6 (this will be VERY slow!)
:param chromatic_psd:
PSD to use for chromatic noise. Available options
       are ['powerlaw', 'turnover', 'spectrum']. 'powerlaw' is the default
value.
:param c_psrs:
List of pulsars to use chromatic noise. 'all' will use all pulsars
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
Tspan = model_utils.get_tspan(psrs)
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# white noise
s += white_noise_block(vary=white_vary, inc_ecorr=not is_wideband,
tnequad=tnequad)
# red noise
s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
# common red noise block
s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
components=components, gamma_val=gamma_common,
name='gw', pshift=pshift)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# chromatic noise
sc = chromatic_noise_block(psd=chromatic_psd, idx=idx)
if c_psrs == 'all':
s += sc
models = [s(psr) for psr in psrs]
elif len(c_psrs) > 0:
models = []
for psr in psrs:
if psr.name in c_psrs:
print('Adding chromatic model to PSR {}'.format(psr.name))
snew = s + sc
models.append(snew(psr))
else:
models.append(s(psr))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
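# Usage sketch (illustrative): adding a scattering-like (idx=4) chromatic GP
# to J1713+0747 only, on top of the model 2A ingredients; `psrs` and
# `noise_params` are placeholder names.
#
#     pta = model_chromatic(psrs, noisedict=noise_params, idx=4,
#                           chromatic_psd='powerlaw', c_psrs=['J1713+0747'])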
def model_bwm(psrs, likelihood=LogLikelihood, lookupdir=None, noisedict=None, tm_svd=False,
Tmin_bwm=None, Tmax_bwm=None, skyloc=None, logmin=None, logmax=None,
burst_logmin=-17, burst_logmax=-12, red_psd='powerlaw', components=30,
dm_var=False, dm_psd='powerlaw', dm_annual=False, tnequad=False,
upper_limit=False, bayesephem=False, wideband=False, tm_marg=False, dense_like=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with BWM model:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system (if NG channelized)
4. Red noise modeled by a specified psd
5. Linear timing model.
6. Optional DM-variation modeling
global:
1. Deterministic GW burst with memory signal.
2. Optional physical ephemeris modeling.
:param psrs:
list of enterprise.Pulsar objects for PTA
:param noisedict:
Dictionary of pulsar noise properties for fixed white noise.
Can provide manually, or the code will attempt to find it.
:param tm_svd:
boolean for svd-stabilised timing model design matrix
:param Tmin_bwm:
Min time to search for BWM (MJD). If omitted, uses first TOA.
:param Tmax_bwm:
Max time to search for BWM (MJD). If omitted, uses last TOA.
:param skyloc:
Fixed sky location of BWM signal search as [cos(theta), phi].
Search over sky location if ``None`` given.
:param logmin:
       Lower bound on log10_A of the red noise process in each pulsar
:param logmax:
Upper bound on log10_A of the red noise process in each pulsar
:param burst_logmin:
Lower bound on the log10_A of the burst amplitude in each pulsar
:param burst_logmax:
       Upper bound on the log10_A of the burst amplitude in each pulsar
:param red_psd:
PSD to use for per pulsar red noise. Available options
       are ['powerlaw', 'turnover', 'tprocess', 'spectrum'].
:param components:
number of modes in Fourier domain processes (red noise, DM
variations, etc)
:param dm_var:
include gaussian process DM variations
:param dm_psd:
power-spectral density for gp DM variations
:param dm_annual:
include a yearly period DM variation
:param upper_limit:
Perform upper limit on BWM amplitude. By default this is
set to False for a 'detection' run.
:param bayesephem:
Include BayesEphem model.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:return: instantiated enterprise.PTA object
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set frequency sampling
tmin = np.min([p.toas.min() for p in psrs])
tmax = np.max([p.toas.max() for p in psrs])
Tspan = tmax - tmin
if Tmin_bwm is None:
Tmin_bwm = tmin/const.day
if Tmax_bwm is None:
Tmax_bwm = tmax/const.day
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, psd=red_psd, Tspan=Tspan, components=components, logmin=logmin, logmax=logmax)
# DM variations
if dm_var:
s += dm_noise_block(psd=dm_psd, prior=amp_prior, components=components,
gamma_val=None)
if dm_annual:
s += chrom.dm_annual_signal()
# DM exponential dip for J1713's DM event
dmexp = chrom.dm_exponential_dip(tmin=54500, tmax=54900)
# GW BWM signal block
s += bwm_block(Tmin_bwm, Tmax_bwm, logmin=burst_logmin, logmax=burst_logmax,
amp_prior=amp_prior,
skyloc=skyloc, name='bwm')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not wideband:
s2 = s + white_noise_block(vary=False, inc_ecorr=True, tnequad=tnequad)
if dm_var and 'J1713+0747' == p.name:
s2 += dmexp
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=False, inc_ecorr=False, tnequad=tnequad)
if dm_var and 'J1713+0747' == p.name:
s3 += dmexp
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
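# Usage sketch (illustrative): an all-sky burst-with-memory search; Tmin_bwm
# and Tmax_bwm default to the first/last TOA (in MJD) when omitted. `psrs`
# and `noise_params` are placeholder names.
#
#     pta = model_bwm(psrs, noisedict=noise_params, skyloc=None,
#                     burst_logmin=-17, burst_logmax=-12)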
def model_bwm_sglpsr(psr, likelihood=LogLikelihood, lookupdir=None,
noisedict=None, tm_svd=False, tnequad=False,
Tmin_bwm=None, Tmax_bwm=None,
burst_logmin=-17, burst_logmax=-12, fixed_sign=None,
red_psd='powerlaw', logmin=None,
logmax=None, components=30,
dm_var=False, dm_psd='powerlaw', dm_annual=False,
upper_limit=False, bayesephem=False,
wideband=False, tm_marg=False, dense_like=False):
"""
Burst-With-Memory model for single pulsar runs
    Because the geometric parameters (pulsar_position, source_position, gw_pol) are all degenerate with each other in a single-pulsar BWM search,
    this model can only search over the burst epoch and the residual-space ramp amplitude (t0, ramp_amplitude).
    Reads in an enterprise Pulsar instance and returns a PTA
instantiated with single-pulsar BWM model (called a ramp):
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system (if NG channelized)
4. Red noise modeled by a specified psd
5. Linear timing model.
6. Optional DM-variation modeling
7. Deterministic GW burst with memory signal for this pulsar
:param psr:
enterprise.Pulsar objects for PTA. This model is only for one pulsar at a time.
:param likelihood:
The likelihood function to use. The options are [enterprise.signals.signal_base.LogLikelihood, enterprise.signals.signal_base.LookupLikelihood]
:param noisedict:
Dictionary of pulsar noise properties for fixed white noise.
Can provide manually, or the code will attempt to find it.
:param tm_svd:
boolean for svd-stabilised timing model design matrix
:param Tmin_bwm:
Min time to search for BWM (MJD). If omitted, uses first TOA.
:param Tmax_bwm:
Max time to search for BWM (MJD). If omitted, uses last TOA.
:param red_psd:
PSD to use for per pulsar red noise. Available options
       are ['powerlaw', 'turnover', 'tprocess', 'spectrum'].
:param components:
number of modes in Fourier domain processes (red noise, DM
variations, etc)
:param dm_var:
include gaussian process DM variations
:param dm_psd:
power-spectral density for gp DM variations
:param dm_annual:
include a yearly period DM variation
:param upper_limit:
Perform upper limit on BWM amplitude. By default this is
set to False for a 'detection' run.
:param bayesephem:
Include BayesEphem model.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:return: instantiated enterprise.PTA object
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set frequency sampling
tmin = psr.toas.min()
tmax = psr.toas.max()
Tspan = tmax - tmin
if Tmin_bwm is None:
Tmin_bwm = tmin/const.day
if Tmax_bwm is None:
Tmax_bwm = tmax/const.day
if tm_marg:
s = gp_signals.MarginalizingTimingModel()
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, psd=red_psd, Tspan=Tspan, components=components, logmin=logmin, logmax=logmax)
# DM variations
if dm_var:
s += dm_noise_block(psd=dm_psd, prior=amp_prior, components=components,
gamma_val=None)
if dm_annual:
s += chrom.dm_annual_signal()
# DM exponential dip for J1713's DM event
dmexp = chrom.dm_exponential_dip(tmin=54500, tmax=54900)
# GW BWM signal block
s += bwm_sglpsr_block(Tmin_bwm, Tmax_bwm, amp_prior=amp_prior, name='ramp',
logmin=burst_logmin, logmax=burst_logmax, fixed_sign=fixed_sign)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# adding white-noise, and acting on psr objects
models = []
if 'NANOGrav' in psr.flags['pta'] and not wideband:
s2 = s + white_noise_block(vary=False, inc_ecorr=True, tnequad=tnequad)
if dm_var and 'J1713+0747' == psr.name:
s2 += dmexp
models.append(s2(psr))
else:
s3 = s + white_noise_block(vary=False, inc_ecorr=False, tnequad=tnequad)
if dm_var and 'J1713+0747' == psr.name:
s3 += dmexp
models.append(s3(psr))
# set up PTA
# TODO: decide on a way to handle likelihood
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
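# Usage sketch (illustrative): a single-pulsar ramp search; `psr` is a single
# enterprise.Pulsar object and `noise_params` a placeholder noise dictionary.
#
#     pta = model_bwm_sglpsr(psr, noisedict=noise_params, fixed_sign=None)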
def model_fdm(psrs, noisedict=None, white_vary=False, tm_svd=False,
Tmin_fdm=None, Tmax_fdm=None, gw_psd='powerlaw',
red_psd='powerlaw', components=30, n_rnfreqs=None,
n_gwbfreqs=None, gamma_common=None, delta_common=None,
dm_var=False, dm_psd='powerlaw', dm_annual=False,
upper_limit=False, bayesephem=False, wideband=False,
pshift=False, pseed=None, model_CRN=False,
amp_upper=-11, amp_lower=-18, tnequad=False,
freq_upper=-7, freq_lower=-9,
use_fixed_freq=False, fixed_freq=-8, tm_marg=False,
dense_like=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with FDM model:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system (if NG channelized)
4. Red noise modeled by a specified psd
5. Linear timing model.
6. Optional DM-variation modeling
7. The pulsar phase term.
global:
1. Deterministic GW FDM signal.
2. Optional physical ephemeris modeling.
:param psrs:
list of enterprise.Pulsar objects for PTA
:param noisedict:
Dictionary of pulsar noise properties for fixed white noise.
Can provide manually, or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param tm_svd:
boolean for svd-stabilised timing model design matrix
:param Tmin_fdm:
Min time to search for FDM (MJD). If omitted, uses first TOA.
:param Tmax_fdm:
Max time to search for FDM (MJD). If omitted, uses last TOA.
:param gw_psd:
       PSD to use for the GWB.
:param red_psd:
PSD to use for per pulsar red noise. Available options
       are ['powerlaw', 'turnover', 'tprocess', 'spectrum'].
:param components:
number of modes in Fourier domain processes (red noise, DM
variations, etc)
:param n_rnfreqs:
Number of frequencies to use in achromatic rednoise model.
:param n_gwbfreqs:
Number of frequencies to use in the GWB model.
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param dm_var:
include gaussian process DM variations
:param dm_psd:
power-spectral density for gp DM variations
:param dm_annual:
include a yearly period DM variation
:param upper_limit:
Perform upper limit on FDM amplitude. By default this is
set to False for a 'detection' run.
:param bayesephem:
Include BayesEphem model.
:param wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param pshift:
Option to use a random phase shift in design matrix. For testing the
null hypothesis.
:param pseed:
Option to provide a seed for the random phase shift.
:param model_CRN:
Option to model the common red process in addition to the
FDM signal.
:param amp_upper, amp_lower, freq_upper, freq_lower:
The log-space bounds on the amplitude and frequency priors.
:param use_fixed_freq:
Whether to do a fixed-frequency run and not search over the frequency.
:param fixed_freq:
The frequency value to do a fixed-frequency run with.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:return: instantiated enterprise.PTA object
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
if n_gwbfreqs is None:
n_gwbfreqs = components
if n_rnfreqs is None:
n_rnfreqs = components
# find the maximum time span to set frequency sampling
tmin = np.min([p.toas.min() for p in psrs])
tmax = np.max([p.toas.max() for p in psrs])
Tspan = tmax - tmin
if Tmin_fdm is None:
Tmin_fdm = tmin/const.day
if Tmax_fdm is None:
Tmax_fdm = tmax/const.day
# timing model
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior, psd=red_psd, Tspan=Tspan, components=n_rnfreqs)
# DM variations
if dm_var:
s += dm_noise_block(psd=dm_psd, prior=amp_prior, components=components,
gamma_val=None)
if dm_annual:
s += chrom.dm_annual_signal()
# DM exponential dip for J1713's DM event
dmexp = chrom.dm_exponential_dip(tmin=54500, tmax=54900)
if model_CRN is True:
# common red noise block
s += common_red_noise_block(psd=gw_psd, prior=amp_prior, Tspan=Tspan,
components=n_gwbfreqs, gamma_val=gamma_common,
delta_val=delta_common, name='gw',
pshift=pshift, pseed=pseed)
# GW FDM signal block
s += deterministic.fdm_block(Tmin_fdm, Tmax_fdm,
amp_prior=amp_prior, name='fdm',
amp_lower=amp_lower, amp_upper=amp_upper,
freq_lower=freq_lower, freq_upper=freq_upper,
use_fixed_freq=use_fixed_freq, fixed_freq=fixed_freq)
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not wideband:
s2 = s + white_noise_block(vary=False, inc_ecorr=True, tnequad=tnequad)
if dm_var and 'J1713+0747' == p.name:
s2 += dmexp
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=False, inc_ecorr=False, tnequad=tnequad)
if dm_var and 'J1713+0747' == p.name:
s3 += dmexp
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
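# Usage sketch (illustrative): a fixed-frequency FDM run with a common red
# process also included; `psrs` and `noise_params` are placeholder names.
#
#     pta = model_fdm(psrs, noisedict=noise_params, model_CRN=True,
#                     use_fixed_freq=True, fixed_freq=-8)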
def model_cw(psrs, upper_limit=False, rn_psd='powerlaw', noisedict=None,
white_vary=False, components=30, bayesephem=False, skyloc=None,
log10_F=None, ecc=False, psrTerm=False, is_wideband=False,
use_dmdata=False, gp_ecorr='basis_ecorr', tnequad=False,
tm_marg=False, dense_like=False, tm_svd=False):
"""
    Reads in a list of enterprise Pulsar instances and returns a PTA
instantiated with CW model:
per pulsar:
1. fixed EFAC per backend/receiver system
2. fixed EQUAD per backend/receiver system
3. fixed ECORR per backend/receiver system
4. Red noise modeled as a power-law with 30 sampling frequencies
5. Linear timing model.
global:
1. Deterministic CW signal.
2. Optional physical ephemeris modeling.
:param upper_limit:
Perform upper limit on common red noise amplitude. By default
       this is set to False. Note that when performing upper limits it
is recommended that the spectral index also be fixed to a specific
value.
:param rn_psd:
psd to use in red_noise_block()
:param noisedict:
Dictionary of pulsar noise properties. Can provide manually,
or the code will attempt to find it.
:param white_vary:
boolean for varying white noise or keeping fixed.
:param bayesephem:
Include BayesEphem model. Set to False by default
:param skyloc:
Fixed sky location of CW signal search as [cos(theta), phi].
Search over sky location if ``None`` given.
:param log10_F:
Fixed frequency of CW signal search.
Search over frequency if ``None`` given.
:param ecc:
boolean or float
if boolean: include/exclude eccentricity in search
if float: use fixed eccentricity with eccentric model
    :param psrTerm:
boolean, include/exclude pulsar term in search
:param is_wideband:
Whether input TOAs are wideband TOAs; will exclude ecorr from the white
noise model.
:param use_dmdata: whether to use DM data (WidebandTimingModel) if
is_wideband.
:param tm_marg: Use marginalized timing model. In many cases this will speed
up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
:param tm_svd: boolean for svd-stabilised timing model design matrix
"""
amp_prior = 'uniform' if upper_limit else 'log-uniform'
# find the maximum time span to set GW frequency sampling
tmin = np.min([p.toas.min() for p in psrs])
tmax = np.max([p.toas.max() for p in psrs])
Tspan = tmax - tmin
# timing model
if (is_wideband and use_dmdata):
dmjump = parameter.Constant()
if white_vary:
dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
# dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
else:
dmefac = parameter.Constant()
log10_dmequad = parameter.Constant()
# dmjump = parameter.Constant()
s = gp_signals.WidebandTimingModel(dmefac=dmefac,
log10_dmequad=log10_dmequad, dmjump=dmjump,
selection=selections.Selection(selections.by_backend),
dmjump_selection=selections.Selection(selections.by_frontend))
else:
if tm_marg:
s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
else:
s = gp_signals.TimingModel(use_svd=tm_svd)
# red noise
s += red_noise_block(prior=amp_prior,
psd=rn_psd, Tspan=Tspan, components=components)
# GW CW signal block
if not ecc:
s += deterministic.cw_block_circ(amp_prior=amp_prior,
skyloc=skyloc,
log10_fgw=log10_F,
psrTerm=psrTerm, tref=tmin,
name='cw')
else:
if type(ecc) is not float:
ecc = None
s += deterministic.cw_block_ecc(amp_prior=amp_prior,
skyloc=skyloc, log10_F=log10_F,
ecc=ecc, psrTerm=psrTerm,
tref=tmin, name='cw')
# ephemeris model
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta'] and not is_wideband:
if gp_ecorr:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
gp_ecorr=True, name=gp_ecorr,
tnequad=tnequad)
else:
s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True, tnequad=tnequad)
models.append(s2(p))
else:
s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, tnequad=tnequad)
models.append(s3(p))
# set up PTA
if dense_like:
pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
else:
pta = signal_base.PTA(models)
# set white noise parameters
if not white_vary or (is_wideband and use_dmdata):
if noisedict is None:
print('No noise dictionary provided!...')
else:
noisedict = noisedict
pta.set_default_params(noisedict)
return pta
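# Usage sketch (illustrative): a circular continuous-wave search at a fixed
# GW frequency of 1e-8 Hz; `psrs` and `noise_params` are placeholder names.
#
#     pta = model_cw(psrs, noisedict=noise_params, log10_F=-8,
#                    ecc=False, psrTerm=False)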
| 122,774 | 41.973399 | 152 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/blocks.py |
import types
import numpy as np
from enterprise import constants as const
from enterprise.signals import deterministic_signals
from enterprise.signals import gp_bases as gpb
from enterprise.signals import gp_priors as gpp
from enterprise.signals import (gp_signals, parameter, selections, utils,
white_signals)
from enterprise_extensions import deterministic as ee_deterministic
from . import chromatic as chrom
from . import dropout as drop
from . import gp_kernels as gpk
from . import model_orfs
__all__ = ['white_noise_block',
'red_noise_block',
'bwm_block',
'bwm_sglpsr_block',
'dm_noise_block',
'chromatic_noise_block',
'common_red_noise_block',
]
def channelized_backends(backend_flags):
"""Selection function to split by channelized backend flags only. For ECORR"""
flagvals = np.unique(backend_flags)
ch_b = ['ASP', 'GASP', 'GUPPI', 'PUPPI', 'YUPPI', 'CHIME']
flagvals = filter(lambda x: any(map(lambda y: y in x, ch_b)), flagvals)
return {flagval: backend_flags == flagval for flagval in flagvals}
def white_noise_block(vary=False, inc_ecorr=False, gp_ecorr=False,
efac1=False, select='backend', tnequad=False, name=None, ng_twg_setup=False, wb_efac_sigma=0.25):
"""
Returns the white noise block of the model:
1. EFAC per backend/receiver system
2. EQUAD per backend/receiver system
3. ECORR per backend/receiver system
:param vary:
If set to true we vary these parameters
with uniform priors. Otherwise they are set to constants
with values to be set later.
:param inc_ecorr:
include ECORR, needed for NANOGrav channelized TOAs
:param gp_ecorr:
whether to use the Gaussian process model for ECORR
:param efac1:
use a strong prior on EFAC = Normal(mu=1, stdev=0.1)
:param tnequad:
Whether to use the TempoNest definition of EQUAD. Defaults to False to
follow Tempo, Tempo2 and Pint definition.
"""
if select == 'backend':
# define selection by observing backend
backend = selections.Selection(selections.by_backend)
# define selection by nanograv backends
backend_ng = selections.Selection(selections.nanograv_backends)
# backend_ch = selections.Selection(channelized_backends)
else:
# define no selection
backend = selections.Selection(selections.no_selection)
# white noise parameters
if vary:
if efac1:
efac = parameter.Normal(1.0, 0.1)
elif ng_twg_setup:
efac = parameter.Normal(1.0, wb_efac_sigma)
else:
efac = parameter.Uniform(0.01, 10.0)
equad = parameter.Uniform(-8.5, -5)
if inc_ecorr:
ecorr = parameter.Uniform(-8.5, -5)
else:
efac = parameter.Constant()
equad = parameter.Constant()
if inc_ecorr:
ecorr = parameter.Constant()
# white noise signals
if tnequad:
efeq = white_signals.MeasurementNoise(efac=efac,
selection=backend, name=name)
efeq += white_signals.TNEquadNoise(log10_tnequad=equad,
selection=backend, name=name)
else:
efeq = white_signals.MeasurementNoise(efac=efac, log10_t2equad=equad,
selection=backend, name=name)
if inc_ecorr:
if gp_ecorr:
if name is None:
ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr,
selection=backend_ng)
else:
ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr,
selection=backend_ng, name=name)
else:
ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
selection=backend_ng,
name=name)
# combine signals
if inc_ecorr:
s = efeq + ec
    else:
s = efeq
return s
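# Usage sketch (illustrative): blocks are summed with other signal components
# and then applied to a pulsar, as done in the model_* functions of models.py;
# `psr` is a placeholder enterprise.Pulsar object.
#
#     wn = white_noise_block(vary=True, inc_ecorr=True)
#     model = gp_signals.TimingModel() + wn + red_noise_block(components=30)
#     signal = model(psr)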
def red_noise_block(psd='powerlaw', prior='log-uniform', Tspan=None,
components=30, gamma_val=None, coefficients=False,
select=None, modes=None, wgts=None, combine=True,
break_flat=False, break_flat_fq=None,
logmin=None, logmax=None, dropout=False, k_threshold=0.5):
"""
Returns red noise model:
Red noise modeled as a power-law with 30 sampling frequencies
:param psd:
PSD function [e.g. powerlaw (default), turnover, spectrum, tprocess]
:param prior:
       Prior on log10_A. Default is "log-uniform". Use "uniform" for
upper limits.
:param Tspan:
Sets frequency sampling f_i = i / Tspan. Default will
       use the overall time span for the individual pulsar.
:param components:
Number of frequencies in sampling of red noise
:param gamma_val:
If given, this is the fixed slope of the power-law for
powerlaw, turnover, or tprocess red noise
:param coefficients: include latent coefficients in GP model?
:param dropout: Use a dropout analysis for intrinsic red noise models.
Currently only supports power law option.
:param k_threshold: Threshold for dropout analysis.
"""
# red noise parameters that are common
if psd in ['powerlaw', 'powerlaw_genmodes', 'turnover',
'tprocess', 'tprocess_adapt']:
# parameters shared by PSD functions
if logmin is not None and logmax is not None:
if prior == 'uniform':
log10_A = parameter.LinearExp(logmin, logmax)
elif prior == 'log-uniform':
log10_A = parameter.Uniform(logmin, logmax)
else:
if prior == 'uniform':
log10_A = parameter.LinearExp(-20, -11)
elif prior == 'log-uniform' and gamma_val is not None:
if np.abs(gamma_val - 4.33) < 0.1:
log10_A = parameter.Uniform(-20, -11)
else:
log10_A = parameter.Uniform(-20, -11)
else:
log10_A = parameter.Uniform(-20, -11)
if gamma_val is not None:
gamma = parameter.Constant(gamma_val)
else:
gamma = parameter.Uniform(0, 7)
# different PSD function parameters
if psd == 'powerlaw' and dropout:
k_drop = parameter.Uniform(0, 1)
pl = drop.dropout_powerlaw(log10_A=log10_A, gamma=gamma,
dropout_psr='all', k_drop=k_drop,
k_threshold=k_threshold)
elif psd == 'powerlaw':
pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
elif psd == 'powerlaw_genmodes':
pl = gpp.powerlaw_genmodes(log10_A=log10_A, gamma=gamma, wgts=wgts)
elif psd == 'turnover':
kappa = parameter.Uniform(0, 7)
lf0 = parameter.Uniform(-9, -7)
pl = utils.turnover(log10_A=log10_A, gamma=gamma,
lf0=lf0, kappa=kappa)
elif psd == 'tprocess':
df = 2
alphas = gpp.InvGamma(df/2, df/2, size=components)
pl = gpp.t_process(log10_A=log10_A, gamma=gamma, alphas=alphas)
elif psd == 'tprocess_adapt':
df = 2
alpha_adapt = gpp.InvGamma(df/2, df/2, size=1)
nfreq = parameter.Uniform(-0.5, 10-0.5)
pl = gpp.t_process_adapt(log10_A=log10_A, gamma=gamma,
alphas_adapt=alpha_adapt, nfreq=nfreq)
if psd == 'spectrum':
if prior == 'uniform':
log10_rho = parameter.LinearExp(-10, -4, size=components)
elif prior == 'log-uniform':
log10_rho = parameter.Uniform(-10, -4, size=components)
pl = gpp.free_spectrum(log10_rho=log10_rho)
if select == 'backend':
# define selection by observing backend
selection = selections.Selection(selections.by_backend)
elif select == 'band' or select == 'band+':
# define selection by observing band
selection = selections.Selection(selections.by_band)
else:
# define no selection
selection = selections.Selection(selections.no_selection)
if break_flat:
log10_A_flat = parameter.Uniform(-20, -11)
gamma_flat = parameter.Constant(0)
pl_flat = utils.powerlaw(log10_A=log10_A_flat, gamma=gamma_flat)
freqs = 1.0 * np.arange(1, components+1) / Tspan
components_low = sum(f < break_flat_fq for f in freqs)
if components_low < 1.5:
components_low = 2
rn = gp_signals.FourierBasisGP(pl, components=components_low,
Tspan=Tspan, coefficients=coefficients,
combine=combine, selection=selection)
rn_flat = gp_signals.FourierBasisGP(pl_flat,
modes=freqs[components_low:],
coefficients=coefficients,
selection=selection,
combine=combine,
name='red_noise_hf')
rn = rn + rn_flat
else:
rn = gp_signals.FourierBasisGP(pl, components=components,
Tspan=Tspan,
combine=combine,
coefficients=coefficients,
selection=selection,
modes=modes)
if select == 'band+': # Add the common component as well
rn = rn + gp_signals.FourierBasisGP(pl, components=components,
Tspan=Tspan, combine=combine,
coefficients=coefficients)
return rn
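# Usage sketch (illustrative): a 30-frequency power-law red noise block with
# the spectral index fixed to 13/3; `Tspan_sec` is a placeholder time span in
# seconds (e.g. from model_utils.get_tspan).
#
#     rn = red_noise_block(psd='powerlaw', prior='log-uniform',
#                          Tspan=Tspan_sec, components=30, gamma_val=4.33)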
def bwm_block(Tmin, Tmax, amp_prior='log-uniform',
skyloc=None, logmin=-18, logmax=-11,
name='bwm'):
"""
Returns deterministic GW burst with memory model:
1. Burst event parameterized by time, sky location,
polarization angle, and amplitude
:param Tmin:
Min time to search, probably first TOA (MJD).
:param Tmax:
Max time to search, probably last TOA (MJD).
:param amp_prior:
       Prior on log10_A. Default is "log-uniform". Use "uniform" for
upper limits.
:param skyloc:
Fixed sky location of BWM signal search as [cos(theta), phi].
Search over sky location if ``None`` given.
:param logmin:
log of minimum BWM amplitude for prior (log10)
:param logmax:
log of maximum BWM amplitude for prior (log10)
:param name:
Name of BWM signal.
"""
# BWM parameters
amp_name = '{}_log10_A'.format(name)
if amp_prior == 'uniform':
log10_A_bwm = parameter.LinearExp(logmin, logmax)(amp_name)
elif amp_prior == 'log-uniform':
log10_A_bwm = parameter.Uniform(logmin, logmax)(amp_name)
pol_name = '{}_pol'.format(name)
pol = parameter.Uniform(0, np.pi)(pol_name)
t0_name = '{}_t0'.format(name)
t0 = parameter.Uniform(Tmin, Tmax)(t0_name)
costh_name = '{}_costheta'.format(name)
phi_name = '{}_phi'.format(name)
if skyloc is None:
costh = parameter.Uniform(-1, 1)(costh_name)
phi = parameter.Uniform(0, 2*np.pi)(phi_name)
else:
costh = parameter.Constant(skyloc[0])(costh_name)
phi = parameter.Constant(skyloc[1])(phi_name)
# BWM signal
bwm_wf = ee_deterministic.bwm_delay(log10_h=log10_A_bwm, t0=t0,
cos_gwtheta=costh, gwphi=phi, gwpol=pol)
bwm = deterministic_signals.Deterministic(bwm_wf, name=name)
return bwm
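# Usage sketch (illustrative): a BWM block covering the full data span;
# `t_first_mjd` and `t_last_mjd` are placeholder first/last TOAs in MJD.
#
#     bwm = bwm_block(Tmin=t_first_mjd, Tmax=t_last_mjd,
#                     amp_prior='log-uniform', skyloc=None, name='bwm')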
def bwm_sglpsr_block(Tmin, Tmax, amp_prior='log-uniform',
logmin=-17, logmax=-12, name='ramp', fixed_sign=None):
if fixed_sign is None:
sign = parameter.Uniform(-1, 1)("sign")
else:
sign = np.sign(fixed_sign)
amp_name = '{}_log10_A'.format(name)
if amp_prior == 'uniform':
log10_A_ramp = parameter.LinearExp(logmin, logmax)(amp_name)
elif amp_prior == 'log-uniform':
log10_A_ramp = parameter.Uniform(logmin, logmax)(amp_name)
t0_name = '{}_t0'.format(name)
t0 = parameter.Uniform(Tmin, Tmax)(t0_name)
ramp_wf = ee_deterministic.bwm_sglpsr_delay(log10_A=log10_A_ramp, t0=t0, sign=sign)
ramp = deterministic_signals.Deterministic(ramp_wf, name=name)
return ramp
def dm_noise_block(gp_kernel='diag', psd='powerlaw', nondiag_kernel='periodic',
prior='log-uniform', dt=15, df=200,
Tspan=None, components=30,
gamma_val=None, coefficients=False):
"""
Returns DM noise model:
1. DM noise modeled as a power-law with 30 sampling frequencies
:param psd:
PSD function [e.g. powerlaw (default), spectrum, tprocess]
:param prior:
       Prior on log10_A. Default is "log-uniform". Use "uniform" for
upper limits.
:param dt:
time-scale for linear interpolation basis (days)
:param df:
frequency-scale for linear interpolation basis (MHz)
:param Tspan:
Sets frequency sampling f_i = i / Tspan. Default will
       use the overall time span for the individual pulsar.
:param components:
Number of frequencies in sampling of DM-variations.
:param gamma_val:
If given, this is the fixed slope of the power-law for
powerlaw, turnover, or tprocess DM-variations
"""
# dm noise parameters that are common
if gp_kernel == 'diag':
if psd in ['powerlaw', 'turnover', 'tprocess', 'tprocess_adapt']:
# parameters shared by PSD functions
if prior == 'uniform':
log10_A_dm = parameter.LinearExp(-20, -11)
elif prior == 'log-uniform' and gamma_val is not None:
if np.abs(gamma_val - 4.33) < 0.1:
log10_A_dm = parameter.Uniform(-20, -11)
else:
log10_A_dm = parameter.Uniform(-20, -11)
else:
log10_A_dm = parameter.Uniform(-20, -11)
if gamma_val is not None:
gamma_dm = parameter.Constant(gamma_val)
else:
gamma_dm = parameter.Uniform(0, 7)
# different PSD function parameters
if psd == 'powerlaw':
dm_prior = utils.powerlaw(log10_A=log10_A_dm, gamma=gamma_dm)
elif psd == 'turnover':
kappa_dm = parameter.Uniform(0, 7)
lf0_dm = parameter.Uniform(-9, -7)
dm_prior = utils.turnover(log10_A=log10_A_dm, gamma=gamma_dm,
lf0=lf0_dm, kappa=kappa_dm)
elif psd == 'tprocess':
df = 2
alphas_dm = gpp.InvGamma(df/2, df/2, size=components)
dm_prior = gpp.t_process(log10_A=log10_A_dm, gamma=gamma_dm,
alphas=alphas_dm)
elif psd == 'tprocess_adapt':
df = 2
alpha_adapt_dm = gpp.InvGamma(df/2, df/2, size=1)
nfreq_dm = parameter.Uniform(-0.5, 10-0.5)
dm_prior = gpp.t_process_adapt(log10_A=log10_A_dm,
gamma=gamma_dm,
alphas_adapt=alpha_adapt_dm,
nfreq=nfreq_dm)
if psd == 'spectrum':
if prior == 'uniform':
log10_rho_dm = parameter.LinearExp(-10, -4, size=components)
elif prior == 'log-uniform':
log10_rho_dm = parameter.Uniform(-10, -4, size=components)
dm_prior = gpp.free_spectrum(log10_rho=log10_rho_dm)
dm_basis = utils.createfourierdesignmatrix_dm(nmodes=components,
Tspan=Tspan)
elif gp_kernel == 'nondiag':
if nondiag_kernel == 'periodic':
# Periodic GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_p = parameter.Uniform(-4, 1)
log10_gam_p = parameter.Uniform(-3, 2)
dm_basis = gpk.linear_interp_basis_dm(dt=dt*const.day)
dm_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_gam_p=log10_gam_p,
log10_p=log10_p)
elif nondiag_kernel == 'periodic_rfband':
# Periodic GP kernel for DM with RQ radio-frequency dependence
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_ell2 = parameter.Uniform(2, 7)
log10_alpha_wgt = parameter.Uniform(-4, 1)
log10_p = parameter.Uniform(-4, 1)
log10_gam_p = parameter.Uniform(-3, 2)
dm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
dm=True)
dm_prior = gpk.tf_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_gam_p=log10_gam_p, log10_p=log10_p,
log10_alpha_wgt=log10_alpha_wgt,
log10_ell2=log10_ell2)
elif nondiag_kernel == 'sq_exp':
# squared-exponential GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
dm_basis = gpk.linear_interp_basis_dm(dt=dt*const.day)
dm_prior = gpk.se_dm_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell)
elif nondiag_kernel == 'sq_exp_rfband':
# Sq-Exp GP kernel for DM with RQ radio-frequency dependence
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_ell2 = parameter.Uniform(2, 7)
log10_alpha_wgt = parameter.Uniform(-4, 1)
dm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
dm=True)
dm_prior = gpk.sf_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_alpha_wgt=log10_alpha_wgt,
log10_ell2=log10_ell2)
elif nondiag_kernel == 'dmx_like':
# DMX-like signal
log10_sigma = parameter.Uniform(-10, -4)
dm_basis = gpk.linear_interp_basis_dm(dt=dt*const.day)
dm_prior = gpk.dmx_ridge_prior(log10_sigma=log10_sigma)
dmgp = gp_signals.BasisGP(dm_prior, dm_basis, name='dm_gp',
coefficients=coefficients)
return dmgp
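# Illustrative usage sketch (not part of the original module): it shows how the DM GP
# block defined above (assumed to be `dm_noise_block`, with the keyword arguments
# documented in its docstring) is typically combined with a timing model and fixed
# white noise into a single-pulsar PTA.  `psr` is assumed to be an enterprise.Pulsar
# object and `noisedict` an optional dictionary of fixed white-noise values.
def _example_dm_noise_block_usage(psr, noisedict=None):
    """Sketch: attach a diagonal power-law DM GP to one pulsar."""
    from enterprise.signals import gp_signals, parameter, signal_base, white_signals
    wn = white_signals.MeasurementNoise(efac=parameter.Constant(1.0))
    dmgp = dm_noise_block(gp_kernel='diag', psd='powerlaw', components=30)
    model = gp_signals.TimingModel() + wn + dmgp
    pta = signal_base.PTA([model(psr)])
    if noisedict is not None:
        pta.set_default_params(noisedict)
    return pta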
def chromatic_noise_block(gp_kernel='nondiag', psd='powerlaw',
nondiag_kernel='periodic',
prior='log-uniform', dt=15, df=200,
idx=4, include_quadratic=False,
Tspan=None, name='chrom', components=30,
coefficients=False):
"""
    Returns GP chromatic noise model:
        1. Chromatic noise modeled with user-defined PSD with
        30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover', 'spectrum']
:param gp_kernel:
Whether to use a diagonal kernel for the GP. ['diag','nondiag']
:param nondiag_kernel:
Which nondiagonal kernel to use for the GP.
['periodic','sq_exp','periodic_rfband','sq_exp_rfband']
:param psd:
        PSD to use for the chromatic noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum']
:param prior:
What type of prior to use for amplitudes. ['log-uniform','uniform']
:param dt:
time-scale for linear interpolation basis (days)
:param df:
frequency-scale for linear interpolation basis (MHz)
:param idx:
Index of radio frequency dependence (i.e. DM is 2). Any float will work.
:param include_quadratic:
Whether to include a quadratic fit.
:param name: Name of signal
:param Tspan:
Tspan from which to calculate frequencies for PSD-based GPs.
:param components:
Number of frequencies to use in 'diag' GPs.
:param coefficients:
Whether to keep coefficients of the GP.
"""
if gp_kernel == 'diag':
chm_basis = gpb.createfourierdesignmatrix_chromatic(nmodes=components,
Tspan=Tspan)
if psd in ['powerlaw', 'turnover']:
if prior == 'uniform':
log10_A = parameter.LinearExp(-18, -11)
elif prior == 'log-uniform':
log10_A = parameter.Uniform(-18, -11)
gamma = parameter.Uniform(0, 7)
# PSD
if psd == 'powerlaw':
chm_prior = utils.powerlaw(log10_A=log10_A, gamma=gamma)
elif psd == 'turnover':
kappa = parameter.Uniform(0, 7)
lf0 = parameter.Uniform(-9, -7)
chm_prior = utils.turnover(log10_A=log10_A, gamma=gamma,
lf0=lf0, kappa=kappa)
if psd == 'spectrum':
if prior == 'uniform':
log10_rho = parameter.LinearExp(-10, -4, size=components)
elif prior == 'log-uniform':
log10_rho = parameter.Uniform(-10, -4, size=components)
chm_prior = gpp.free_spectrum(log10_rho=log10_rho)
elif gp_kernel == 'nondiag':
if nondiag_kernel == 'periodic':
# Periodic GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_p = parameter.Uniform(-4, 1)
log10_gam_p = parameter.Uniform(-3, 2)
chm_basis = gpk.linear_interp_basis_chromatic(dt=dt*const.day)
chm_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_gam_p=log10_gam_p,
log10_p=log10_p)
elif nondiag_kernel == 'periodic_rfband':
# Periodic GP kernel for DM with RQ radio-frequency dependence
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_ell2 = parameter.Uniform(2, 7)
log10_alpha_wgt = parameter.Uniform(-4, 1)
log10_p = parameter.Uniform(-4, 1)
log10_gam_p = parameter.Uniform(-3, 2)
chm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
dm=True, dm_idx=idx)
chm_prior = gpk.tf_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_gam_p=log10_gam_p,
log10_p=log10_p,
log10_alpha_wgt=log10_alpha_wgt,
log10_ell2=log10_ell2)
elif nondiag_kernel == 'sq_exp':
# squared-exponential kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
chm_basis = gpk.linear_interp_basis_chromatic(dt=dt*const.day, idx=idx)
chm_prior = gpk.se_dm_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell)
elif nondiag_kernel == 'sq_exp_rfband':
# Sq-Exp GP kernel for Chrom with RQ radio-frequency dependence
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_ell2 = parameter.Uniform(2, 7)
log10_alpha_wgt = parameter.Uniform(-4, 1)
chm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
dm=True, dm_idx=idx)
chm_prior = gpk.sf_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_alpha_wgt=log10_alpha_wgt,
log10_ell2=log10_ell2)
cgp = gp_signals.BasisGP(chm_prior, chm_basis, name=name+'_gp',
coefficients=coefficients)
if include_quadratic:
# quadratic piece
basis_quad = chrom.chromatic_quad_basis(idx=idx)
prior_quad = chrom.chromatic_quad_prior()
cquad = gp_signals.BasisGP(prior_quad, basis_quad, name=name+'_quad')
cgp += cquad
return cgp
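# Illustrative usage sketch (not part of the original module): a scattering-like
# chromatic GP (radio-frequency index idx=4) with a squared-exponential kernel,
# attached to a hypothetical enterprise.Pulsar object `psr`.
def _example_chromatic_noise_block_usage(psr):
    """Sketch: nondiagonal chromatic GP on top of a timing model."""
    from enterprise.signals import gp_signals, signal_base
    chrom_gp = chromatic_noise_block(gp_kernel='nondiag', nondiag_kernel='sq_exp',
                                     idx=4, include_quadratic=False)
    model = gp_signals.TimingModel() + chrom_gp
    return signal_base.PTA([model(psr)])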
def common_red_noise_block(psd='powerlaw', prior='log-uniform',
Tspan=None, components=30, combine=True,
log10_A_val=None, gamma_val=None, delta_val=None,
logmin=None, logmax=None,
orf=None, orf_ifreq=0, leg_lmax=5,
name='gw', coefficients=False,
pshift=False, pseed=None):
"""
Returns common red noise model:
        1. Red noise modeled with user-defined PSD with
        30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover', 'spectrum']
:param psd:
PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover', 'spectrum', 'broken_powerlaw']
:param prior:
        Prior on log10_A. Default is "log-uniform". Use "uniform" for
upper limits.
:param Tspan:
Sets frequency sampling f_i = i / Tspan. Default will
use overall time span for individual pulsar.
:param log10_A_val:
Value of log10_A parameter for fixed amplitude analyses.
:param gamma_val:
        Value of spectral index for power-law and turnover
        models. By default the spectral index is varied over the range [0, 7].
    :param delta_val:
        Value of spectral index for high frequencies in broken power-law
        and turnover models. By default the spectral index is varied over the range [0, 7].
    :param logmin:
        Specify the lower bound of the prior on the amplitude for all psd but 'spectrum'.
        If psd=='spectrum', then this specifies the lower bound of the prior on log10_rho_gw.
    :param logmax:
        Specify the upper bound of the prior on the amplitude for all psd but 'spectrum'.
        If psd=='spectrum', then this specifies the upper bound of the prior on log10_rho_gw.
:param orf:
String representing which overlap reduction function to use.
By default we do not use any spatial correlations. Permitted
        values include ['hd', 'dipole', 'monopole'] and the other keys of the orfs dictionary defined below.
:param orf_ifreq:
Frequency bin at which to start the Hellings & Downs function with
numbering beginning at 0. Currently only works with freq_hd orf.
:param leg_lmax:
Maximum multipole of a Legendre polynomial series representation
of the overlap reduction function [default=5]
:param pshift:
Option to use a random phase shift in design matrix. For testing the
null hypothesis.
:param pseed:
Option to provide a seed for the random phase shift.
:param name: Name of common red process
"""
orfs = {'crn': None, 'hd': model_orfs.hd_orf(),
'gw_monopole': model_orfs.gw_monopole_orf(),
'gw_dipole': model_orfs.gw_dipole_orf(),
'st': model_orfs.st_orf(),
'gt': model_orfs.gt_orf(tau=parameter.Uniform(-1.5, 1.5)('tau')),
'dipole': model_orfs.dipole_orf(),
'monopole': model_orfs.monopole_orf(),
'param_hd': model_orfs.param_hd_orf(a=parameter.Uniform(-1.5, 3.0)('gw_orf_param0'),
b=parameter.Uniform(-1.0, 0.5)('gw_orf_param1'),
c=parameter.Uniform(-1.0, 1.0)('gw_orf_param2')),
'spline_orf': model_orfs.spline_orf(params=parameter.Uniform(-0.9, 0.9, size=7)('gw_orf_spline')),
'bin_orf': model_orfs.bin_orf(params=parameter.Uniform(-1.0, 1.0, size=7)('gw_orf_bin')),
'zero_diag_hd': model_orfs.zero_diag_hd(),
'zero_diag_bin_orf': model_orfs.zero_diag_bin_orf(params=parameter.Uniform(
-1.0, 1.0, size=7)('gw_orf_bin_zero_diag')),
'freq_hd': model_orfs.freq_hd(params=[components, orf_ifreq]),
'legendre_orf': model_orfs.legendre_orf(params=parameter.Uniform(
-1.0, 1.0, size=leg_lmax+1)('gw_orf_legendre')),
'zero_diag_legendre_orf': model_orfs.zero_diag_legendre_orf(params=parameter.Uniform(
-1.0, 1.0, size=leg_lmax+1)('gw_orf_legendre_zero_diag'))}
# common red noise parameters
if psd in ['powerlaw', 'turnover', 'turnover_knee', 'broken_powerlaw']:
amp_name = '{}_log10_A'.format(name)
if log10_A_val is not None:
log10_Agw = parameter.Constant(log10_A_val)(amp_name)
if logmin is not None and logmax is not None:
if prior == 'uniform':
log10_Agw = parameter.LinearExp(logmin, logmax)(amp_name)
elif prior == 'log-uniform' and gamma_val is not None:
if np.abs(gamma_val - 4.33) < 0.1:
log10_Agw = parameter.Uniform(logmin, logmax)(amp_name)
else:
log10_Agw = parameter.Uniform(logmin, logmax)(amp_name)
else:
log10_Agw = parameter.Uniform(logmin, logmax)(amp_name)
else:
if prior == 'uniform':
log10_Agw = parameter.LinearExp(-18, -11)(amp_name)
elif prior == 'log-uniform' and gamma_val is not None:
if np.abs(gamma_val - 4.33) < 0.1:
log10_Agw = parameter.Uniform(-18, -14)(amp_name)
else:
log10_Agw = parameter.Uniform(-18, -11)(amp_name)
else:
log10_Agw = parameter.Uniform(-18, -11)(amp_name)
gam_name = '{}_gamma'.format(name)
if gamma_val is not None:
gamma_gw = parameter.Constant(gamma_val)(gam_name)
else:
gamma_gw = parameter.Uniform(0, 7)(gam_name)
# common red noise PSD
if psd == 'powerlaw':
cpl = utils.powerlaw(log10_A=log10_Agw, gamma=gamma_gw)
elif psd == 'broken_powerlaw':
delta_name = '{}_delta'.format(name)
kappa_name = '{}_kappa'.format(name)
log10_fb_name = '{}_log10_fb'.format(name)
kappa_gw = parameter.Uniform(0.01, 0.5)(kappa_name)
log10_fb_gw = parameter.Uniform(-10, -7)(log10_fb_name)
if delta_val is not None:
delta_gw = parameter.Constant(delta_val)(delta_name)
else:
delta_gw = parameter.Uniform(0, 7)(delta_name)
cpl = gpp.broken_powerlaw(log10_A=log10_Agw,
gamma=gamma_gw,
delta=delta_gw,
log10_fb=log10_fb_gw,
kappa=kappa_gw)
elif psd == 'turnover':
kappa_name = '{}_kappa'.format(name)
lf0_name = '{}_log10_fbend'.format(name)
kappa_gw = parameter.Uniform(0, 7)(kappa_name)
lf0_gw = parameter.Uniform(-9, -7)(lf0_name)
cpl = utils.turnover(log10_A=log10_Agw, gamma=gamma_gw,
lf0=lf0_gw, kappa=kappa_gw)
elif psd == 'turnover_knee':
kappa_name = '{}_kappa'.format(name)
lfb_name = '{}_log10_fbend'.format(name)
delta_name = '{}_delta'.format(name)
lfk_name = '{}_log10_fknee'.format(name)
kappa_gw = parameter.Uniform(0, 7)(kappa_name)
lfb_gw = parameter.Uniform(-9.3, -8)(lfb_name)
delta_gw = parameter.Uniform(-2, 0)(delta_name)
lfk_gw = parameter.Uniform(-8, -7)(lfk_name)
cpl = gpp.turnover_knee(log10_A=log10_Agw, gamma=gamma_gw,
lfb=lfb_gw, lfk=lfk_gw,
kappa=kappa_gw, delta=delta_gw)
if psd == 'spectrum':
rho_name = '{}_log10_rho'.format(name)
# checking if priors specified, otherwise give default values
if logmin is None:
logmin = -9
if logmax is None:
logmax = -4
if prior == 'uniform':
log10_rho_gw = parameter.LinearExp(logmin, logmax,
size=components)(rho_name)
elif prior == 'log-uniform':
log10_rho_gw = parameter.Uniform(logmin, logmax, size=components)(rho_name)
cpl = gpp.free_spectrum(log10_rho=log10_rho_gw)
if orf is None:
crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, combine=combine,
components=components, Tspan=Tspan,
name=name, pshift=pshift, pseed=pseed)
elif orf in orfs.keys():
if orf == 'crn':
crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, combine=combine,
components=components, Tspan=Tspan,
name=name, pshift=pshift, pseed=pseed)
else:
crn = gp_signals.FourierBasisCommonGP(cpl, orfs[orf],
components=components, combine=combine,
Tspan=Tspan,
name=name, pshift=pshift,
pseed=pseed)
elif isinstance(orf, types.FunctionType):
crn = gp_signals.FourierBasisCommonGP(cpl, orf,
components=components, combine=combine,
Tspan=Tspan,
name=name, pshift=pshift,
pseed=pseed)
else:
raise ValueError('ORF {} not recognized'.format(orf))
return crn
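# Illustrative usage sketch (not part of the original module): a Hellings-Downs
# correlated power-law process shared by a hypothetical list of enterprise.Pulsar
# objects `psrs`, with the span of the full array fixing the frequency grid.
def _example_common_red_noise_block_usage(psrs):
    """Sketch: HD-correlated common red noise (GWB) model for an array of pulsars."""
    import numpy as np
    from enterprise.signals import gp_signals, signal_base
    tspan = np.max([p.toas.max() for p in psrs]) - np.min([p.toas.min() for p in psrs])
    gwb = common_red_noise_block(psd='powerlaw', prior='log-uniform', Tspan=tspan,
                                 components=14, orf='hd', name='gw')
    model = gp_signals.TimingModel() + gwb
    return signal_base.PTA([model(p) for p in psrs])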
| 34,848 | 42.506866 | 119 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/chromatic/chromatic.py |
import numpy as np
from enterprise import constants as const
from enterprise.signals import deterministic_signals, parameter, signal_base
__all__ = ['chrom_exp_decay',
'chrom_exp_cusp',
'chrom_dual_exp_cusp',
'chrom_yearly_sinusoid',
'chromatic_quad_basis',
'chromatic_quad_prior',
'dmx_delay',
'dm_exponential_dip',
'dm_exponential_cusp',
'dm_dual_exp_cusp',
'dmx_signal',
'dm_annual_signal',
]
@signal_base.function
def chrom_exp_decay(toas, freqs, log10_Amp=-7, sign_param=-1.0,
t0=54000, log10_tau=1.7, idx=2):
"""
Chromatic exponential-dip delay term in TOAs.
:param t0: time of exponential minimum [MJD]
:param tau: 1/e time of exponential [s]
:param log10_Amp: amplitude of dip
:param sign_param: sign of waveform
:param idx: index of chromatic dependence
:return wf: delay time-series [s]
"""
t0 *= const.day
tau = 10**log10_tau * const.day
ind = np.where(toas > t0)[0]
wf = 10**log10_Amp * np.heaviside(toas - t0, 1)
wf[ind] *= np.exp(- (toas[ind] - t0) / tau)
return np.sign(sign_param) * wf * (1400 / freqs) ** idx
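# Worked numerical sketch (not part of the original module): the same exponential-dip
# delay evaluated with plain numpy, to make the (1400 / freq)**idx chromatic scaling
# explicit.  Epochs, frequencies and amplitudes below are made up for illustration,
# and the overall sign factor applied by the function above is omitted.
def _example_exp_decay_waveform():
    """Sketch: exponential-dip delay [s] on a toy set of TOAs."""
    toas = np.linspace(53000, 56000, 200) * const.day   # TOAs [s]
    freqs = np.full_like(toas, 1400.0)                  # observing frequencies [MHz]
    t0 = 54000 * const.day                              # epoch of the dip [s]
    tau = 10**1.7 * const.day                           # 1/e recovery time [s]
    log10_Amp, idx = -6.0, 2                            # amplitude and chromatic index
    wf = 10**log10_Amp * np.heaviside(toas - t0, 1)
    wf *= np.exp(-np.clip(toas - t0, 0.0, None) / tau)
    return wf * (1400 / freqs) ** idx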
@signal_base.function
def chrom_exp_cusp(toas, freqs, log10_Amp=-7, sign_param=-1.0,
t0=54000, log10_tau_pre=1.7, log10_tau_post=1.7,
symmetric=False, idx=2):
"""
Chromatic exponential-cusp delay term in TOAs.
:param t0: time of exponential minimum [MJD]
:param tau_pre: 1/e time of exponential before peak [s]
:param tau_post: 1/e time of exponential after peak[s]
:param symmetric: whether or not tau_pre = tau_post
:param log10_Amp: amplitude of cusp
:param sign_param: sign of waveform
:param idx: index of chromatic dependence
:return wf: delay time-series [s]
"""
t0 *= const.day
if symmetric:
tau = 10**log10_tau_pre * const.day
ind_pre = np.where(toas < t0)[0]
ind_post = np.where(toas > t0)[0]
wf_pre = 10**log10_Amp * (1 - np.heaviside(toas - t0, 1))
wf_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau)
wf_post = 10**log10_Amp * np.heaviside(toas - t0, 1)
wf_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau)
wf = wf_pre + wf_post
else:
tau_pre = 10**log10_tau_pre * const.day
tau_post = 10**log10_tau_post * const.day
ind_pre = np.where(toas < t0)[0]
ind_post = np.where(toas > t0)[0]
wf_pre = 10**log10_Amp * (1 - np.heaviside(toas - t0, 1))
wf_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau_pre)
wf_post = 10**log10_Amp * np.heaviside(toas - t0, 1)
wf_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau_post)
wf = wf_pre + wf_post
return np.sign(sign_param) * wf * (1400 / freqs) ** idx
@signal_base.function
def chrom_dual_exp_cusp(toas, freqs, t0=54000, sign_param=-1.0,
log10_Amp_1=-7, log10_tau_pre_1=1.7,
log10_tau_post_1=1.7,
log10_Amp_2=-7, log10_tau_pre_2=1.7,
log10_tau_post_2=1.7,
symmetric=False, idx1=2, idx2=4):
"""
    Chromatic double exponential-cusp delay term in TOAs, i.e. the sum of two
    cusp components with independent amplitudes, decay times and chromatic indices.
    :param t0: time of exponential minimum [MJD]
    :param log10_Amp_1, log10_Amp_2: amplitudes of the two cusp components
    :param log10_tau_pre_1, log10_tau_pre_2: 1/e times of the exponentials before the peak [s]
    :param log10_tau_post_1, log10_tau_post_2: 1/e times of the exponentials after the peak [s]
    :param symmetric: whether or not tau_pre = tau_post for each component
    :param sign_param: sign of waveform
    :param idx1, idx2: chromatic indices of the two components
    :return wf: delay time-series [s]
"""
t0 *= const.day
ind_pre = np.where(toas < t0)[0]
ind_post = np.where(toas > t0)[0]
if symmetric:
tau_1 = 10**log10_tau_pre_1 * const.day
wf_1_pre = 10**log10_Amp_1 * (1 - np.heaviside(toas - t0, 1))
wf_1_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau_1)
wf_1_post = 10**log10_Amp_1 * np.heaviside(toas - t0, 1)
wf_1_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau_1)
wf_1 = wf_1_pre + wf_1_post
tau_2 = 10**log10_tau_pre_2 * const.day
wf_2_pre = 10**log10_Amp_2 * (1 - np.heaviside(toas - t0, 1))
wf_2_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau_2)
wf_2_post = 10**log10_Amp_2 * np.heaviside(toas - t0, 1)
wf_2_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau_2)
wf_2 = wf_2_pre + wf_2_post
else:
tau_1_pre = 10**log10_tau_pre_1 * const.day
tau_1_post = 10**log10_tau_post_1 * const.day
wf_1_pre = 10**log10_Amp_1 * (1 - np.heaviside(toas - t0, 1))
wf_1_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau_1_pre)
wf_1_post = 10**log10_Amp_1 * np.heaviside(toas - t0, 1)
wf_1_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau_1_post)
wf_1 = wf_1_pre + wf_1_post
tau_2_pre = 10**log10_tau_pre_2 * const.day
tau_2_post = 10**log10_tau_post_2 * const.day
wf_2_pre = 10**log10_Amp_2 * (1 - np.heaviside(toas - t0, 1))
wf_2_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau_2_pre)
wf_2_post = 10**log10_Amp_2 * np.heaviside(toas - t0, 1)
wf_2_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau_2_post)
wf_2 = wf_2_pre + wf_2_post
return np.sign(sign_param) * (wf_1 * (1400 / freqs) ** idx1 + wf_2 * (1400 / freqs) ** idx2)
@signal_base.function
def chrom_yearly_sinusoid(toas, freqs, log10_Amp=-7, phase=0, idx=2):
"""
Chromatic annual sinusoid.
:param log10_Amp: amplitude of sinusoid
:param phase: initial phase of sinusoid
:param idx: index of chromatic dependence
:return wf: delay time-series [s]
"""
wf = 10**log10_Amp * np.sin(2 * np.pi * const.fyr * toas + phase)
return wf * (1400 / freqs) ** idx
@signal_base.function
def chromatic_quad_basis(toas, freqs, idx=4):
"""
Basis for chromatic quadratic function.
:param idx: index of chromatic dependence
:return ret: normalized quadratic basis matrix [Ntoa, 3]
"""
ret = np.zeros((len(toas), 3))
t0 = (toas.max() + toas.min()) / 2
for ii in range(3):
ret[:, ii] = (toas-t0) ** (ii) * (1400/freqs) ** idx
norm = np.sqrt(np.sum(ret**2, axis=0))
return ret/norm, np.ones(3)
@signal_base.function
def chromatic_quad_prior(toas):
"""
Prior for chromatic quadratic function.
:return prior: prior-range for quadratic coefficients
"""
return np.ones(3) * 1e80
@signal_base.function
def dmx_delay(toas, freqs, dmx_ids, **kwargs):
"""
Delay in DMX model of DM variations.
:param dmx_ids: dictionary of DMX data for each pulsar from parfile
:param kwargs: dictionary of enterprise DMX parameters
:return wf: DMX signal
"""
wf = np.zeros(len(toas))
dmx = kwargs
for dmx_id in dmx_ids:
mask = np.logical_and(toas >= (dmx_ids[dmx_id]['DMX_R1'] - 0.01) * 86400.,
toas <= (dmx_ids[dmx_id]['DMX_R2'] + 0.01) * 86400.)
wf[mask] += dmx[dmx_id] / freqs[mask]**2 / const.DM_K / 1e12
return wf
def dm_exponential_dip(tmin, tmax, idx=2, sign='negative', name='dmexp'):
"""
Returns chromatic exponential dip (i.e. TOA advance):
:param tmin, tmax:
search window for exponential dip time.
:param idx:
index of radio frequency dependence (i.e. DM is 2). If this is set
to 'vary' then the index will vary from 1 - 6
:param sign:
set sign of dip: 'positive', 'negative', or 'vary'
:param name: Name of signal
:return dmexp:
chromatic exponential dip waveform.
"""
t0_dmexp = parameter.Uniform(tmin, tmax)
log10_Amp_dmexp = parameter.Uniform(-10, -2)
log10_tau_dmexp = parameter.Uniform(0, 2.5)
if sign == 'vary':
sign_param = parameter.Uniform(-1.0, 1.0)
elif sign == 'positive':
sign_param = 1.0
else:
sign_param = -1.0
wf = chrom_exp_decay(log10_Amp=log10_Amp_dmexp,
t0=t0_dmexp, log10_tau=log10_tau_dmexp,
sign_param=sign_param, idx=idx)
dmexp = deterministic_signals.Deterministic(wf, name=name)
return dmexp
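# Illustrative usage sketch (not part of the original module): let the dip epoch be
# searched over the full data span of a hypothetical enterprise.Pulsar object `psr`
# (TOAs are converted from seconds to MJD, the units expected by the prior bounds).
def _example_dm_exponential_dip_usage(psr):
    """Sketch: one negative DM dip with an unconstrained epoch."""
    tmin = psr.toas.min() / const.day
    tmax = psr.toas.max() / const.day
    return dm_exponential_dip(tmin, tmax, idx=2, sign='negative', name='dmexp')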
def dm_exponential_cusp(tmin, tmax, idx=2, sign='negative',
symmetric=False, name='dm_cusp'):
"""
Returns chromatic exponential cusp (i.e. TOA advance):
:param tmin, tmax:
search window for exponential cusp time.
:param idx:
index of radio frequency dependence (i.e. DM is 2). If this is set
to 'vary' then the index will vary from 1 - 6
:param sign:
set sign of dip: 'positive', 'negative', or 'vary'
:param name: Name of signal
    :return dm_cusp:
        chromatic exponential cusp waveform.
"""
t0_dm_cusp = parameter.Uniform(tmin, tmax)
log10_Amp_dm_cusp = parameter.Uniform(-10, -2)
log10_tau_dm_cusp_pre = parameter.Uniform(0, 2.5)
if sign == 'vary':
sign_param = parameter.Uniform(-1.0, 1.0)
elif sign == 'positive':
sign_param = 1.0
else:
sign_param = -1.0
if symmetric:
log10_tau_dm_cusp_post = 1
else:
log10_tau_dm_cusp_post = parameter.Uniform(0, 2.5)
wf = chrom_exp_cusp(log10_Amp=log10_Amp_dm_cusp, sign_param=sign_param,
t0=t0_dm_cusp, log10_tau_pre=log10_tau_dm_cusp_pre,
log10_tau_post=log10_tau_dm_cusp_post,
symmetric=symmetric, idx=idx)
dm_cusp = deterministic_signals.Deterministic(wf, name=name)
return dm_cusp
def dm_dual_exp_cusp(tmin, tmax, idx1=2, idx2=4, sign='negative',
symmetric=False, name='dual_dm_cusp'):
"""
Returns chromatic exponential cusp (i.e. TOA advance):
:param tmin, tmax:
search window for exponential cusp time.
    :param idx1, idx2:
        indices of radio frequency dependence of the two cusp components
        (i.e. DM is 2).
    :param sign:
        set sign of dip: 'positive', 'negative', or 'vary'
    :param name: Name of signal
    :return dm_cusp:
        chromatic double exponential-cusp waveform.
"""
t0_dual_cusp = parameter.Uniform(tmin, tmax)
log10_Amp_dual_cusp_1 = parameter.Uniform(-10, -2)
log10_Amp_dual_cusp_2 = parameter.Uniform(-10, -2)
log10_tau_dual_cusp_pre_1 = parameter.Uniform(0, 2.5)
log10_tau_dual_cusp_pre_2 = parameter.Uniform(0, 2.5)
if sign == 'vary':
sign_param = parameter.Uniform(-1.0, 1.0)
elif sign == 'positive':
sign_param = 1.0
else:
sign_param = -1.0
if symmetric:
log10_tau_dual_cusp_post_1 = 1
log10_tau_dual_cusp_post_2 = 1
else:
log10_tau_dual_cusp_post_1 = parameter.Uniform(0, 2.5)
log10_tau_dual_cusp_post_2 = parameter.Uniform(0, 2.5)
wf = chrom_dual_exp_cusp(t0=t0_dual_cusp, sign_param=sign_param,
symmetric=symmetric,
log10_Amp_1=log10_Amp_dual_cusp_1,
log10_tau_pre_1=log10_tau_dual_cusp_pre_1,
log10_tau_post_1=log10_tau_dual_cusp_post_1,
log10_Amp_2=log10_Amp_dual_cusp_2,
log10_tau_pre_2=log10_tau_dual_cusp_pre_2,
log10_tau_post_2=log10_tau_dual_cusp_post_2,
idx1=idx1, idx2=idx2)
dm_cusp = deterministic_signals.Deterministic(wf, name=name)
return dm_cusp
def dmx_signal(dmx_data, name='dmx_signal'):
"""
Returns DMX signal:
:param dmx_data: dictionary of DMX data for each pulsar from parfile.
:param name: Name of signal.
:return dmx_sig:
dmx signal waveform.
"""
dmx = {}
for dmx_id in sorted(dmx_data):
dmx_data_tmp = dmx_data[dmx_id]
dmx.update({dmx_id: parameter.Normal(mu=dmx_data_tmp['DMX_VAL'],
sigma=dmx_data_tmp['DMX_ERR'])})
wf = dmx_delay(dmx_ids=dmx_data, **dmx)
dmx_sig = deterministic_signals.Deterministic(wf, name=name)
return dmx_sig
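# Illustrative sketch (not part of the original module) of the dictionary structure
# expected by dmx_signal / dmx_delay; the two bins and their values are made up.
# In practice this dictionary is parsed from the DMX entries of a par file.
def _example_dmx_signal_usage():
    """Sketch: a DMX signal built from two hand-made DMX bins."""
    dmx_data = {
        'DMX_0001': {'DMX_VAL': 1.0e-4, 'DMX_ERR': 5.0e-5,
                     'DMX_R1': 53000.0, 'DMX_R2': 53015.0},
        'DMX_0002': {'DMX_VAL': -2.0e-4, 'DMX_ERR': 5.0e-5,
                     'DMX_R1': 53015.0, 'DMX_R2': 53030.0},
    }
    return dmx_signal(dmx_data, name='dmx_signal')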
def dm_annual_signal(idx=2, name='dm_s1yr'):
"""
Returns chromatic annual signal (i.e. TOA advance):
:param idx:
index of radio frequency dependence (i.e. DM is 2). If this is set
to 'vary' then the index will vary from 1 - 6
:param name: Name of signal
:return dm1yr:
chromatic annual waveform.
"""
log10_Amp_dm1yr = parameter.Uniform(-10, -2)
phase_dm1yr = parameter.Uniform(0, 2*np.pi)
wf = chrom_yearly_sinusoid(log10_Amp=log10_Amp_dm1yr,
phase=phase_dm1yr, idx=idx)
dm1yr = deterministic_signals.Deterministic(wf, name=name)
return dm1yr
| 12,928 | 33.569519 | 96 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/chromatic/solar_wind.py |
import os
import numpy as np
import scipy.stats as sps
import scipy.special as spsf
from enterprise import constants as const
from enterprise.signals import (deterministic_signals, gp_signals, parameter,
signal_base, utils)
from .. import gp_kernels as gpk
defpath = os.path.dirname(__file__)
yr_in_sec = 365.25*24*3600
@signal_base.function
def solar_wind(toas, freqs, planetssb, sunssb, pos_t,
n_earth=5, n_earth_bins=None,
t_init=None, t_final=None):
"""
    Construct the deterministic solar-wind DM delay time series.
    :param toas: vector of time series in seconds
    :param planetssb: solar system barycenter positions
:param pos_t: pulsar position as 3-vector
:param freqs: radio frequencies of observations [MHz]
:param n_earth: The electron density from the solar wind at 1 AU.
:param n_earth_bins: Number of binned values of n_earth for which to fit or
an array or list of bin edges to use for binned n_Earth values.
In the latter case the first and last edges must encompass all
TOAs and in all cases it must match the size (number of
elements) of n_earth.
:param t_init: Initial time of earliest TOA in entire dataset, including
all pulsars.
:param t_final: Final time of last TOA in entire dataset, including all
pulsars.
:return dt_DM: Chromatic time delay due to solar wind
"""
if n_earth_bins is None:
theta, R_earth, _, _ = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar(n_earth, theta, R_earth)
dt_DM = (dm_sol_wind) * 4.148808e3 / freqs**2
else:
if isinstance(n_earth_bins, int) and (t_init is None
or t_final is None):
err_msg = 'Need to enter t_init and t_final '
err_msg += 'to make binned n_earth values.'
raise ValueError(err_msg)
elif isinstance(n_earth_bins, int):
edges, step = np.linspace(t_init, t_final, n_earth_bins,
endpoint=True, retstep=True)
elif isinstance(n_earth_bins, list) or isinstance(n_earth_bins,
np.ndarray):
edges = n_earth_bins
dt_DM = []
for ii, bin in enumerate(edges[:-1]):
bin_mask = np.logical_and(toas >= bin, toas <= edges[ii + 1])
theta, R_earth, _, _ = theta_impact(planetssb,
sunssb,
pos_t)
dm_sol_wind = dm_solar(n_earth[ii], theta[bin_mask],
R_earth[bin_mask])
if dm_sol_wind.size != 0:
dt_DM.extend((dm_sol_wind)
* 4.148808e3 / freqs[bin_mask]**2)
else:
pass
dt_DM = np.array(dt_DM)
if dt_DM.size!=toas.size:
err_msg = 'dt_DM ({0}) does not '.format(dt_DM.size)
err_msg +='match number of TOAs ({0})!!!'.format(toas.size)
raise ValueError(err_msg)
return dt_DM
@signal_base.function
def solar_wind_r_to_p(toas, freqs, planetssb, sunssb, pos_t,
n_earth=5, power=4.39, log10_ne=False):
"""
    Construct the deterministic solar-wind DM delay time series for a 1/r^p density profile.
    :param toas: vector of time series in seconds
    :param planetssb: solar system barycenter positions
:param pos_t: pulsar position as 3-vector
:param freqs: radio frequencies of observations [MHz]
:param n_earth: The electron density from the solar wind at 1 AU.
:param power: Negative power of the density profile for the solar wind, r^-p.
:param log10_ne: Whether the provided value is log10 of the electron
density for this term. Suggested to set True for power much larger than
normal.
:return dt_DM: Chromatic time delay due to solar wind
"""
if log10_ne:
n_earth = 10**n_earth
theta, _, b, z_earth = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar_r_to_p(n_earth, theta, b, z_earth, power)
dt_DM = (dm_sol_wind) * 4.148808e3 / freqs**2
dt_DM = np.array(dt_DM)
return dt_DM
# linear interpolation basis in time with nu^-2 scaling
@signal_base.function
def linear_interp_basis_sw_dm(toas, freqs, planetssb, sunssb,
pos_t, dt=7*86400):
# get linear interpolation basis in time
U, avetoas = utils.linear_interp_basis(toas, dt=dt)
# scale with radio frequency
theta, R_earth, _, _ = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar(1.0, theta, R_earth)
dt_DM = dm_sol_wind * 4.148808e3 / (freqs**2)
return U * dt_DM[:, None], avetoas
@signal_base.function
def createfourierdesignmatrix_solar_dm(toas, freqs, planetssb, sunssb, pos_t,
modes=None, nmodes=30,
Tspan=None, logf=True, fmin=None,
fmax=None):
"""
Construct DM-Solar Model fourier design matrix.
:param toas: vector of time series in seconds
    :param planetssb: solar system barycenter positions
:param pos_t: pulsar position as 3-vector
:param nmodes: number of fourier coefficients to use
:param freqs: radio frequencies of observations [MHz]
:param Tspan: option to some other Tspan
:param logf: use log frequency spacing
:param fmin: lower sampling frequency
:param fmax: upper sampling frequency
:return: F: SW DM-variation fourier design matrix
:return: f: Sampling frequencies
"""
# get base fourier design matrix and frequencies
F, Ffreqs = utils.createfourierdesignmatrix_red(toas, nmodes=nmodes,
modes=modes,
Tspan=Tspan, logf=logf,
fmin=fmin, fmax=fmax)
theta, R_earth, _, _ = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar(1.0, theta, R_earth)
dt_DM = dm_sol_wind * 4.148808e3 /(freqs**2)
return F * dt_DM[:, None], Ffreqs
def solar_wind_block(n_earth=None, ACE_prior=False, include_swgp=True,
swgp_prior=None, swgp_basis=None, Tspan=None):
"""
Returns Solar Wind DM noise model. Best model from Hazboun, et al (in prep)
Contains a single mean electron density with an auxiliary perturbation
modeled using a gaussian process. The GP has common prior parameters
between all pulsars, but the realizations are different for all pulsars.
Solar Wind DM noise modeled as a power-law with 30 sampling frequencies
:param n_earth:
Solar electron density at 1 AU.
:param ACE_prior:
Whether to use the ACE SWEPAM data as an astrophysical prior.
:param swgp_prior:
Prior function for solar wind Gaussian process. Default is a power law.
:param swgp_basis:
Basis to be used for solar wind Gaussian process.
        Options include ['powerlaw', 'periodic', 'sq_exp']
:param Tspan:
Sets frequency sampling f_i = i / Tspan. Default will
use overall time span for individual pulsar. Default is to use 15
frequencies (1/Tspan,15/Tspan).
"""
if n_earth is None and not ACE_prior:
n_earth = parameter.Uniform(0, 30)('n_earth')
elif n_earth is None and ACE_prior:
n_earth = ACE_SWEPAM_Parameter()('n_earth')
else:
pass
deter_sw = solar_wind(n_earth=n_earth)
mean_sw = deterministic_signals.Deterministic(deter_sw, name='n_earth')
sw_model = mean_sw
if include_swgp:
if swgp_basis == 'powerlaw':
# dm noise parameters that are common
log10_A_sw = parameter.Uniform(-10, 1)
gamma_sw = parameter.Uniform(-2, 1)
sw_prior = utils.powerlaw(log10_A=log10_A_sw, gamma=gamma_sw)
if Tspan is not None:
freqs = np.linspace(1/Tspan, 30/Tspan, 30)
freqs = freqs[1/freqs > 1.5*yr_in_sec]
sw_basis = createfourierdesignmatrix_solar_dm(modes=freqs)
else:
sw_basis = createfourierdesignmatrix_solar_dm(nmodes=15,
Tspan=Tspan)
elif swgp_basis == 'periodic':
# Periodic GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_p = parameter.Uniform(-4, 1)
log10_gam_p = parameter.Uniform(-3, 2)
sw_basis = gpk.linear_interp_basis_dm(dt=6*86400)
sw_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_gam_p=log10_gam_p,
log10_p=log10_p)
elif swgp_basis == 'sq_exp':
# squared-exponential GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
sw_basis = gpk.linear_interp_basis_dm(dt=6*86400)
sw_prior = gpk.se_dm_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell)
gp_sw = gp_signals.BasisGP(sw_prior, sw_basis, name='gp_sw')
sw_model += gp_sw
return sw_model
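# Illustrative usage sketch (not part of the original module): the deterministic mean
# solar wind plus a power-law solar-wind GP, attached to a hypothetical
# enterprise.Pulsar object `psr`.
def _example_solar_wind_block_usage(psr):
    """Sketch: solar-wind model on top of a timing model for one pulsar."""
    from enterprise.signals import gp_signals, signal_base
    sw = solar_wind_block(ACE_prior=False, include_swgp=True, swgp_basis='powerlaw')
    model = gp_signals.TimingModel() + sw
    return signal_base.PTA([model(psr)])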
AU_light_sec = const.AU / const.c # 1 AU in light seconds
AU_pc = const.AU / const.pc # 1 AU in parsecs (for DM normalization)
def _dm_solar_close(n_earth, r_earth):
return (n_earth * AU_light_sec * AU_pc / r_earth)
def _dm_solar(n_earth, theta, r_earth):
return ((np.pi - theta) *
(n_earth * AU_light_sec * AU_pc
/ (r_earth * np.sin(theta))))
def dm_solar(n_earth, theta, r_earth):
"""
Calculates Dispersion measure due to 1/r^2 solar wind density model.
    ::param :n_earth Solar wind proton/electron density at Earth (1/cm^3)
    ::param :theta: angle between sun and line-of-sight to pulsar (rad)
    ::param :r_earth :distance from Earth to Sun (light seconds).
See You et al. 2007 for more details.
"""
return np.where(np.pi - theta >= 1e-5,
_dm_solar(n_earth, theta, r_earth),
_dm_solar_close(n_earth, r_earth))
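# Worked numerical sketch (not part of the original module): the solar-wind DM for a
# nominal density of 5 cm^-3 at 1 AU and a line of sight at 90 degrees solar
# elongation, with the Earth placed at 1 AU.  The numbers are illustrative only.
def _example_dm_solar_value():
    """Sketch: solar-wind DM [pc cm^-3] at 90 degrees elongation."""
    n_earth = 5.0            # electron density at 1 AU [1/cm^3]
    theta = np.pi / 2        # Sun-pulsar line-of-sight angle [rad]
    r_earth = AU_light_sec   # Earth-Sun distance [light seconds]
    return dm_solar(n_earth, theta, r_earth)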
def dm_solar_r_to_p(n_earth, theta, b, z_earth, p):
"""
Calculates Dispersion measure due to 1/r^p solar wind density model.
    ::param :n_earth Solar wind proton/electron density at Earth (1/cm^3)
    ::param :theta: angle between sun and line-of-sight to pulsar (rad)
    ::param :b :impact parameter of the line of sight with respect to the Sun (light seconds)
    ::param :z_earth :distance from Earth to the point of closest approach (light seconds)
    ::param :p :power of the radial density profile, n(r) ~ r^-p
    See You et al. 2007 for more details.
"""
return _dm_solar_r_to_p(n_earth, b, z_earth, p)
def _dm_solar_close_r_to_p(n, z, p):
return n * (AU_light_sec / z)**(p - 1) * (AU_pc / p)
def _dm_solar_r_to_p(n, b, z, p):
return (n * (AU_light_sec / b)**p * b / const.pc * const.c
* (_dm_p_int(b, 1e14, p) - _dm_p_int(b, -z, p)))
def _dm_p_int(b, z, p):
return z / b * spsf.hyp2f1(0.5, p/2., 1.5, -z**2 / b**2)
def theta_impact(planetssb, sunssb, pos_t):
"""
Use the attributes of an enterprise Pulsar object to calculate the
solar impact angle.
::param :planetssb Solar system barycenter time series supplied with
enterprise.Pulsar objects.
::param :sunssb Solar system sun-to-barycenter timeseries supplied with
enterprise.Pulsar objects.
::param :pos_t Unit vector to pulsar position over time in ecliptic
coordinates. Supplied with enterprise.Pulsar objects.
returns: Solar impact angle (rad), Distance to Earth (R_earth),
impact distance (b), perpendicular distance (z_earth)
"""
earth = planetssb[:, 2, :3]
sun = sunssb[:, :3]
earthsun = earth - sun
R_earth = np.sqrt(np.einsum('ij,ij->i', earthsun, earthsun))
Re_cos_theta_impact = np.einsum('ij,ij->i', earthsun, pos_t)
theta_impact = np.arccos(-Re_cos_theta_impact / R_earth)
b = np.sqrt(R_earth**2 - Re_cos_theta_impact**2)
return theta_impact, R_earth, b, -Re_cos_theta_impact
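# Illustrative usage sketch (not part of the original module): per-TOA solar geometry
# for a hypothetical enterprise.Pulsar object `psr`, using the ephemeris arrays that
# enterprise attaches to the pulsar (planetssb, sunssb, pos_t).
def _example_theta_impact_usage(psr):
    """Sketch: solar elongation [deg] and impact parameter for every TOA."""
    theta, r_earth, b, z_earth = theta_impact(psr.planetssb, psr.sunssb, psr.pos_t)
    return np.rad2deg(theta), b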
def sw_mask(psrs, angle_cutoff=None):
"""
Convenience function for masking TOAs lower than a certain solar impact
angle.
param:: :psrs list of enterprise.Pulsar objects
param:: :angle_cutoff (degrees) Mask TOAs within this angle
returns:: dictionary of masks for each pulsar
"""
solar_wind_mask = {}
angle_cutoff = np.deg2rad(angle_cutoff)
    for p in psrs:
        impact_ang, _, _, _ = theta_impact(p.planetssb, p.sunssb, p.pos_t)
solar_wind_mask[p.name] = np.where(impact_ang > angle_cutoff,
True, False)
return solar_wind_mask
# ACE Solar Wind Monitoring data prior for SW electron data.
# Using proton density as a stand in.
def ACE_SWEPAM_Prior(value):
"""Prior function for ACE SWEPAM parameters."""
return ACE_RV.pdf(value)
def ACE_SWEPAM_Sampler(size=None):
"""Sampling function for Uniform parameters."""
return ACE_RV.rvs(size=size)
def ACE_SWEPAM_Parameter(size=None):
"""Class factory for ACE SWEPAM parameters."""
class ACE_SWEPAM_Parameter(parameter.Parameter):
_size = size
_typename = parameter._argrepr('ACE_SWEPAM')
_prior = parameter.Function(ACE_SWEPAM_Prior)
_sampler = staticmethod(ACE_SWEPAM_Sampler)
return ACE_SWEPAM_Parameter
data_file = defpath + '/ACE_SWEPAM_daily_proton_density_1998_2018_MJD_cm-3.txt'
proton_density = np.loadtxt(data_file)
ne_hist = np.histogram(proton_density[:, 1], bins=100, density=True)
ACE_RV = sps.rv_histogram(ne_hist)
| 13,958 | 35.637795 | 81 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/optimal_statistic.py |
import warnings
import numpy as np
import scipy.linalg as sl
from enterprise.signals import gp_priors, signal_base, utils
from enterprise_extensions import model_orfs, models
# Define the output to be on a single line.
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return '%s:%s: %s: %s\n' % (filename, lineno, category.__name__, message)
# Override default format.
warnings.formatwarning = warning_on_one_line
class OptimalStatistic(object):
"""
Class for the Optimal Statistic as used in the analysis paper.
This class can be used for both standard ML or noise-marginalized OS.
:param psrs: List of `enterprise` Pulsar instances.
:param bayesephem: Include BayesEphem model. Default=True
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param orf:
String representing which overlap reduction function to use.
By default we do not use any spatial correlations. Permitted
        values are ['hd', 'dipole', 'monopole', 'gw_monopole', 'gw_dipole', 'st'].
"""
def __init__(self, psrs, bayesephem=True, gamma_common=4.33, orf='hd',
wideband=False, select=None, noisedict=None, pta=None):
# initialize standard model with fixed white noise and
if pta is None:
self.pta = models.model_2a(psrs, psd='powerlaw',
bayesephem=bayesephem,
gamma_common=gamma_common,
is_wideband=wideband,
select='backend', noisedict=noisedict)
else:
if np.any(['marginalizing_linear_timing' in sig for sig in pta.signals]):
msg = "Can't run optimal statistic with `enterprise.gp_signals.MarginalizingTimingModel`."
msg += " Try creating PTA with `enterprise.gp_signals.TimingModel`, or if using `enterprise_extensions`"
msg += " set `tm_marg=False`."
raise ValueError(msg)
self.pta = pta
self.gamma_common = gamma_common
# get frequencies here
self.freqs = self._get_freqs(psrs)
# set up cache
self._set_cache_parameters()
# pulsar locations
self.psrlocs = [p.pos for p in psrs]
# overlap reduction function
if orf == 'hd':
self.orf = model_orfs.hd_orf
elif orf == 'dipole':
self.orf = model_orfs.dipole_orf
elif orf == 'monopole':
self.orf = model_orfs.monopole_orf
elif orf == 'gw_monopole':
self.orf = model_orfs.gw_monopole_orf
elif orf == 'gw_dipole':
self.orf = model_orfs.gw_dipole_orf
elif orf == 'st':
self.orf = model_orfs.st_orf
else:
raise ValueError('Unknown ORF!')
def compute_os(self, params=None, psd='powerlaw', fgw=None):
"""
Computes the optimal statistic values given an
`enterprise` parameter dictionary.
:param params: `enterprise` parameter dictionary.
:param psd: choice of cross-power psd [powerlaw,spectrum]
:fgw: frequency of GW spectrum to probe, in Hz [default=None]
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair.
OS: Optimal statistic value (units of A_gw^2)
OS_sig: 1-sigma uncertainty on OS
.. note:: SNR is computed as OS / OS_sig. In the case of a 'spectrum' model
the OS variable will be the PSD(fgw) * Tspan value at the relevant fgw bin.
"""
if params is None:
params = {name: par.sample() for name, par
in zip(self.pta.param_names, self.pta.params)}
else:
# check to see that the params dictionary includes values
# for all of the parameters in the model
for p in self.pta.param_names:
if p not in params.keys():
msg = '{0} is not included '.format(p)
msg += 'in the parameter dictionary. '
msg += 'Drawing a random value.'
warnings.warn(msg)
# get matrix products
TNrs = self.get_TNr(params=params)
TNTs = self.get_TNT(params=params)
FNrs = self.get_FNr(params=params)
FNFs = self.get_FNF(params=params)
FNTs = self.get_FNT(params=params)
phiinvs = self.pta.get_phiinv(params, logdet=False)
X, Z = [], []
for TNr, TNT, FNr, FNF, FNT, phiinv in zip(TNrs, TNTs, FNrs, FNFs, FNTs, phiinvs):
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
cf = sl.cho_factor(Sigma)
SigmaTNr = sl.cho_solve(cf, TNr)
SigmaTNF = sl.cho_solve(cf, FNT.T)
except np.linalg.LinAlgError:
SigmaTNr = np.linalg.solve(Sigma, TNr)
SigmaTNF = np.linalg.solve(Sigma, FNT.T)
FNTSigmaTNr = np.dot(FNT, SigmaTNr)
X.append(FNr - FNTSigmaTNr)
Z.append(FNF - np.dot(FNT, SigmaTNF))
npsr = len(self.pta._signalcollections)
rho, sig, ORF, xi = [], [], [], []
for ii in range(npsr):
for jj in range(ii+1, npsr):
if psd == 'powerlaw':
if self.gamma_common is None and 'gw_gamma' in params.keys():
phiIJ = utils.powerlaw(self.freqs, log10_A=0,
gamma=params['gw_gamma'])
else:
phiIJ = utils.powerlaw(self.freqs, log10_A=0,
gamma=self.gamma_common)
elif psd == 'spectrum':
Sf = -np.inf * np.ones(int(len(self.freqs)/2))
idx = (np.abs(np.unique(self.freqs) - fgw)).argmin()
Sf[idx] = 0.0
phiIJ = gp_priors.free_spectrum(self.freqs,
log10_rho=Sf)
top = np.dot(X[ii], phiIJ * X[jj])
bot = np.trace(np.dot(Z[ii]*phiIJ[None, :], Z[jj]*phiIJ[None, :]))
# cross correlation and uncertainty
rho.append(top / bot)
sig.append(1 / np.sqrt(bot))
# Overlap reduction function for PSRs ii, jj
ORF.append(self.orf(self.psrlocs[ii], self.psrlocs[jj]))
# angular separation
xi.append(np.arccos(np.dot(self.psrlocs[ii], self.psrlocs[jj])))
rho = np.array(rho)
sig = np.array(sig)
ORF = np.array(ORF)
xi = np.array(xi)
OS = (np.sum(rho*ORF / sig ** 2) / np.sum(ORF ** 2 / sig ** 2))
OS_sig = 1 / np.sqrt(np.sum(ORF ** 2 / sig ** 2))
return xi, rho, sig, OS, OS_sig
def compute_noise_marginalized_os(self, chain, param_names=None, N=10000):
"""
Compute noise marginalized OS.
:param chain: MCMC chain from Bayesian run.
:param param_names: list of parameter names for the chain file
:param N: number of iterations to run.
:returns: (os, snr) array of OS and SNR values for each iteration.
"""
# check that the chain file has the same number of parameters as the model
if chain.shape[1] - 4 != len(self.pta.param_names):
msg = 'MCMC chain does not have the same number of parameters '
msg += 'as the model.'
warnings.warn(msg)
opt, sig = np.zeros(N), np.zeros(N)
rho, rho_sig = [], []
setpars = {}
for ii in range(N):
idx = np.random.randint(0, chain.shape[0])
            # if param_names is not specified, build the parameter dictionary
            # from the parameter names stored in the pta object
if param_names is None:
setpars.update(self.pta.map_params(chain[idx, :-4]))
else:
setpars = dict(zip(param_names, chain[idx, :-4]))
xi, rho_tmp, rho_sig_tmp, opt[ii], sig[ii] = self.compute_os(params=setpars)
rho.append(rho_tmp)
rho_sig.append(rho_sig_tmp)
return (np.array(xi), np.array(rho), np.array(rho_sig), opt, opt/sig)
def compute_noise_maximized_os(self, chain, param_names=None):
"""
Compute noise maximized OS.
:param chain: MCMC chain from Bayesian run.
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair.
OS: Optimal statistic value (units of A_gw^2)
SNR: OS / OS_sig
"""
# check that the chain file has the same number of parameters as the model
if chain.shape[1] - 4 != len(self.pta.param_names):
msg = 'MCMC chain does not have the same number of parameters '
msg += 'as the model.'
warnings.warn(msg)
idx = np.argmax(chain[:, -4])
        # if param_names is not specified, build the parameter dictionary
        # from the parameter names stored in the pta object
if param_names is None:
setpars = (self.pta.map_params(chain[idx, :-4]))
else:
setpars = dict(zip(param_names, chain[idx, :-4]))
xi, rho, sig, Opt, Sig = self.compute_os(params=setpars)
return (xi, rho, sig, Opt, Opt/Sig)
def compute_multiple_corr_os(self, params=None, psd='powerlaw', fgw=None,
correlations=['monopole', 'dipole', 'hd']):
"""
Fits the correlations to multiple spatial correlation functions
:param params: `enterprise` parameter dictionary.
:param psd: choice of cross-power psd [powerlaw,spectrum]
:param fgw: frequency of GW spectrum to probe, in Hz [default=None]
:param correlations: list of correlation functions
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair.
A: An array of correlation amplitudes
OS_sig: An array of 1-sigma uncertainties on the correlation amplitudes
"""
xi, rho, sig, _, _ = self.compute_os(params=params, psd='powerlaw', fgw=None)
# construct a list of all the ORFs to be fit simultaneously
ORFs = []
for corr in correlations:
if corr == 'hd':
orf_func = model_orfs.hd_orf
elif corr == 'dipole':
orf_func = model_orfs.dipole_orf
elif corr == 'monopole':
orf_func = model_orfs.monopole_orf
elif corr == 'gw_monopole':
orf_func = model_orfs.gw_monopole_orf
elif corr == 'gw_dipole':
orf_func = model_orfs.gw_dipole_orf
elif corr == 'st':
orf_func = model_orfs.st_orf
else:
raise ValueError('Unknown ORF!')
ORF = []
npsr = len(self.pta._signalcollections)
for ii in range(npsr):
for jj in range(ii+1, npsr):
ORF.append(orf_func(self.psrlocs[ii], self.psrlocs[jj]))
ORFs.append(np.array(ORF))
Bmat = np.array([[np.sum(ORFs[i]*ORFs[j]/sig**2) for i in range(len(ORFs))]
for j in range(len(ORFs))])
Bmatinv = np.linalg.inv(Bmat)
Cmat = np.array([np.sum(rho*ORFs[i]/sig**2) for i in range(len(ORFs))])
A = np.dot(Bmatinv, Cmat)
A_err = np.array([np.sqrt(Bmatinv[i, i]) for i in range(len(ORFs))])
return xi, rho, sig, A, A_err
def compute_noise_marginalized_multiple_corr_os(self, chain, param_names=None, N=10000,
correlations=['monopole', 'dipole', 'hd']):
"""
Noise-marginalized fitting of the correlations to multiple spatial
correlation functions
:param correlations: list of correlation functions
:param chain: MCMC chain from Bayesian run.
:param param_names: list of parameter names for the chain file
:param N: number of iterations to run.
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair and for each noise realization
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair and for each noise realization
A: An array of correlation amplitudes for each noise realization
OS_sig: An array of 1-sigma uncertainties on the correlation amplitudes for each noise realization
"""
# check that the chain file has the same number of parameters as the model
if chain.shape[1] - 4 != len(self.pta.param_names):
msg = 'MCMC chain does not have the same number of parameters '
msg += 'as the model.'
warnings.warn(msg)
rho, sig, A, A_err = [], [], [], []
setpars = {}
for ii in range(N):
idx = np.random.randint(0, chain.shape[0])
            # if param_names is not specified, build the parameter dictionary
            # from the parameter names stored in the pta object
if param_names is None:
setpars.update(self.pta.map_params(chain[idx, :-4]))
else:
setpars = dict(zip(param_names, chain[idx, :-4]))
xi, rho_tmp, sig_tmp, A_tmp, A_err_tmp = self.compute_multiple_corr_os(params=setpars,
correlations=correlations)
rho.append(rho_tmp)
sig.append(sig_tmp)
A.append(A_tmp)
A_err.append(A_err_tmp)
return np.array(xi), np.array(rho), np.array(sig), np.array(A), np.array(A_err)
@signal_base.cache_call(['basis_params'])
def get_Fmats(self, params={}):
"""Kind of a hack to get F-matrices"""
Fmats = []
for sc in self.pta._signalcollections:
ind = []
for signal, idx in sc._idx.items():
if 'red noise' in signal.signal_name and signal.signal_id in ['gw', 'gw_crn']:
ind.append(idx)
ix = np.unique(np.concatenate(ind))
Fmats.append(sc.get_basis(params=params)[:, ix])
return Fmats
def _get_freqs(self, psrs):
"""Hackish way to get frequency vector."""
for sig in self.pta._signalcollections[0]._signals:
if 'red noise' in sig.signal_name and sig.signal_id in ['gw', 'gw_crn']:
# make sure the basis is created
_ = sig.get_basis()
if isinstance(sig._labels, np.ndarray):
return sig._labels
else:
return sig._labels['']
raise ValueError("No frequency basis in pulsar models")
def _set_cache_parameters(self):
""" Set cache parameters for efficiency. """
self.white_params = list(set(par for sc in self.pta._signalcollections
for par in sc.white_params))
self.basis_params = list(set(par for sc in self.pta._signalcollections
for par in sc.basis_params))
self.delay_params = list(set(par for sc in self.pta._signalcollections
for par in sc.delay_params))
def get_TNr(self, params={}):
return self.pta.get_TNr(params=params)
@signal_base.cache_call(['white_params', 'delay_params', 'basis_params'])
def get_FNr(self, params={}):
FNrs = []
for ct, sc in enumerate(self.pta._signalcollections):
N = sc.get_ndiag(params=params)
F = self.get_Fmats(params)[ct]
res = sc.get_detres(params=params)
FNrs.append(N.solve(res, left_array=F))
return FNrs
@signal_base.cache_call(['white_params', 'basis_params'])
def get_FNF(self, params={}):
FNFs = []
for ct, sc in enumerate(self.pta._signalcollections):
N = sc.get_ndiag(params=params)
F = self.get_Fmats(params)[ct]
FNFs.append(N.solve(F, left_array=F))
return FNFs
def get_TNT(self, params={}):
return self.pta.get_TNT(params=params)
@signal_base.cache_call(['white_params', 'basis_params'])
def get_FNT(self, params={}):
FNTs = []
for ct, sc in enumerate(self.pta._signalcollections):
N = sc.get_ndiag(params=params)
F = self.get_Fmats(params)[ct]
T = sc.get_basis(params=params)
FNTs.append(N.solve(T, left_array=F))
return FNTs
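# Illustrative usage sketch (not part of the original module): build the OS for a
# hypothetical list of enterprise.Pulsar objects `psrs` with a fixed-gamma HD template
# and evaluate it at a single noise-parameter dictionary `params`.
def _example_optimal_statistic_usage(psrs, params, noisedict=None):
    """Sketch: one OS evaluation; see compute_noise_marginalized_os for the NM-OS."""
    ostat = OptimalStatistic(psrs, bayesephem=False, gamma_common=4.33, orf='hd',
                             noisedict=noisedict)
    xi, rho, sig, OS, OS_sig = ostat.compute_os(params=params)
    return OS, OS / OS_sig   # squared-amplitude estimator and its S/N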
| 17,367 | 38.205418 | 120 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/Fe_statistic.py |
import numpy as np
import scipy.linalg as sl
from enterprise.signals import (gp_signals, parameter, signal_base, utils,
white_signals)
class FeStat(object):
"""
Class for the Fe-statistic.
:param psrs: List of `enterprise` Pulsar instances.
:param params: Dictionary of noise parameters.
"""
def __init__(self, psrs, params=None):
print('Initializing the model...')
efac = parameter.Constant()
equad = parameter.Constant()
ef = white_signals.MeasurementNoise(efac=efac)
eq = white_signals.EquadNoise(log10_equad=equad)
tm = gp_signals.TimingModel(use_svd=True)
s = eq + ef + tm
model = []
for p in psrs:
model.append(s(p))
self.pta = signal_base.PTA(model)
# set white noise parameters
if params is None:
print('No noise dictionary provided!...')
else:
self.pta.set_default_params(params)
self.psrs = psrs
self.params = params
self.Nmats = None
def get_Nmats(self):
'''Makes the Nmatrix used in the fstatistic'''
TNTs = self.pta.get_TNT(self.params)
phiinvs = self.pta.get_phiinv(self.params, logdet=False, method='partition')
# Get noise parameters for pta toaerr**2
Nvecs = self.pta.get_ndiag(self.params)
# Get the basis matrix
Ts = self.pta.get_basis(self.params)
Nmats = [make_Nmat(phiinv, TNT, Nvec, T) for phiinv, TNT, Nvec, T in zip(phiinvs, TNTs, Nvecs, Ts)]
return Nmats
def compute_Fe(self, f0, gw_skyloc, brave=False, maximized_parameters=False):
"""
Computes the Fe-statistic (see Ellis, Siemens, Creighton 2012).
:param f0: GW frequency
:param gw_skyloc: 2x{number of sky locations} array containing [theta, phi] for each queried sky location,
where theta=pi/2-DEC, phi=RA,
                           for a single sky location use gw_skyloc = np.array([[theta,], [phi,]])
:param brave: Skip sanity checks in linalg for speedup if True.
:param maximized_parameters: Calculate maximized extrinsic parameters if True.
:returns:
fstat: value of the Fe-statistic
:if maximized_parameters=True also returns:
inc_max: Maximized value of inclination
psi_max: Maximized value of polarization angle
            phase0_max: Maximized value of initial phase
h_max: Maximized value of amplitude
"""
tref=53000*86400
phiinvs = self.pta.get_phiinv(self.params, logdet=False)
TNTs = self.pta.get_TNT(self.params)
Ts = self.pta.get_basis()
if self.Nmats is None:
self.Nmats = self.get_Nmats()
n_psr = len(self.psrs)
N = np.zeros((n_psr, 4))
M = np.zeros((n_psr, 4, 4))
for idx, (psr, Nmat, TNT, phiinv, T) in enumerate(zip(self.psrs, self.Nmats,
TNTs, phiinvs, Ts)):
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
ntoa = len(psr.toas)
A = np.zeros((4, ntoa))
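            # filters for the two GW polarizations: rows 0-1 (plus) and rows 2-3
            # (cross) are identical here and are weighted by the antenna patterns
            # F_p, F_c when NN and MM are assembled below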
A[0, :] = 1 / f0 ** (1 / 3) * np.sin(2 * np.pi * f0 * (psr.toas-tref))
A[1, :] = 1 / f0 ** (1 / 3) * np.cos(2 * np.pi * f0 * (psr.toas-tref))
A[2, :] = 1 / f0 ** (1 / 3) * np.sin(2 * np.pi * f0 * (psr.toas-tref))
A[3, :] = 1 / f0 ** (1 / 3) * np.cos(2 * np.pi * f0 * (psr.toas-tref))
ip1 = innerProduct_rr(A[0, :], psr.residuals, Nmat, T, Sigma, brave=brave)
ip2 = innerProduct_rr(A[1, :], psr.residuals, Nmat, T, Sigma, brave=brave)
ip3 = innerProduct_rr(A[2, :], psr.residuals, Nmat, T, Sigma, brave=brave)
ip4 = innerProduct_rr(A[3, :], psr.residuals, Nmat, T, Sigma, brave=brave)
N[idx, :] = np.array([ip1, ip2, ip3, ip4])
# define M matrix M_ij=(A_i|A_j)
for jj in range(4):
for kk in range(4):
M[idx, jj, kk] = innerProduct_rr(A[jj, :], A[kk, :], Nmat, T, Sigma, brave=brave)
fstat = np.zeros(gw_skyloc.shape[1])
if maximized_parameters:
inc_max = np.zeros(gw_skyloc.shape[1])
psi_max = np.zeros(gw_skyloc.shape[1])
phase0_max = np.zeros(gw_skyloc.shape[1])
h_max = np.zeros(gw_skyloc.shape[1])
for j, gw_pos in enumerate(gw_skyloc.T):
NN = np.copy(N)
MM = np.copy(M)
for idx, psr in enumerate(self.psrs):
F_p, F_c, _ = utils.create_gw_antenna_pattern(psr.pos, gw_pos[0], gw_pos[1])
NN[idx, :] *= np.array([F_p, F_p, F_c, F_c])
MM[idx, :, :] *= np.array([[F_p**2, F_p**2, F_p*F_c, F_p*F_c],
[F_p**2, F_p**2, F_p*F_c, F_p*F_c],
[F_p*F_c, F_p*F_c, F_c**2, F_c**2],
[F_p*F_c, F_p*F_c, F_c**2, F_c**2]])
N_sum = np.sum(NN, axis=0)
M_sum = np.sum(MM, axis=0)
# take inverse of M
Minv = np.linalg.pinv(M_sum)
fstat[j] = 0.5 * np.dot(N_sum, np.dot(Minv, N_sum))
if maximized_parameters:
a_hat = np.dot(Minv, N_sum)
A_p = (np.sqrt((a_hat[0]+a_hat[3])**2 + (a_hat[1]-a_hat[2])**2) +
np.sqrt((a_hat[0]-a_hat[3])**2 + (a_hat[1]+a_hat[2])**2))
A_c = (np.sqrt((a_hat[0]+a_hat[3])**2 + (a_hat[1]-a_hat[2])**2) -
np.sqrt((a_hat[0]-a_hat[3])**2 + (a_hat[1]+a_hat[2])**2))
AA = A_p + np.sqrt(A_p**2 - A_c**2)
# AA = A_p + np.sqrt(A_p**2 + A_c**2)
# inc_max[j] = np.arccos(-A_c/AA)
inc_max[j] = np.arccos(A_c/AA)
two_psi_max = np.arctan2((A_p*a_hat[3] - A_c*a_hat[0]),
(A_c*a_hat[2] + A_p*a_hat[1]))
psi_max[j]=0.5*np.arctan2(np.sin(two_psi_max),
-np.cos(two_psi_max))
# convert from [-pi, pi] convention to [0,2*pi] convention
if psi_max[j]<0:
psi_max[j]+=np.pi
                # remove a degeneracy of the solution (shifting psi by pi/2 while mapping phi0 --> 2*pi - phi0 leaves the waveform unchanged)
if psi_max[j]>np.pi/2:
psi_max[j]+= -np.pi/2
half_phase0 = -0.5*np.arctan2(A_p*a_hat[3] - A_c*a_hat[0],
A_c*a_hat[1] + A_p*a_hat[2])
phase0_max[j] = np.arctan2(-np.sin(2*half_phase0),
np.cos(2*half_phase0))
# convert from [-pi, pi] convention to [0,2*pi] convention
if phase0_max[j]<0:
phase0_max[j]+=2*np.pi
zeta = np.abs(AA)/4 # related to amplitude, zeta=M_chirp^(5/3)/D
h_max[j] = zeta * 2 * (np.pi*f0)**(2/3)*np.pi**(1/3)
if maximized_parameters:
return fstat, inc_max, psi_max, phase0_max, h_max
else:
return fstat
def innerProduct_rr(x, y, Nmat, Tmat, Sigma, TNx=None, TNy=None, brave=False):
r"""
Compute inner product using rank-reduced
approximations for red noise/jitter
Compute: x^T N^{-1} y - x^T N^{-1} T \Sigma^{-1} T^T N^{-1} y
:param x: vector timeseries 1
:param y: vector timeseries 2
:param Nmat: white noise matrix
:param Tmat: Modified design matrix including red noise/jitter
:param Sigma: Sigma matrix (\varphi^{-1} + T^T N^{-1} T)
:param TNx: T^T N^{-1} x precomputed
:param TNy: T^T N^{-1} y precomputed
:return: inner product (x|y)
"""
# white noise term
Ni = Nmat
xNy = np.dot(np.dot(x, Ni), y)
Nx, Ny = np.dot(Ni, x), np.dot(Ni, y)
if TNx is None and TNy is None:
TNx = np.dot(Tmat.T, Nx)
TNy = np.dot(Tmat.T, Ny)
if brave:
cf = sl.cho_factor(Sigma, check_finite=False)
SigmaTNy = sl.cho_solve(cf, TNy, check_finite=False)
else:
cf = sl.cho_factor(Sigma)
SigmaTNy = sl.cho_solve(cf, TNy)
ret = xNy - np.dot(TNx, SigmaTNy)
return ret
def make_Nmat(phiinv, TNT, Nvec, T):
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
cf = sl.cho_factor(Sigma)
# Nshape = np.shape(T)[0] # Not currently used in code
TtN = np.multiply((1/Nvec)[:, None], T).T
# Put pulsar's autoerrors in a diagonal matrix
Ndiag = np.diag(1/Nvec)
expval2 = sl.cho_solve(cf, TtN)
# TtNt = np.transpose(TtN) # Not currently used in code
    # return an Ntoa x Ntoa noise matrix for use in the dense inner-product calculations above
return Ndiag - np.dot(TtN.T, expval2)
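# Illustrative usage sketch (not part of the original module): evaluate the
# Fe-statistic on a coarse grid of sky positions at a single GW frequency, for a
# hypothetical list of enterprise.Pulsar objects `psrs` and a white-noise dictionary
# `noisedict`.
def _example_festat_usage(psrs, noisedict, f0=1e-8):
    """Sketch: Fe over a 5 x 5 sky grid at GW frequency f0 [Hz]."""
    fe = FeStat(psrs, params=noisedict)
    thetas = np.linspace(0.1, np.pi - 0.1, 5)
    phis = np.linspace(0.0, 2.0 * np.pi, 5, endpoint=False)
    gw_skyloc = np.array([np.repeat(thetas, len(phis)), np.tile(phis, len(thetas))])
    return fe.compute_Fe(f0, gw_skyloc)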
| 8,981 | 35.661224 | 118 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/F_statistic.py |
import numpy as np
import scipy.special
from enterprise.signals import deterministic_signals, gp_signals, signal_base
from enterprise_extensions import blocks, deterministic
def get_xCy(Nvec, T, sigmainv, x, y):
"""Get x^T C^{-1} y"""
TNx = Nvec.solve(x, left_array=T)
TNy = Nvec.solve(y, left_array=T)
xNy = Nvec.solve(y, left_array=x)
return xNy - TNx @ sigmainv @ TNy
def get_TCy(Nvec, T, y, sigmainv, TNT):
"""Get T^T C^{-1} y"""
TNy = Nvec.solve(y, left_array=T)
return TNy - TNT @ sigmainv @ TNy
def innerprod(Nvec, T, sigmainv, TNT, x, y):
"""Get the inner product between x and y"""
xCy = get_xCy(Nvec, T, sigmainv, x, y)
TCy = get_TCy(Nvec, T, y, sigmainv, TNT)
TCx = get_TCy(Nvec, T, x, sigmainv, TNT)
return xCy - TCx.T @ sigmainv @ TCy
class FpStat(object):
"""
Class for the Fp-statistic.
:param psrs: List of `enterprise` Pulsar instances.
:param noisedict: Dictionary of white noise parameter values. Default=None
:param psrTerm: Include the pulsar term in the CW signal model. Default=True
:param bayesephem: Include BayesEphem model. Default=True
"""
def __init__(self, psrs, noisedict=None,
psrTerm=True, bayesephem=True, pta=None, tnequad=False):
if pta is None:
# initialize standard model with fixed white noise
# uses the implementation of ECORR in gp_signals
print('Initializing the model...')
tmin = np.min([p.toas.min() for p in psrs])
tmax = np.max([p.toas.max() for p in psrs])
Tspan = tmax - tmin
s = gp_signals.TimingModel(use_svd=True)
s += deterministic.cw_block_circ(amp_prior='log-uniform',
psrTerm=psrTerm, tref=tmin, name='cw')
s += blocks.red_noise_block(prior='log-uniform', psd='powerlaw',
Tspan=Tspan, components=30)
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta']:
s2 = s + blocks.white_noise_block(vary=False, inc_ecorr=True,
gp_ecorr=True, tnequad=tnequad)
models.append(s2(p))
else:
s3 = s + blocks.white_noise_block(vary=False, inc_ecorr=False, tnequad=tnequad)
models.append(s3(p))
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!')
else:
pta.set_default_params(noisedict)
self.pta = pta
else:
# user can specify their own pta object
# if ECORR is included, use the implementation in gp_signals
self.pta = pta
self.psrs = psrs
self.noisedict = noisedict
# precompute important bits:
self.phiinvs = self.pta.get_phiinv(noisedict)
self.TNTs = self.pta.get_TNT(noisedict)
self.Nvecs = self.pta.get_ndiag(noisedict)
self.Ts = self.pta.get_basis(noisedict)
# self.cf_TNT = [sl.cho_factor(TNT + np.diag(phiinv)) for TNT, phiinv in zip(self.TNTs, self.phiinvs)]
self.sigmainvs = [np.linalg.pinv(TNT + np.diag(phiinv)) for TNT, phiinv in zip(self.TNTs, self.phiinvs)]
def compute_Fp(self, fgw):
"""
Computes the Fp-statistic.
:param fgw: GW frequency
:returns:
fstat: value of the Fp-statistic at the given frequency
"""
N = np.zeros(2)
M = np.zeros((2, 2))
fstat = 0
for psr, Nvec, TNT, T, sigmainv in zip(self.psrs, self.Nvecs, self.TNTs, self.Ts, self.sigmainvs):
ntoa = len(psr.toas)
A = np.zeros((2, ntoa))
A[0, :] = 1 / fgw ** (1 / 3) * np.sin(2 * np.pi * fgw * psr.toas)
A[1, :] = 1 / fgw ** (1 / 3) * np.cos(2 * np.pi * fgw * psr.toas)
ip1 = innerprod(Nvec, T, sigmainv, TNT, A[0, :], psr.residuals)
# logger.info(ip1)
ip2 = innerprod(Nvec, T, sigmainv, TNT, A[1, :], psr.residuals)
# logger.info(ip2)
N = np.array([ip1, ip2])
# define M matrix M_ij=(A_i|A_j)
for jj in range(2):
for kk in range(2):
M[jj, kk] = innerprod(Nvec, T, sigmainv, TNT, A[jj, :], A[kk, :])
# take inverse of M
Minv = np.linalg.pinv(M)
fstat += 0.5 * np.dot(N, np.dot(Minv, N))
return fstat
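    # Hypothetical convenience method (illustration only, not part of the original
    # class): evaluate the Fp-statistic over a grid of GW frequencies, for example
    # to locate the frequency that maximizes it.
    def _scan_Fp(self, fgws):
        """Return an array of Fp values for an iterable of GW frequencies."""
        return np.array([self.compute_Fp(fgw) for fgw in fgws])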
def compute_fap(self, fgw):
"""
Compute false alarm rate for Fp-Statistic. We calculate
the log of the FAP and then exponentiate it in order
to avoid numerical precision problems
:param fgw: GW frequency
:returns: False alarm probability as defined in Eq (64)
        of Ellis, Siemens, Creighton (2012)
"""
fp0 = self.compute_Fp(fgw)
N = len(self.psrs)
n = np.arange(0, N)
return np.sum(np.exp(n*np.log(fp0)-fp0-np.log(scipy.special.gamma(n+1))))
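# Illustration only (not part of the original module): the false alarm probability
# returned by FpStat.compute_fap is the log-space evaluation of the direct sum
# sum_{n=0}^{N-1} fp0**n * exp(-fp0) / n!; the logs avoid overflow of fp0**n and
# gamma(n+1) when fp0 is large or there are many pulsars.
def _fap_direct(fp0, npsr):
    """Hypothetical helper: the same sum as compute_fap, evaluated without logs."""
    n = np.arange(0, npsr)
    return float(np.sum(fp0 ** n * np.exp(-fp0) / scipy.special.gamma(n + 1)))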
| 5,418 | 33.297468 | 112 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/chi_squared.py |
import numpy as np
import scipy.linalg as sl
def get_chi2(pta, xs):
"""Compute generalize chisq for pta:
chisq = y^T (N + F phi F^T)^-1 y
= y^T N^-1 y - y^T N^-1 F (F^T N^-1 F + phi^-1)^-1 F^T N^-1 y
"""
params = xs if isinstance(xs, dict) else pta.map_params(xs)
# chisq = y^T (N + F phi F^T)^-1 y
# = y^T N^-1 y - y^T N^-1 F (F^T N^-1 F + phi^-1)^-1 F^T N^-1 y
TNrs = pta.get_TNr(params)
TNTs = pta.get_TNT(params)
phiinvs = pta.get_phiinv(params, logdet=True, method='cliques')
    chi2 = sum(ell[0] for ell in pta.get_rNr_logdet(params))  # built-in sum: np.sum over a generator is deprecated
if pta._commonsignals:
raise NotImplementedError("get_chi2 does not support correlated signals")
else:
for TNr, TNT, pl in zip(TNrs, TNTs, phiinvs):
if TNr is None:
continue
phiinv, _ = pl
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
cf = sl.cho_factor(Sigma)
expval = sl.cho_solve(cf, TNr)
except sl.LinAlgError: # pragma: no cover
return -np.inf
chi2 = chi2 - np.dot(TNr, expval)
return chi2
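# Illustration only (not part of the original module): the identity quoted in the
# get_chi2 docstring is the Woodbury matrix identity. The hypothetical helper below
# checks it numerically on a tiny random system; all names are local to the demo.
def _check_woodbury_identity(n=6, k=2, seed=0):
    """Verify y^T (N + F phi F^T)^-1 y == y^T N^-1 y - TNr^T Sigma^-1 TNr numerically."""
    rng = np.random.default_rng(seed)
    Nvec = rng.uniform(1.0, 2.0, n)             # diagonal white-noise covariance N
    F = rng.standard_normal((n, k))             # reduced basis F
    phi = np.diag(rng.uniform(1.0, 2.0, k))     # prior covariance phi of the basis amplitudes
    y = rng.standard_normal(n)
    direct = y @ np.linalg.solve(np.diag(Nvec) + F @ phi @ F.T, y)
    TNr = F.T @ (y / Nvec)                      # F^T N^-1 y
    Sigma = F.T @ (F / Nvec[:, None]) + np.linalg.inv(phi)  # F^T N^-1 F + phi^-1
    woodbury = y @ (y / Nvec) - TNr @ sl.cho_solve(sl.cho_factor(Sigma), TNr)
    return bool(np.isclose(direct, woodbury))   # True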
def get_reduced_chi2(pta, xs):
"""
Compute Generalized Reduced Chi Square for PTA using degrees of freedom
(DOF), defined by dof= NTOAs - N Timing Parameters - N Model Params.
"""
keys = [ky for ky in pta._signal_dict.keys() if 'timing_model' in ky]
chi2 = get_chi2(pta, xs)
degs = np.array([pta._signal_dict[ky].get_basis().shape for ky in keys])
dof = np.sum(degs[:, 0]) - np.sum(degs[:, 1])
dof -= len(pta.param_names)
return chi2/dof
| 1,704 | 29.446429 | 81 | py |
Graph-Unlearning | Graph-Unlearning-main/main.py | import logging
import os
import torch
from exp.exp_graph_partition import ExpGraphPartition
from exp.exp_node_edge_unlearning import ExpNodeEdgeUnlearning
from exp.exp_unlearning import ExpUnlearning
from exp.exp_attack_unlearning import ExpAttackUnlearning
from parameter_parser import parameter_parser
def config_logger(save_name):
# create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s:%(asctime)s: - %(name)s - : %(message)s')
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def main(args, exp):
# config the logger
logger_name = "_".join((exp, args['dataset_name'], args['partition_method'], str(args['num_shards']), str(args['test_ratio'])))
config_logger(logger_name)
logging.info(logger_name)
torch.set_num_threads(args["num_threads"])
torch.cuda.set_device(args["cuda"])
os.environ["CUDA_VISIBLE_DEVICES"] = str(args["cuda"])
# subroutine entry for different methods
if exp == 'partition':
ExpGraphPartition(args)
elif exp == 'unlearning':
ExpUnlearning(args)
elif exp == 'node_edge_unlearning':
ExpNodeEdgeUnlearning(args)
elif exp == 'attack_unlearning':
ExpAttackUnlearning(args)
else:
        raise Exception('unsupported experiment')
if __name__ == "__main__":
args = parameter_parser()
main(args, args['exp'])
| 1,499 | 27.846154 | 131 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/node_embedding.py | import logging
import config
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_dataset.data_store import DataStore
class NodeEmbedding:
def __init__(self, args, graph, data):
        super(NodeEmbedding, self).__init__()
self.logger = logging.getLogger(__name__)
self.args = args
self.graph = graph
self.data = data
self.data_store = DataStore(self.args)
def sage_encoder(self):
if self.args['is_gen_embedding']:
self.logger.info("generating node embeddings with GraphSage...")
node_to_embedding = {}
# run sage
self.target_model = SAGE(self.data.num_features, len(self.data.y.unique()), self.data)
# self.target_model.train_model(50)
# load a pretrained GNN model for generating node embeddings
target_model_name = '_'.join((self.args['target_model'], 'random_1',
str(self.args['shard_size_delta']),
str(self.args['ratio_deleted_edges']), '0_0_1'))
target_model_file = config.MODEL_PATH + self.args['dataset_name'] + '/' + target_model_name
self.target_model.load_model(target_model_file)
logits = self.target_model.generate_embeddings().detach().cpu().numpy()
for node in self.graph.nodes:
node_to_embedding[node] = logits[node]
self.data_store.save_embeddings(node_to_embedding)
else:
node_to_embedding = self.data_store.load_embeddings()
return node_to_embedding
| 1,606 | 34.711111 | 103 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/walker.py | import itertools
import math
import random
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from tqdm import trange
from .alias import alias_sample, create_alias_table
from .utils import partition_num
class RandomWalker:
def __init__(self, G, p=1, q=1, use_rejection_sampling=0):
"""
:param G:
:param p: Return parameter,controls the likelihood of immediately revisiting a node in the walk.
:param q: In-out parameter,allows the search to differentiate between “inward” and “outward” nodes
:param use_rejection_sampling: Whether to use the rejection sampling strategy in node2vec.
"""
self.G = G
self.p = p
self.q = q
self.use_rejection_sampling = use_rejection_sampling
def deepwalk_walk(self, walk_length, start_node):
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(self.G.neighbors(cur))
if len(cur_nbrs) > 0:
walk.append(random.choice(cur_nbrs))
else:
break
return walk
def node2vec_walk(self, walk_length, start_node):
G = self.G
alias_nodes = self.alias_nodes
alias_edges = self.alias_edges
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(G.neighbors(cur))
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(
cur_nbrs[alias_sample(alias_nodes[cur][0], alias_nodes[cur][1])])
else:
prev = walk[-2]
edge = (prev, cur)
next_node = cur_nbrs[alias_sample(alias_edges[edge][0],
alias_edges[edge][1])]
walk.append(next_node)
else:
break
return walk
def node2vec_walk2(self, walk_length, start_node):
"""
Reference:
KnightKing: A Fast Distributed Graph Random Walk Engine
http://madsys.cs.tsinghua.edu.cn/publications/SOSP19-yang.pdf
"""
def rejection_sample(inv_p, inv_q, nbrs_num):
upper_bound = max(1.0, max(inv_p, inv_q))
lower_bound = min(1.0, min(inv_p, inv_q))
shatter = 0
second_upper_bound = max(1.0, inv_q)
if (inv_p > second_upper_bound):
shatter = second_upper_bound / nbrs_num
upper_bound = second_upper_bound + shatter
return upper_bound, lower_bound, shatter
G = self.G
alias_nodes = self.alias_nodes
inv_p = 1.0 / self.p
inv_q = 1.0 / self.q
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(G.neighbors(cur))
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(
cur_nbrs[alias_sample(alias_nodes[cur][0], alias_nodes[cur][1])])
else:
upper_bound, lower_bound, shatter = rejection_sample(
inv_p, inv_q, len(cur_nbrs))
prev = walk[-2]
prev_nbrs = set(G.neighbors(prev))
while True:
prob = random.random() * upper_bound
if (prob + shatter >= upper_bound):
next_node = prev
break
next_node = cur_nbrs[alias_sample(
alias_nodes[cur][0], alias_nodes[cur][1])]
if (prob < lower_bound):
break
if (prob < inv_p and next_node == prev):
break
_prob = 1.0 if next_node in prev_nbrs else inv_q
if (prob < _prob):
break
walk.append(next_node)
else:
break
return walk
def simulate_walks(self, num_walks, walk_length, workers=1, verbose=0):
G = self.G
nodes = list(G.nodes())
results = Parallel(n_jobs=workers, verbose=verbose, )(
delayed(self._simulate_walks)(nodes, num, walk_length) for num in
partition_num(num_walks, workers))
walks = list(itertools.chain(*results))
return walks
def _simulate_walks(self, nodes, num_walks, walk_length,):
walks = []
for _ in range(num_walks):
random.shuffle(nodes)
for v in nodes:
if self.p == 1 and self.q == 1:
walks.append(self.deepwalk_walk(
walk_length=walk_length, start_node=v))
elif self.use_rejection_sampling:
walks.append(self.node2vec_walk2(
walk_length=walk_length, start_node=v))
else:
walks.append(self.node2vec_walk(
walk_length=walk_length, start_node=v))
return walks
def get_alias_edge(self, t, v):
"""
        Compute the unnormalized transition probabilities between node v and its neighbors, given the previously visited node t.
:param t:
:param v:
:return:
"""
G = self.G
p = self.p
q = self.q
unnormalized_probs = []
for x in G.neighbors(v):
weight = G[v][x].get('weight', 1.0) # w_vx
if x == t: # d_tx == 0
unnormalized_probs.append(weight/p)
elif G.has_edge(x, t): # d_tx == 1
unnormalized_probs.append(weight)
else: # d_tx > 1
unnormalized_probs.append(weight/q)
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob)/norm_const for u_prob in unnormalized_probs]
return create_alias_table(normalized_probs)
def preprocess_transition_probs(self):
"""
Preprocessing of transition probabilities for guiding the random walks.
"""
G = self.G
alias_nodes = {}
for node in G.nodes():
unnormalized_probs = [G[node][nbr].get('weight', 1.0)
for nbr in G.neighbors(node)]
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob)/norm_const for u_prob in unnormalized_probs]
alias_nodes[node] = create_alias_table(normalized_probs)
if not self.use_rejection_sampling:
alias_edges = {}
for edge in G.edges():
alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
if not G.is_directed():
alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])
self.alias_edges = alias_edges
self.alias_nodes = alias_nodes
return
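# Usage sketch (illustration only, not part of the original module): build a small
# graph and generate node2vec-style biased walks. networkx and the karate-club graph
# are used purely for demonstration and are not required by the rest of this module.
def _random_walker_demo(p=0.25, q=4.0, num_walks=2, walk_length=10):
    import networkx as nx  # local import, demo only
    G = nx.karate_club_graph()
    walker = RandomWalker(G, p=p, q=q, use_rejection_sampling=0)
    walker.preprocess_transition_probs()  # builds the alias tables for nodes and edges
    return walker.simulate_walks(num_walks=num_walks, walk_length=walk_length, workers=1)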
class BiasedWalker:
def __init__(self, idx2node, temp_path):
self.idx2node = idx2node
self.idx = list(range(len(self.idx2node)))
self.temp_path = temp_path
pass
def simulate_walks(self, num_walks, walk_length, stay_prob=0.3, workers=1, verbose=0):
layers_adj = pd.read_pickle(self.temp_path+'layers_adj.pkl')
layers_alias = pd.read_pickle(self.temp_path+'layers_alias.pkl')
layers_accept = pd.read_pickle(self.temp_path+'layers_accept.pkl')
gamma = pd.read_pickle(self.temp_path+'gamma.pkl')
walks = []
initialLayer = 0
nodes = self.idx # list(self.g.nodes())
results = Parallel(n_jobs=workers, verbose=verbose, )(
delayed(self._simulate_walks)(nodes, num, walk_length, stay_prob, layers_adj, layers_accept, layers_alias, gamma) for num in
partition_num(num_walks, workers))
walks = list(itertools.chain(*results))
return walks
def _simulate_walks(self, nodes, num_walks, walk_length, stay_prob, layers_adj, layers_accept, layers_alias, gamma):
walks = []
for _ in range(num_walks):
random.shuffle(nodes)
for v in nodes:
walks.append(self._exec_random_walk(layers_adj, layers_accept, layers_alias,
v, walk_length, gamma, stay_prob))
return walks
def _exec_random_walk(self, graphs, layers_accept, layers_alias, v, walk_length, gamma, stay_prob=0.3):
initialLayer = 0
layer = initialLayer
path = []
path.append(self.idx2node[v])
while len(path) < walk_length:
r = random.random()
if(r < stay_prob): # same layer
v = chooseNeighbor(v, graphs, layers_alias,
layers_accept, layer)
path.append(self.idx2node[v])
else: # different layer
r = random.random()
try:
x = math.log(gamma[layer][v] + math.e)
p_moveup = (x / (x + 1))
except:
print(layer, v)
raise ValueError()
if(r > p_moveup):
if(layer > initialLayer):
layer = layer - 1
else:
if((layer + 1) in graphs and v in graphs[layer + 1]):
layer = layer + 1
return path
def chooseNeighbor(v, graphs, layers_alias, layers_accept, layer):
v_list = graphs[layer][v]
idx = alias_sample(layers_accept[layer][v], layers_alias[layer][v])
v = v_list[idx]
return v
| 9,789 | 34.34296 | 136 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/classify.py | from __future__ import print_function
import numpy
from sklearn.metrics import f1_score, accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
class TopKRanker(OneVsRestClassifier):
def predict(self, X, top_k_list):
probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))
all_labels = []
for i, k in enumerate(top_k_list):
probs_ = probs[i, :]
labels = self.classes_[probs_.argsort()[-k:]].tolist()
probs_[:] = 0
probs_[labels] = 1
all_labels.append(probs_)
return numpy.asarray(all_labels)
class Classifier(object):
def __init__(self, embeddings, clf):
self.embeddings = embeddings
self.clf = TopKRanker(clf)
self.binarizer = MultiLabelBinarizer(sparse_output=True)
def train(self, X, Y, Y_all):
self.binarizer.fit(Y_all)
X_train = [self.embeddings[x] for x in X]
Y = self.binarizer.transform(Y)
self.clf.fit(X_train, Y)
def evaluate(self, X, Y):
top_k_list = [len(l) for l in Y]
Y_ = self.predict(X, top_k_list)
Y = self.binarizer.transform(Y)
averages = ["micro", "macro", "samples", "weighted"]
results = {}
for average in averages:
results[average] = f1_score(Y, Y_, average=average)
results['acc'] = accuracy_score(Y,Y_)
print('-------------------')
print(results)
return results
print('-------------------')
def predict(self, X, top_k_list):
X_ = numpy.asarray([self.embeddings[x] for x in X])
Y = self.clf.predict(X_, top_k_list=top_k_list)
return Y
def split_train_evaluate(self, X, Y, train_precent, seed=0):
state = numpy.random.get_state()
training_size = int(train_precent * len(X))
numpy.random.seed(seed)
shuffle_indices = numpy.random.permutation(numpy.arange(len(X)))
X_train = [X[shuffle_indices[i]] for i in range(training_size)]
Y_train = [Y[shuffle_indices[i]] for i in range(training_size)]
X_test = [X[shuffle_indices[i]] for i in range(training_size, len(X))]
Y_test = [Y[shuffle_indices[i]] for i in range(training_size, len(X))]
self.train(X_train, Y_train, Y)
numpy.random.set_state(state)
return self.evaluate(X_test, Y_test)
def read_node_label(filename, skip_head=False):
fin = open(filename, 'r')
X = []
Y = []
while 1:
if skip_head:
fin.readline()
l = fin.readline()
if l == '':
break
vec = l.strip().split(' ')
X.append(vec[0])
Y.append(vec[1:])
fin.close()
return X, Y
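# Usage sketch (illustration only, not part of the original module): evaluate node
# embeddings with a one-vs-rest logistic regression. `embeddings` (a dict mapping
# node id -> vector) and `label_path` are assumed to be supplied by the caller.
def _classifier_demo(embeddings, label_path, train_ratio=0.8):
    from sklearn.linear_model import LogisticRegression  # demo-only import
    X, Y = read_node_label(label_path)
    clf = Classifier(embeddings=embeddings, clf=LogisticRegression())
    return clf.split_train_evaluate(X, Y, train_ratio)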
| 2,772 | 31.244186 | 78 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/alias.py | import numpy as np
def create_alias_table(area_ratio):
"""
:param area_ratio: sum(area_ratio)=1
:return: accept,alias
"""
l = len(area_ratio)
accept, alias = [0] * l, [0] * l
small, large = [], []
area_ratio_ = np.array(area_ratio) * l
for i, prob in enumerate(area_ratio_):
if prob < 1.0:
small.append(i)
else:
large.append(i)
while small and large:
small_idx, large_idx = small.pop(), large.pop()
accept[small_idx] = area_ratio_[small_idx]
alias[small_idx] = large_idx
area_ratio_[large_idx] = area_ratio_[large_idx] - \
(1 - area_ratio_[small_idx])
if area_ratio_[large_idx] < 1.0:
small.append(large_idx)
else:
large.append(large_idx)
while large:
large_idx = large.pop()
accept[large_idx] = 1
while small:
small_idx = small.pop()
accept[small_idx] = 1
return accept, alias
def alias_sample(accept, alias):
"""
:param accept:
:param alias:
:return: sample index
"""
N = len(accept)
i = int(np.random.random()*N)
r = np.random.random()
if r < accept[i]:
return i
else:
return alias[i]
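# Sanity-check sketch (illustration only, not part of the original module): with
# enough draws, the empirical frequencies produced by alias sampling should
# approximate the input probabilities.
def _alias_demo(n_draws=100000, seed=0):
    np.random.seed(seed)
    probs = [0.1, 0.2, 0.3, 0.4]
    accept, alias = create_alias_table(probs)
    draws = np.array([alias_sample(accept, alias) for _ in range(n_draws)])
    return np.bincount(draws, minlength=len(probs)) / n_draws  # ~ [0.1, 0.2, 0.3, 0.4]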
| 1,261 | 21.945455 | 59 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/utils.py | def preprocess_nxgraph(graph):
node2idx = {}
idx2node = []
node_size = 0
for node in graph.nodes():
node2idx[node] = node_size
idx2node.append(node)
node_size += 1
return idx2node, node2idx
def partition_dict(vertices, workers):
batch_size = (len(vertices) - 1) // workers + 1
part_list = []
part = []
count = 0
for v1, nbs in vertices.items():
part.append((v1, nbs))
count += 1
if count % batch_size == 0:
part_list.append(part)
part = []
if len(part) > 0:
part_list.append(part)
return part_list
def partition_list(vertices, workers):
batch_size = (len(vertices) - 1) // workers + 1
part_list = []
part = []
count = 0
for v1, nbs in enumerate(vertices):
part.append((v1, nbs))
count += 1
if count % batch_size == 0:
part_list.append(part)
part = []
if len(part) > 0:
part_list.append(part)
return part_list
def partition_num(num, workers):
if num % workers == 0:
return [num//workers]*workers
else:
return [num//workers]*workers + [num % workers]
| 1,191 | 23.326531 | 55 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_utils/utils.py | import os
import errno
import numpy as np
import pandas as pd
import networkx as nx
import torch
from scipy.sparse import coo_matrix
from tqdm import tqdm
def graph_reader(path):
"""
Function to read the graph from the path.
:param path: Path to the edge list.
:return graph: NetworkX object returned.
"""
graph = nx.from_edgelist(pd.read_csv(path).values.tolist())
return graph
def feature_reader(path):
"""
Reading the sparse feature matrix stored as csv from the disk.
:param path: Path to the csv file.
:return features: Dense matrix of features.
"""
features = pd.read_csv(path)
node_index = features["node_id"].values.tolist()
feature_index = features["feature_id"].values.tolist()
feature_values = features["value"].values.tolist()
node_count = max(node_index) + 1
feature_count = max(feature_index) + 1
features = coo_matrix((feature_values, (node_index, feature_index)), shape=(node_count, feature_count)).toarray()
return features
def target_reader(path):
"""
Reading the target vector from disk.
:param path: Path to the target.
:return target: Target vector.
"""
target = np.array(pd.read_csv(path)["target"]).reshape(-1, 1)
return target
def make_adjacency(graph, max_degree, sel=None):
all_nodes = np.array(graph.nodes())
# Initialize w/ links to a dummy node
n_nodes = len(all_nodes)
adj = (np.zeros((n_nodes + 1, max_degree)) + n_nodes).astype(int)
if sel is not None:
# only look at nodes in training set
all_nodes = all_nodes[sel]
for node in tqdm(all_nodes):
neibs = np.array(list(graph.neighbors(node)))
if sel is not None:
neibs = neibs[sel[neibs]]
if len(neibs) > 0:
if len(neibs) > max_degree:
neibs = np.random.choice(neibs, max_degree, replace=False)
elif len(neibs) < max_degree:
extra = np.random.choice(neibs, max_degree - neibs.shape[0], replace=True)
neibs = np.concatenate([neibs, extra])
adj[node, :] = neibs
return adj
def connected_component_subgraphs(graph):
"""
Find all connected subgraphs in a networkx Graph
Args:
graph (Graph): A networkx Graph
Yields:
generator: A subgraph generator
"""
for c in nx.connected_components(graph):
yield graph.subgraph(c)
def check_exist(file_name):
if not os.path.exists(os.path.dirname(file_name)):
try:
os.makedirs(os.path.dirname(file_name))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def filter_edge_index(edge_index, node_indices, reindex=True):
assert np.all(np.diff(node_indices) >= 0), 'node_indices must be sorted'
if isinstance(edge_index, torch.Tensor):
edge_index = edge_index.cpu()
node_index = np.isin(edge_index, node_indices)
col_index = np.nonzero(np.logical_and(node_index[0], node_index[1]))[0]
edge_index = edge_index[:, col_index]
if reindex:
return np.searchsorted(node_indices, edge_index)
else:
return edge_index
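# Illustration only (not part of the original module): filter_edge_index keeps only
# the edges whose endpoints are both in node_indices and, with reindex=True, relabels
# them by their position in node_indices.
def _filter_edge_index_demo():
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 2, 3, 0]])  # edges (0,1), (1,2), (2,3), (3,0)
    node_indices = np.array([0, 1, 3])         # must be sorted
    # edges (0,1) and (3,0) survive and are relabelled to [[0, 2], [1, 0]]
    return filter_edge_index(edge_index, node_indices)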
def pyg_to_nx(data):
"""
Convert a torch geometric Data to networkx Graph.
Args:
data (Data): A torch geometric Data.
Returns:
Graph: A networkx Graph.
"""
graph = nx.Graph()
graph.add_nodes_from(np.arange(data.num_nodes))
edge_index = data.edge_index.numpy()
for u, v in np.transpose(edge_index):
graph.add_edge(u, v)
return graph
def edge_index_to_nx(edge_index, num_nodes):
"""
Convert a torch geometric Data to networkx Graph by edge_index.
Args:
edge_index (Data.edge_index): A torch geometric Data.
num_nodes (int): Number of nodes in a graph.
Returns:
Graph: networkx Graph
"""
graph = nx.Graph()
graph.add_nodes_from(np.arange(num_nodes))
edge_index = edge_index.numpy()
for u, v in np.transpose(edge_index):
graph.add_edge(u, v)
return graph
def filter_edge_index_1(data, node_indices):
"""
Remove unnecessary edges from a torch geometric Data, only keep the edges between node_indices.
Args:
data (Data): A torch geometric Data.
node_indices (list): A list of nodes to be deleted from data.
Returns:
data.edge_index: The new edge_index after removing the node_indices.
"""
if isinstance(data.edge_index, torch.Tensor):
data.edge_index = data.edge_index.cpu()
edge_index = data.edge_index
node_index = np.isin(edge_index, node_indices)
col_index = np.nonzero(np.logical_and(node_index[0], node_index[1]))[0]
edge_index = data.edge_index[:, col_index]
return np.searchsorted(node_indices, edge_index)
| 4,851 | 27.046243 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_utils/logger.py | from texttable import Texttable
def tab_printer(args):
"""
Function to print the logs in a nice tabular format.
:param args: Parameters used for the model.
"""
# args = vars(args)
keys = sorted(args.keys())
t = Texttable()
t.add_rows([["Parameter", "Value"]] + [[k.replace("_"," ").capitalize(),args[k]] for k in keys])
print(t.draw()) | 373 | 30.166667 | 101 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/opt_dataset.py | from torch.utils.data import Dataset
class OptDataset(Dataset):
def __init__(self, posteriors, labels):
self.posteriors = posteriors
self.labels = labels
def __getitem__(self, index):
ret_posterior = {}
for shard, post in self.posteriors.items():
ret_posterior[shard] = post[index]
return ret_posterior, self.labels[index]
def __len__(self):
return self.labels.shape[0]
| 448 | 22.631579 | 51 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/optimal_aggregator.py | import copy
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from torch_geometric.data import Data
from lib_aggregator.opt_dataset import OptDataset
from lib_dataset.data_store import DataStore
from lib_utils import utils
class OptimalAggregator:
def __init__(self, run, target_model, data, args):
self.logger = logging.getLogger('optimal_aggregator')
self.args = args
self.run = run
self.target_model = target_model
self.data = data
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_shards = args['num_shards']
def generate_train_data(self):
data_store = DataStore(self.args)
train_indices, _ = data_store.load_train_test_split()
# sample a set of nodes from train_indices
if self.args["num_opt_samples"] == 1000:
train_indices = np.random.choice(train_indices, size=1000, replace=False)
elif self.args["num_opt_samples"] == 10000:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0] * 0.1), replace=False)
elif self.args["num_opt_samples"] == 1:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0]), replace=False)
train_indices = np.sort(train_indices)
self.logger.info("Using %s samples for optimization" % (int(train_indices.shape[0])))
x = self.data.x[train_indices]
y = self.data.y[train_indices]
edge_index = utils.filter_edge_index(self.data.edge_index, train_indices)
train_data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
train_data.train_mask = torch.zeros(train_indices.shape[0], dtype=torch.bool)
train_data.test_mask = torch.ones(train_indices.shape[0], dtype=torch.bool)
self.true_labels = y
self.posteriors = {}
for shard in range(self.num_shards):
self.target_model.data = train_data
data_store.load_target_model(self.run, self.target_model, shard)
self.posteriors[shard] = self.target_model.posterior().to(self.device)
def optimization(self):
weight_para = nn.Parameter(torch.full((self.num_shards,), fill_value=1.0 / self.num_shards), requires_grad=True)
optimizer = optim.Adam([weight_para], lr=self.args['opt_lr'])
scheduler = MultiStepLR(optimizer, milestones=[500, 1000], gamma=self.args['opt_lr'])
train_dset = OptDataset(self.posteriors, self.true_labels)
train_loader = DataLoader(train_dset, batch_size=32, shuffle=True, num_workers=0)
min_loss = 1000.0
for epoch in range(self.args['opt_num_epochs']):
loss_all = 0.0
for posteriors, labels in train_loader:
labels = labels.to(self.device)
optimizer.zero_grad()
loss = self._loss_fn(posteriors, labels, weight_para)
loss.backward()
loss_all += loss
optimizer.step()
with torch.no_grad():
weight_para[:] = torch.clamp(weight_para, min=0.0)
scheduler.step()
if loss_all < min_loss:
ret_weight_para = copy.deepcopy(weight_para)
min_loss = loss_all
self.logger.info('epoch: %s, loss: %s' % (epoch, loss_all))
return ret_weight_para / torch.sum(ret_weight_para)
def _loss_fn(self, posteriors, labels, weight_para):
aggregate_posteriors = torch.zeros_like(posteriors[0])
for shard in range(self.num_shards):
aggregate_posteriors += weight_para[shard] * posteriors[shard]
aggregate_posteriors = F.softmax(aggregate_posteriors, dim=1)
loss_1 = F.cross_entropy(aggregate_posteriors, labels)
loss_2 = torch.sqrt(torch.sum(weight_para ** 2))
return loss_1 + loss_2
| 4,054 | 37.990385 | 120 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/aggregator.py | import logging
import torch
torch.cuda.empty_cache()
from sklearn.metrics import f1_score
import numpy as np
from lib_aggregator.optimal_aggregator import OptimalAggregator
from lib_dataset.data_store import DataStore
class Aggregator:
def __init__(self, run, target_model, data, shard_data, args):
self.logger = logging.getLogger('Aggregator')
self.args = args
self.data_store = DataStore(self.args)
self.run = run
self.target_model = target_model
self.data = data
self.shard_data = shard_data
self.num_shards = args['num_shards']
def generate_posterior(self, suffix=""):
self.true_label = self.shard_data[0].y[self.shard_data[0]['test_mask']].detach().cpu().numpy()
self.posteriors = {}
for shard in range(self.args['num_shards']):
self.target_model.data = self.shard_data[shard]
self.data_store.load_target_model(self.run, self.target_model, shard, suffix)
self.posteriors[shard] = self.target_model.posterior()
self.logger.info("Saving posteriors.")
self.data_store.save_posteriors(self.posteriors, self.run, suffix)
def aggregate(self):
if self.args['aggregator'] == 'mean':
aggregate_f1_score = self._mean_aggregator()
elif self.args['aggregator'] == 'optimal':
aggregate_f1_score = self._optimal_aggregator()
elif self.args['aggregator'] == 'majority':
aggregate_f1_score = self._majority_aggregator()
else:
raise Exception("unsupported aggregator.")
return aggregate_f1_score
def _mean_aggregator(self):
posterior = self.posteriors[0]
for shard in range(1, self.num_shards):
posterior += self.posteriors[shard]
posterior = posterior / self.num_shards
return f1_score(self.true_label, posterior.argmax(axis=1).cpu().numpy(), average="micro")
def _majority_aggregator(self):
pred_labels = []
for shard in range(self.num_shards):
pred_labels.append(self.posteriors[shard].argmax(axis=1).cpu().numpy())
pred_labels = np.stack(pred_labels)
pred_label = np.argmax(
np.apply_along_axis(np.bincount, axis=0, arr=pred_labels, minlength=self.posteriors[0].shape[1]), axis=0)
return f1_score(self.true_label, pred_label, average="micro")
def _optimal_aggregator(self):
optimal = OptimalAggregator(self.run, self.target_model, self.data, self.args)
optimal.generate_train_data()
weight_para = optimal.optimization()
self.data_store.save_optimal_weight(weight_para, run=self.run)
posterior = self.posteriors[0] * weight_para[0]
for shard in range(1, self.num_shards):
posterior += self.posteriors[shard] * weight_para[shard]
return f1_score(self.true_label, posterior.argmax(axis=1).cpu().numpy(), average="micro")
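# Illustration only (not part of the original class): the bincount/argmax pattern in
# _majority_aggregator performs per-sample majority voting over the shard predictions.
def _majority_vote_demo():
    pred_labels = np.array([[0, 1, 2, 1],
                            [0, 1, 1, 1],
                            [2, 1, 2, 0]])  # 3 shards x 4 test samples
    counts = np.apply_along_axis(np.bincount, axis=0, arr=pred_labels, minlength=3)
    return np.argmax(counts, axis=0)  # -> array([0, 1, 2, 1])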
| 2,958 | 35.9875 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_kmeans.py | import logging
import copy
from tqdm import tqdm
import numpy as np
import cupy as np
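# Note: cupy shadows the numpy import above, so the array operations in this module
# run on the GPU; results are moved back to the host via .get() where needed.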
class ConstrainedKmeans:
def __init__(self, data_feat, num_clusters, node_threshold, terminate_delta, max_iteration=20):
self.logger = logging.getLogger('constrained_kmeans')
self.data_feat = data_feat
self.num_clusters = num_clusters
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
self.max_iteration = max_iteration
def initialization(self):
centroids = np.random.choice(np.arange(self.data_feat.shape[0]), self.num_clusters, replace=False)
self.centroid = {}
for i in range(self.num_clusters):
self.centroid[i] = self.data_feat[centroids[i].get()]
def clustering(self):
centroid = copy.deepcopy(self.centroid)
km_delta = []
pbar = tqdm(total=self.max_iteration)
pbar.set_description('Clustering')
for i in range(self.max_iteration):
self.logger.info('iteration %s' % (i,))
self._node_reassignment()
self._centroid_updating()
# record the average change of centroids, if the change is smaller than a very small value, then terminate
delta = self._centroid_delta(centroid, self.centroid)
km_delta.append(delta)
centroid = copy.deepcopy(self.centroid)
if delta <= self.terminate_delta:
break
self.logger.info("delta: %s" % delta)
pbar.close()
return self.clusters, km_delta
def _node_reassignment(self):
self.clusters = {}
for i in range(self.num_clusters):
self.clusters[i] = np.zeros(0, dtype=np.uint64)
distance = np.zeros([self.num_clusters, self.data_feat.shape[0]])
for i in range(self.num_clusters):
distance[i] = np.sum(np.power((self.data_feat - self.centroid[i]), 2), axis=1)
sort_indices = np.unravel_index(np.argsort(distance, axis=None), distance.shape)
clusters = sort_indices[0]
users = sort_indices[1]
selected_nodes = np.zeros(0, dtype=np.int64)
counter = 0
while len(selected_nodes) < self.data_feat.shape[0]:
cluster = int(clusters[counter])
user = users[counter]
if self.clusters[cluster].size < self.node_threshold:
self.clusters[cluster] = np.append(self.clusters[cluster], np.array(int(user)))
selected_nodes = np.append(selected_nodes, np.array(int(user)))
# delete all the following pairs for the selected user
user_indices = np.where(users == user)[0]
a = np.arange(users.size)
b = user_indices[user_indices > counter]
remain_indices = a[np.where(np.logical_not(np.isin(a, b)))[0]]
clusters = clusters[remain_indices]
users = users[remain_indices]
counter += 1
def _centroid_updating(self):
for i in range(self.num_clusters):
self.centroid[i] = np.mean(self.data_feat[self.clusters[i].astype(int)], axis=0)
def _centroid_delta(self, centroid_pre, centroid_cur):
delta = 0.0
for i in range(len(centroid_cur)):
delta += np.sum(np.abs(centroid_cur[i] - centroid_pre[i]))
return delta
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
data_feat = np.array([[1, 2],
[1, 3],
[1, 4],
[1, 5],
[10, 2],
[10, 3]])
num_clusters = 2
node_threshold = 3
terminate_delta = 0.001
cluster = ConstrainedKmeans(data_feat, num_clusters, node_threshold, terminate_delta)
cluster.initialization()
cluster.clustering() | 4,038 | 34.743363 | 118 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/graph_partition.py | import logging
from lib_graph_partition.partition_kmeans import PartitionKMeans
from lib_graph_partition.partition_lpa import PartitionConstrainedLPA, PartitionLPA, PartitionConstrainedLPABase
from lib_graph_partition.metis_partition import MetisPartition
from lib_graph_partition.partition_random import PartitionRandom
class GraphPartition:
def __init__(self, args, graph, dataset=None):
self.logger = logging.getLogger(__name__)
self.args = args
self.graph = graph
self.dataset = dataset
self.partition_method = self.args['partition_method']
self.num_shards = self.args['num_shards']
def graph_partition(self):
self.logger.info('graph partition, method: %s' % self.partition_method)
if self.partition_method == 'random':
partition_method = PartitionRandom(self.args, self.graph)
elif self.partition_method in ['sage_km', 'sage_km_base']:
partition_method = PartitionKMeans(self.args, self.graph, self.dataset)
elif self.partition_method == 'lpa' and not self.args['is_constrained']:
partition_method = PartitionLPA(self.args, self.graph)
elif self.partition_method == 'lpa' and self.args['is_constrained']:
partition_method = PartitionConstrainedLPA(self.args, self.graph)
elif self.partition_method == 'lpa_base':
partition_method = PartitionConstrainedLPABase(self.args, self.graph)
elif self.partition_method == 'metis':
partition_method = MetisPartition(self.args, self.graph, self.dataset)
else:
raise Exception('Unsupported partition method')
return partition_method.partition()
| 1,708 | 42.820513 | 112 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_kmeans_base.py | # An implementation of ``Balanced K-Means for Clustering.'' (https://rdcu.be/cESzk)
import logging
import copy
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from munkres import Munkres
from lib_graph_partition.hungarian import Hungarian
from lib_graph_partition.hungarian_1 import KMMatcher
class ConstrainedKmeansBase:
def __init__(self, data_feat, num_clusters, node_threshold, terminate_delta, max_iteration=20):
self.logger = logging.getLogger('constrained_kmeans_base')
self.data_feat = data_feat
self.num_clusters = num_clusters
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
self.max_iteration = max_iteration
def initialization(self):
centroids = np.random.choice(np.arange(self.data_feat.shape[0]), self.num_clusters, replace=False)
self.centroid = dict(zip(range(self.num_clusters), self.data_feat[centroids]))
def clustering(self):
centroid = copy.deepcopy(self.centroid)
centroid_delta = {}
km_base_delta = []
for i in range(self.max_iteration):
self.logger.info('iteration %s' % (i))
self._node_reassignment()
self._centroid_updating()
# record the average change of centroids, if the change is smaller than a very small value, then terminate
delta = self._centroid_delta(centroid, self.centroid)
centroid_delta[i] = delta
km_base_delta.append(delta)
centroid = copy.deepcopy(self.centroid)
if delta <= self.terminate_delta:
break
self.logger.info("delta: %s" % delta)
return self.clusters, km_base_delta
def _node_reassignment(self):
self.logger.info('Node reassignment begins')
self.clusters = dict(
zip(np.arange(self.num_clusters), [np.zeros(0, dtype=np.uint64) for _ in range(self.num_clusters)]))
distance = np.zeros([self.num_clusters, self.data_feat.shape[0]])
# cost_matrix = np.zeros([self.data_feat.shape[0], self.data_feat.shape[0]])
for i in range(self.num_clusters):
distance[i] = np.sum((self.data_feat - self.centroid[i]) ** 2, axis=1)
cost_matrix = np.tile(distance, (self.data_feat.shape[0], 1))
cost_matrix = cost_matrix[:self.data_feat.shape[0], :]
# too slow
# matrix = np.array(cost_matrix)
# m = Munkres()
# assignment = np.array(assignment)
# assignment = assignment[:, 1]
# hungarian = Hungarian(cost_matrix)
# hungarian.calculate()
# assignment = hungarian.get_results()
# assignment = np.array(assignment)
# assignment = assignment[np.argsort(assignment[:, 0])]
# assignment = assignment[:, 1]
matcher = KMMatcher(cost_matrix)
assignment, _ = matcher.solve()
partition = np.zeros(self.data_feat.shape[0])
for i in range(self.data_feat.shape[0]):
partition[assignment[i]] = i % self.num_clusters
for i in range(self.num_clusters):
self.clusters[i] = np.where(partition == i)[0]
def _centroid_updating(self):
self.logger.info('Updating centroid begins')
for i in range(self.num_clusters):
self.centroid[i] = np.mean(self.data_feat[self.clusters[i]], axis=0)
def _centroid_delta(self, centroid_pre, centroid_cur):
delta = 0.0
for i in range(len(centroid_cur)):
delta += np.sum(np.abs(centroid_cur[i] - centroid_pre[i]))
return delta
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
data_feat = np.array([[1, 2],
[1, 3],
[1, 4],
[1, 5],
[10, 2],
[10, 3]])
num_clusters = 2
node_threshold = 3
terminate_delta = 0.001
cluster = ConstrainedKmeansBase(data_feat, num_clusters, node_threshold, terminate_delta)
cluster.initialization()
cluster.clustering()
| 4,307 | 35.820513 | 118 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/metis_partition.py | import numpy as np
import networkx as nx
import pymetis
from torch_geometric.data import ClusterData
from torch_geometric.utils import from_networkx
from lib_graph_partition.partition import Partition
class MetisPartition(Partition):
def __init__(self, args, graph, dataset):
super(MetisPartition, self).__init__(args, graph, dataset)
self.graph = graph
self.args = args
self.data = dataset
def partition(self, recursive=False):
# recursive (bool, optional): If set to :obj:`True`, will use multilevel
# recursive bisection instead of multilevel k-way partitioning.
# (default: :obj:`False`)
# only use train data, not the whole dataset
self.train_data = from_networkx(self.graph)
data = ClusterData(self.train_data, self.args['num_shards'], recursive=recursive)
community_to_node = {}
for i in range(self.args['num_shards']):
community_to_node[i] = [*range(data.partptr[i], data.partptr[i+1], 1)]
# map node back to original graph
for com in range(self.args['num_shards']):
community_to_node[com] = np.array(list(self.graph.nodes))[data.partptr.numpy()[com]:data.partptr.numpy()[com+1]]
return community_to_node
class PyMetisPartition(Partition):
def __init__(self, args, graph, dataset):
super(PyMetisPartition, self).__init__(args, graph, dataset)
self.graph = graph
self.args = args
self.data = dataset
def partition(self, recursive=False):
# recursive (bool, optional): If set to :obj:`True`, will use multilevel
# recursive bisection instead of multilevel k-way partitioning.
# (default: :obj:`False`)
# only use train data, not the whole dataset
# map graph into new graph
mapping = {}
for i, node in enumerate(self.graph.nodes):
mapping[node] = i
partition_graph = nx.relabel_nodes(self.graph, mapping=mapping)
adj_list = []
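        # nx.generate_adjlist yields one line per node of the form "source n1 n2 ...",
        # so each row appended below starts with the source node id itself.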
for line in nx.generate_adjlist(partition_graph):
line_int = list(map(int, line.split()))
adj_list.append(np.array(line_int))
n_cuts, membership = pymetis.part_graph(self.args['num_shards'], adjacency=adj_list)
# map node back to original graph
community_to_node = {}
for shard_index in range(self.args['num_shards']):
community_to_node[shard_index] = np.array([node_id for node_id, node_shard_index in zip(list(mapping.keys()), membership) if node_shard_index == shard_index])
return community_to_node
| 2,609 | 38.545455 | 170 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_lpa.py | import copy
import logging
from collections import defaultdict
import numpy as np
class ConstrainedLPA:
def __init__(self, adj, num_communities, node_threshold, terminate_delta):
self.logger = logging.getLogger('constrained_lpa_single')
self.adj = adj
self.num_nodes = adj.shape[0]
self.num_communities = num_communities
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
def initialization(self):
self.logger.info('initializing communities')
random_nodes = np.arange(self.num_nodes)
np.random.shuffle(random_nodes)
self.communities = defaultdict(set)
self.node_community = np.zeros(self.adj.shape[0])
        # split the shuffled nodes into equally sized chunks; each chunk gets one initial community label
for community, nodes in enumerate(np.array_split(random_nodes, self.num_communities)):
self.communities[community] = set(nodes)
self.node_community[nodes] = community
def community_detection(self, iterations=100):
self.logger.info('detecting communities')
communities = copy.deepcopy(self.communities)
lpa_deltas = []
# Currently, break when maximum iterations round achieves.
for i in range(iterations):
self.logger.info('iteration %s' % (i,))
desire_move = self._determine_desire_move()
sort_indices = np.flip(np.argsort(desire_move[:, 2]))
candidate_nodes = defaultdict(list)
# allocate nodes' community with descending order of colocate count
for node in sort_indices:
src_community = desire_move[node][0]
dst_community = desire_move[node][1]
if src_community != dst_community:
if len(self.communities[dst_community]) < self.node_threshold:
self.node_community[node] = dst_community
self.communities[dst_community].add(node)
self.communities[src_community].remove(node)
# reallocate the candidate nodes
candidate_nodes_cur = candidate_nodes[src_community]
while len(candidate_nodes_cur) != 0:
node_cur = candidate_nodes_cur[0]
src_community_cur = desire_move[node_cur][0]
dst_community_cur = desire_move[node_cur][1]
self.node_community[node_cur] = dst_community_cur
self.communities[dst_community_cur].add(node_cur)
self.communities[src_community_cur].remove(node_cur)
candidate_nodes[dst_community_cur].pop(0)
candidate_nodes_cur = candidate_nodes[src_community_cur]
else:
candidate_nodes[dst_community].append(node)
# record the communities of each iteration, break the loop while communities are stable.
delta = self._lpa_delta(communities, self.communities)
lpa_deltas.append(delta)
self.logger.info("%d" % delta)
communities = copy.deepcopy(self.communities)
if delta <= self.terminate_delta:
break
return self.communities, lpa_deltas
def _determine_desire_move(self):
desire_move = np.zeros([self.num_nodes, 3])
desire_move[:, 0] = self.node_community
for i in range(self.num_nodes):
# neighbor_community = self.node_community[np.nonzero(self.adj[i])[0]] # for non-bool adj
neighbor_community = self.node_community[self.adj[i]] # for bool adj
unique_community, unique_count = np.unique(neighbor_community, return_counts=True)
if unique_community.shape[0] == 0:
continue
max_indices = np.where(unique_count == np.max(unique_count))[0]
if max_indices.size == 1:
desire_move[i, 1] = unique_community[max_indices]
desire_move[i, 2] = unique_count[max_indices]
elif max_indices.size > 1:
max_index = np.random.choice(max_indices)
desire_move[i, 1] = unique_community[max_index]
desire_move[i, 2] = unique_count[max_index]
return desire_move
def _lpa_delta(self, lpa_pre, lpa_cur):
delta = 0.0
for i in range(len(lpa_cur)):
delta += len((lpa_cur[i] | lpa_pre[i]) - (lpa_cur[i] & lpa_pre[i]))
return delta
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
adj = np.array([[0, 1, 1],
[1, 0, 1],
[1, 1, 0]],
                   dtype=bool)
num_communities = 2
node_threshold = 3
terminate_delta = 1
lpa = ConstrainedLPA(adj, num_communities, node_threshold, terminate_delta)
lpa.initialization()
lpa.community_detection()
| 5,167 | 38.450382 | 104 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/partition_kmeans.py | import math
import pickle
import cupy as cp
import numpy as np
import logging
from sklearn.cluster import KMeans
import config
from lib_graph_partition.constrained_kmeans_base import ConstrainedKmeansBase
from lib_graph_partition.partition import Partition
from lib_graph_partition.constrained_kmeans import ConstrainedKmeans
from lib_node_embedding.node_embedding import NodeEmbedding
class PartitionKMeans(Partition):
def __init__(self, args, graph, dataset):
super(PartitionKMeans, self).__init__(args, graph, dataset)
self.logger = logging.getLogger('partition_kmeans')
cp.cuda.Device(self.args['cuda']).use()
self.load_embeddings()
def load_embeddings(self):
node_embedding = NodeEmbedding(self.args, self.graph, self.dataset)
if self.partition_method in ["sage_km", "sage_km_base"]:
self.node_to_embedding = node_embedding.sage_encoder()
else:
raise Exception('unsupported embedding method')
def partition(self):
self.logger.info("partitioning")
embedding = []
for node in self.node_to_embedding.keys():
embedding.append(self.node_to_embedding[node])
if not self.args['is_constrained']:
cluster = KMeans(n_clusters=self.num_shards, random_state=10)
cluster_labels = cluster.fit_predict(embedding)
node_to_community = {}
for com, node in zip(cluster_labels, self.node_to_embedding.keys()):
node_to_community[node] = com
community_to_node = {}
for com in range(len(set(node_to_community.values()))):
community_to_node[com] = np.where(np.array(list(node_to_community.values())) == com)[0]
community_to_node = dict(sorted(community_to_node.items()))
else:
# node_threshold = math.ceil(self.graph.number_of_nodes() / self.num_shards)
# node_threshold = math.ceil(self.graph.number_of_nodes() / self.num_shards + 0.05*self.graph.number_of_nodes())
node_threshold = math.ceil(
self.graph.number_of_nodes() / self.args['num_shards'] + self.args['shard_size_delta'] * (
self.graph.number_of_nodes() - self.graph.number_of_nodes() / self.args['num_shards']))
self.logger.info("#.nodes: %s. Shard threshold: %s." % (self.graph.number_of_nodes(), node_threshold))
if self.partition_method == 'sage_km_base':
cluster = ConstrainedKmeansBase(np.array(embedding), num_clusters=self.num_shards,
node_threshold=node_threshold,
terminate_delta=self.args['terminate_delta'])
cluster.initialization()
community, km_deltas = cluster.clustering()
pickle.dump(km_deltas, open(config.ANALYSIS_PATH + "partition/base_bkm_" + self.args['dataset_name'], 'wb'))
community_to_node = {}
for i in range(self.num_shards):
community_to_node[i] = np.array(community[i])
if self.partition_method == 'sage_km':
cluster = ConstrainedKmeans(cp.array(embedding), num_clusters=self.num_shards,
node_threshold=node_threshold,
terminate_delta=self.args['terminate_delta'])
cluster.initialization()
community, km_deltas = cluster.clustering()
pickle.dump(km_deltas, open(config.ANALYSIS_PATH + "partition/bkm_" + self.args['dataset_name'], 'wb'))
community_to_node = {}
for i in range(self.num_shards):
community_to_node[i] = np.array(community[i].get().astype(int))
return community_to_node
| 3,881 | 43.62069 | 124 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/hungarian.py | #!/usr/bin/python
"""
Implementation of the Hungarian (Munkres) Algorithm using Python and NumPy
References: http://www.ams.jhu.edu/~castello/362/Handouts/hungarian.pdf
http://weber.ucsd.edu/~vcrawfor/hungar.pdf
http://en.wikipedia.org/wiki/Hungarian_algorithm
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
http://www.clapper.org/software/python/munkres/
"""
# Module Information.
__version__ = "1.1.1"
__author__ = "Thom Dedecko"
__url__ = "http://github.com/tdedecko/hungarian-algorithm"
__copyright__ = "(c) 2010 Thom Dedecko"
__license__ = "MIT License"
class HungarianError(Exception):
pass
# Import numpy. Error if fails
try:
import numpy as np
except ImportError:
raise HungarianError("NumPy is not installed.")
class Hungarian:
"""
Implementation of the Hungarian (Munkres) Algorithm using np.
Usage:
hungarian = Hungarian(cost_matrix)
hungarian.calculate()
or
hungarian = Hungarian()
hungarian.calculate(cost_matrix)
Handle Profit matrix:
hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
or
cost_matrix = Hungarian.make_cost_matrix(profit_matrix)
The matrix will be automatically padded if it is not square.
For that numpy's resize function is used, which automatically adds 0's to any row/column that is added
Get results and total potential after calculation:
hungarian.get_results()
hungarian.get_total_potential()
"""
def __init__(self, input_matrix=None, is_profit_matrix=False):
"""
input_matrix is a List of Lists.
input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
"""
if input_matrix is not None:
# Save input
my_matrix = np.array(input_matrix)
self._input_matrix = np.array(input_matrix)
self._maxColumn = my_matrix.shape[1]
self._maxRow = my_matrix.shape[0]
# Adds 0s if any columns/rows are added. Otherwise stays unaltered
matrix_size = max(self._maxColumn, self._maxRow)
pad_columns = matrix_size - self._maxRow
pad_rows = matrix_size - self._maxColumn
my_matrix = np.pad(my_matrix, ((0,pad_columns),(0,pad_rows)), 'constant', constant_values=(0))
# Convert matrix to profit matrix if necessary
if is_profit_matrix:
my_matrix = self.make_cost_matrix(my_matrix)
self._cost_matrix = my_matrix
self._size = len(my_matrix)
self._shape = my_matrix.shape
# Results from algorithm.
self._results = []
self._totalPotential = 0
else:
self._cost_matrix = None
def get_results(self):
"""Get results after calculation."""
return self._results
def get_total_potential(self):
"""Returns expected value after calculation."""
return self._totalPotential
def calculate(self, input_matrix=None, is_profit_matrix=False):
"""
Implementation of the Hungarian (Munkres) Algorithm.
input_matrix is a List of Lists.
input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
"""
# Handle invalid and new matrix inputs.
if input_matrix is None and self._cost_matrix is None:
raise HungarianError("Invalid input")
elif input_matrix is not None:
self.__init__(input_matrix, is_profit_matrix)
result_matrix = self._cost_matrix.copy()
# Step 1: Subtract row mins from each row.
for index, row in enumerate(result_matrix):
result_matrix[index] -= row.min()
# Step 2: Subtract column mins from each column.
for index, column in enumerate(result_matrix.T):
result_matrix[:, index] -= column.min()
# Step 3: Use minimum number of lines to cover all zeros in the matrix.
# If the total covered rows+columns is not equal to the matrix size then adjust matrix and repeat.
total_covered = 0
while total_covered < self._size:
# Find minimum number of lines to cover all zeros in the matrix and find total covered rows and columns.
cover_zeros = CoverZeros(result_matrix)
covered_rows = cover_zeros.get_covered_rows()
covered_columns = cover_zeros.get_covered_columns()
total_covered = len(covered_rows) + len(covered_columns)
# if the total covered rows+columns is not equal to the matrix size then adjust it by min uncovered num (m).
if total_covered < self._size:
result_matrix = self._adjust_matrix_by_min_uncovered_num(result_matrix, covered_rows, covered_columns)
# Step 4: Starting with the top row, work your way downwards as you make assignments.
# Find single zeros in rows or columns.
# Add them to final result and remove them and their associated row/column from the matrix.
expected_results = min(self._maxColumn, self._maxRow)
zero_locations = (result_matrix == 0)
while len(self._results) != expected_results:
# If number of zeros in the matrix is zero before finding all the results then an error has occurred.
if not zero_locations.any():
raise HungarianError("Unable to find results. Algorithm has failed.")
# Find results and mark rows and columns for deletion
matched_rows, matched_columns = self.__find_matches(zero_locations)
# Make arbitrary selection
total_matched = len(matched_rows) + len(matched_columns)
if total_matched == 0:
matched_rows, matched_columns = self.select_arbitrary_match(zero_locations)
# Delete rows and columns
for row in matched_rows:
zero_locations[row] = False
for column in matched_columns:
zero_locations[:, column] = False
# Save Results
self.__set_results(zip(matched_rows, matched_columns))
# Calculate total potential
value = 0
for row, column in self._results:
value += self._input_matrix[row, column]
self._totalPotential = value
@staticmethod
def make_cost_matrix(profit_matrix):
"""
Converts a profit matrix into a cost matrix.
Expects NumPy objects as input.
"""
# subtract profit matrix from a matrix made of the max value of the profit matrix
matrix_shape = profit_matrix.shape
offset_matrix = np.ones(matrix_shape, dtype=int) * profit_matrix.max()
cost_matrix = offset_matrix - profit_matrix
return cost_matrix
def _adjust_matrix_by_min_uncovered_num(self, result_matrix, covered_rows, covered_columns):
"""Subtract m from every uncovered number and add m to every element covered with two lines."""
# Calculate minimum uncovered number (m)
elements = []
for row_index, row in enumerate(result_matrix):
if row_index not in covered_rows:
for index, element in enumerate(row):
if index not in covered_columns:
elements.append(element)
min_uncovered_num = min(elements)
# Add m to every covered element
adjusted_matrix = result_matrix
for row in covered_rows:
adjusted_matrix[row] += min_uncovered_num
for column in covered_columns:
adjusted_matrix[:, column] += min_uncovered_num
# Subtract m from every element
m_matrix = np.ones(self._shape, dtype=int) * min_uncovered_num
adjusted_matrix -= m_matrix
return adjusted_matrix
def __find_matches(self, zero_locations):
"""Returns rows and columns with matches in them."""
marked_rows = np.array([], dtype=int)
marked_columns = np.array([], dtype=int)
# Mark rows and columns with matches
# Iterate over rows
for index, row in enumerate(zero_locations):
row_index = np.array([index])
if np.sum(row) == 1:
column_index, = np.where(row)
marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
column_index)
# Iterate over columns
for index, column in enumerate(zero_locations.T):
column_index = np.array([index])
if np.sum(column) == 1:
row_index, = np.where(column)
marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
column_index)
return marked_rows, marked_columns
@staticmethod
def __mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index):
"""Check if column or row is marked. If not marked then mark it."""
new_marked_rows = marked_rows
new_marked_columns = marked_columns
if not (marked_rows == row_index).any() and not (marked_columns == column_index).any():
new_marked_rows = np.insert(marked_rows, len(marked_rows), row_index)
new_marked_columns = np.insert(marked_columns, len(marked_columns), column_index)
return new_marked_rows, new_marked_columns
@staticmethod
def select_arbitrary_match(zero_locations):
"""Selects row column combination with minimum number of zeros in it."""
# Count number of zeros in row and column combinations
rows, columns = np.where(zero_locations)
zero_count = []
for index, row in enumerate(rows):
total_zeros = np.sum(zero_locations[row]) + np.sum(zero_locations[:, columns[index]])
zero_count.append(total_zeros)
# Get the row column combination with the minimum number of zeros.
indices = zero_count.index(min(zero_count))
row = np.array([rows[indices]])
column = np.array([columns[indices]])
return row, column
def __set_results(self, result_lists):
"""Set results during calculation."""
# Check if results values are out of bound from input matrix (because of matrix being padded).
# Add results to results list.
for result in result_lists:
row, column = result
if row < self._maxRow and column < self._maxColumn:
new_result = (int(row), int(column))
self._results.append(new_result)
class CoverZeros:
"""
Use minimum number of lines to cover all zeros in the matrix.
Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
"""
def __init__(self, matrix):
"""
Input a matrix and save it as a boolean matrix to designate zero locations.
Run calculation procedure to generate results.
"""
# Find zeros in matrix
self._zero_locations = (matrix == 0)
self._shape = matrix.shape
# Choices starts without any choices made.
self._choices = np.zeros(self._shape, dtype=bool)
self._marked_rows = []
self._marked_columns = []
# marks rows and columns
self.__calculate()
# Draw lines through all unmarked rows and all marked columns.
self._covered_rows = list(set(range(self._shape[0])) - set(self._marked_rows))
self._covered_columns = self._marked_columns
def get_covered_rows(self):
"""Return list of covered rows."""
return self._covered_rows
def get_covered_columns(self):
"""Return list of covered columns."""
return self._covered_columns
def __calculate(self):
"""
Calculates minimum number of lines necessary to cover all zeros in a matrix.
Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
"""
while True:
# Erase all marks.
self._marked_rows = []
self._marked_columns = []
# Mark all rows in which no choice has been made.
for index, row in enumerate(self._choices):
if not row.any():
self._marked_rows.append(index)
# If no marked rows then finish.
if not self._marked_rows:
return True
# Mark all columns not already marked which have zeros in marked rows.
num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
# If no new marked columns then finish.
if num_marked_columns == 0:
return True
# While there is some choice in every marked column.
while self.__choice_in_all_marked_columns():
# Some Choice in every marked column.
# Mark all rows not already marked which have choices in marked columns.
num_marked_rows = self.__mark_new_rows_with_choices_in_marked_columns()
# If no new marks then Finish.
if num_marked_rows == 0:
return True
# Mark all columns not already marked which have zeros in marked rows.
num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
# If no new marked columns then finish.
if num_marked_columns == 0:
return True
# No choice in one or more marked columns.
# Find a marked column that does not have a choice.
choice_column_index = self.__find_marked_column_without_choice()
while choice_column_index is not None:
# Find a zero in the column indexed that does not have a row with a choice.
choice_row_index = self.__find_row_without_choice(choice_column_index)
# Check if an available row was found.
new_choice_column_index = None
if choice_row_index is None:
                    # Find a good row to accommodate the swap. Find its column pair.
choice_row_index, new_choice_column_index = \
self.__find_best_choice_row_and_new_column(choice_column_index)
# Delete old choice.
self._choices[choice_row_index, new_choice_column_index] = False
# Set zero to choice.
self._choices[choice_row_index, choice_column_index] = True
# Loop again if choice is added to a row with a choice already in it.
choice_column_index = new_choice_column_index
def __mark_new_columns_with_zeros_in_marked_rows(self):
"""Mark all columns not already marked which have zeros in marked rows."""
num_marked_columns = 0
for index, column in enumerate(self._zero_locations.T):
if index not in self._marked_columns:
if column.any():
row_indices, = np.where(column)
zeros_in_marked_rows = (set(self._marked_rows) & set(row_indices)) != set([])
if zeros_in_marked_rows:
self._marked_columns.append(index)
num_marked_columns += 1
return num_marked_columns
def __mark_new_rows_with_choices_in_marked_columns(self):
"""Mark all rows not already marked which have choices in marked columns."""
num_marked_rows = 0
for index, row in enumerate(self._choices):
if index not in self._marked_rows:
if row.any():
column_index, = np.where(row)
if column_index in self._marked_columns:
self._marked_rows.append(index)
num_marked_rows += 1
return num_marked_rows
def __choice_in_all_marked_columns(self):
"""Return Boolean True if there is a choice in all marked columns. Returns boolean False otherwise."""
for column_index in self._marked_columns:
if not self._choices[:, column_index].any():
return False
return True
def __find_marked_column_without_choice(self):
"""Find a marked column that does not have a choice."""
for column_index in self._marked_columns:
if not self._choices[:, column_index].any():
return column_index
raise HungarianError(
"Could not find a column without a choice. Failed to cover matrix zeros. Algorithm has failed.")
def __find_row_without_choice(self, choice_column_index):
"""Find a row without a choice in it for the column indexed. If a row does not exist then return None."""
row_indices, = np.where(self._zero_locations[:, choice_column_index])
for row_index in row_indices:
if not self._choices[row_index].any():
return row_index
# All rows have choices. Return None.
return None
def __find_best_choice_row_and_new_column(self, choice_column_index):
"""
Find a row index to use for the choice so that the column that needs to be changed is optimal.
Return a random row and column if unable to find an optimal selection.
"""
row_indices, = np.where(self._zero_locations[:, choice_column_index])
for row_index in row_indices:
column_indices, = np.where(self._choices[row_index])
column_index = column_indices[0]
if self.__find_row_without_choice(column_index) is not None:
return row_index, column_index
# Cannot find optimal row and column. Return a random row and column.
from random import shuffle
shuffle(row_indices)
column_index, = np.where(self._choices[row_indices[0]])
return row_indices[0], column_index[0]
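def _cover_zeros_demo():
    # Hedged usage sketch added for illustration; this helper (and the toy matrix
    # below) is not part of the original module. CoverZeros reports which rows and
    # columns the covering lines pass through for a reduced cost matrix.
    reduced = np.array([[0, 2, 3],
                        [2, 0, 4],
                        [3, 4, 0]])
    cover = CoverZeros(reduced)
    return cover.get_covered_rows(), cover.get_covered_columns()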
if __name__ == '__main__':
profit_matrix = [
[62, 75, 80, 93, 95, 97],
[75, 80, 82, 85, 71, 97],
[80, 75, 81, 98, 90, 97],
[78, 82, 84, 80, 50, 98],
[90, 85, 85, 80, 85, 99],
[65, 75, 80, 75, 68, 96]]
hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
hungarian.calculate()
print("Expected value:\t\t543")
print("Calculated value:\t", hungarian.get_total_potential()) # = 543
print("Expected results:\n\t[(0, 4), (2, 3), (5, 5), (4, 0), (1, 1), (3, 2)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
cost_matrix = [
[4, 2, 8],
[4, 3, 7],
[3, 1, 6]]
hungarian = Hungarian(cost_matrix)
print('calculating...')
hungarian.calculate()
print("Expected value:\t\t12")
print("Calculated value:\t", hungarian.get_total_potential()) # = 12
print("Expected results:\n\t[(0, 1), (1, 0), (2, 2)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
profit_matrix = [
[62, 75, 80, 93, 0, 97],
[75, 0, 82, 85, 71, 97],
[80, 75, 81, 0, 90, 97],
[78, 82, 0, 80, 50, 98],
[0, 85, 85, 80, 85, 99],
[65, 75, 80, 75, 68, 0]]
hungarian = Hungarian()
hungarian.calculate(profit_matrix, is_profit_matrix=True)
print("Expected value:\t\t523")
print("Calculated value:\t", hungarian.get_total_potential()) # = 523
print("Expected results:\n\t[(0, 3), (2, 4), (3, 0), (5, 2), (1, 5), (4, 1)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
| 19,635 | 40.252101 | 120 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_lpa_base.py | # An implementation of `` Balanced Label Propagation for Partitioning MassiveGraphs'' (https://stanford.edu/~jugander/papers/wsdm13-blp.pdf)
import copy
import logging
from collections import defaultdict
import numpy as np
import cvxpy as cp
from scipy.stats import linregress
class ConstrainedLPABase:
def __init__(self, adj, num_communities, node_threshold, terminate_delta):
self.logger = logging.getLogger('constrained_lpa_base')
self.adj = adj
self.num_nodes = adj.shape[0]
self.num_communities = num_communities
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
def initialization(self):
self.logger.info('initializing communities')
random_nodes = np.arange(self.num_nodes)
np.random.shuffle(random_nodes)
self.communities = defaultdict(set)
self.node_community = np.zeros(self.adj.shape[0])
        # randomly split the nodes into equally sized initial communities
for community, nodes in enumerate(np.array_split(random_nodes, self.num_communities)):
self.communities[community] = set(nodes)
self.node_community[nodes] = community
def community_detection(self, iterations=100):
self.logger.info('detecting communities')
communities = copy.deepcopy(self.communities)
lpa_deltas = []
for i in range(iterations):
self.logger.info('iteration %s' % (i,))
## Step 1: calculate desired move
desire_move = self._determine_desire_move()
relocation = {}
utility_func = {}
## Step 2: calculate parameters for linear programming problem
for src_community in range(self.num_communities):
for dst_community in range(self.num_communities):
move_node = desire_move[np.where(np.logical_and(desire_move[:, 1] == src_community, desire_move[:, 2] == dst_community))[0]]
if src_community != dst_community and move_node.size != 0:
move_node = move_node[np.flip(np.argsort(move_node[:, 3]))]
relocation[(src_community, dst_community)] = move_node
if move_node.shape[0] == 1:
utility_func[(src_community, dst_community)] = np.array([[0, move_node[0, 3]]])
else:
cum_sum = np.cumsum(move_node[:, 3])
utility_func_temp = np.zeros([move_node.shape[0] - 1, 2])
for k in range(move_node.shape[0] - 1):
utility_func_temp[k, 0], utility_func_temp[k, 1], _, _, _ = linregress([k, k+1], [cum_sum[k], cum_sum[k+1]])
utility_func[(src_community, dst_community)] = utility_func_temp
## Step 3: solve linear programming problem
x = cp.Variable([self.num_communities, self.num_communities])
z = cp.Variable([self.num_communities, self.num_communities])
objective = cp.Maximize(cp.sum(z))
constraints = []
for src_community in range(self.num_communities):
const = 0
for dst_community in range(self.num_communities):
if (src_community, dst_community) in relocation:
if src_community == dst_community:
constraints.append(x[src_community, dst_community] == 0)
constraints.append(z[src_community, dst_community] == 0)
else:
## Constraint 2 of Theorem 2
constraints.append(x[src_community, dst_community] >= 0)
constraints.append(x[src_community, dst_community] <= relocation[(src_community, dst_community)].shape[0])
## Constraint 1 of Theorem 2
if (dst_community, src_community) in relocation:
const += x[src_community, dst_community] - x[dst_community, src_community]
## Constraint 3 of Theorem 2
for utility_func_value in utility_func[(src_community, dst_community)]:
constraints.append(- utility_func_value[0] * x[src_community, dst_community] + z[src_community, dst_community] <= utility_func_value[1])
else:
constraints.append(x[src_community, dst_community] == 0)
constraints.append(z[src_community, dst_community] == 0)
## Constraint 1 of Theorem 2
constraints.append(len(self.communities[src_community]) + const <= self.node_threshold)
problem = cp.Problem(objective, constraints)
problem.solve()
## Step 4: parse linear programming problem results
if problem.status == 'optimal':
x_value = np.floor(np.abs(x.value)).astype(np.int64)
for src_community in range(self.num_communities):
for dst_community in range(self.num_communities):
if (src_community, dst_community) in relocation and x_value[src_community, dst_community] != 0:
# if (src_community, dst_community) in relocation:
relocation_temp = relocation[(src_community, dst_community)][:, 0].astype(np.int64)
move_node = relocation_temp[:x_value[src_community, dst_community] - 1]
if isinstance(move_node, np.int64):
self.communities[src_community].remove(move_node)
self.communities[dst_community].add(move_node)
self.node_community[move_node] = dst_community
else:
# move_node = set(move_node)
self.communities[src_community].difference_update(move_node)
self.communities[dst_community].update(move_node)
for node in move_node:
self.node_community[node] = dst_community
else:
self.logger.info("No optimal solution, break!")
break
## Check the number of moved nodes
delta = self._lpa_delta(communities, self.communities)
lpa_deltas.append(delta)
self.logger.info("%d" % delta)
communities = copy.deepcopy(self.communities)
if delta <= self.terminate_delta:
break
return self.communities, lpa_deltas
def _determine_desire_move(self):
desire_move = []
for i in range(self.num_nodes):
# neighbor_community = self.node_community[np.nonzero(self.adj[i])[0]] # for non-bool adj
neighbor_community = self.node_community[self.adj[i]] # for bool adj
unique_community, unique_count = np.unique(neighbor_community, return_counts=True)
src_relocation = unique_count[np.where(unique_community == self.node_community[i])[0]]
for community in unique_community:
if community != self.node_community[i]:
dst_relocation = unique_count[np.where(unique_community == community)[0]]
if dst_relocation - src_relocation >= 0:
desire_move_temp = np.zeros(4)
desire_move_temp[0] = i
desire_move_temp[1] = self.node_community[i]
desire_move_temp[2] = community
desire_move_temp[3] = dst_relocation - src_relocation
desire_move.append(desire_move_temp)
return np.stack(desire_move)
def _lpa_delta(self, lpa_pre, lpa_cur):
delta = 0.0
for i in range(len(lpa_cur)):
delta += len((lpa_cur[i] | lpa_pre[i]) - (lpa_cur[i] & lpa_pre[i]))
return delta
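def _linear_program_sketch():
    # Hedged, self-contained sketch added for illustration (this helper and its toy
    # numbers are not part of the original class): the same cvxpy pattern used in
    # Step 3 above, i.e. maximise total utility z subject to capacity bounds on the
    # relocation variables x and piecewise-linear utility constraints.
    x = cp.Variable(2, nonneg=True)
    z = cp.Variable(2)
    constraints = [x <= 3, z <= 2 * x + 1]
    problem = cp.Problem(cp.Maximize(cp.sum(z)), constraints)
    problem.solve()
    return x.value, z.value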
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
adj = np.array([[0, 1, 1],
[1, 0, 1],
[1, 1, 0]],
                   dtype=bool)
num_communities = 2
node_threshold = 3
terminate_delta = 1
lpa = ConstrainedLPABase(adj, num_communities, node_threshold, terminate_delta)
lpa.initialization()
lpa.community_detection()
| 8,700 | 45.77957 | 164 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/partition_random.py | import numpy as np
from lib_graph_partition.partition import Partition
class PartitionRandom(Partition):
def __init__(self, args, graph):
super(PartitionRandom, self).__init__(args, graph)
def partition(self):
graph_nodes = np.array(self.graph.nodes)
np.random.shuffle(graph_nodes)
train_shard_indices = np.array_split(graph_nodes, self.args['num_shards'])
return dict(zip(range(self.num_shards), train_shard_indices))
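if __name__ == '__main__':
    # Added hedged usage sketch; the argument keys mirror those read by the
    # Partition base class, and the toy path graph stands in for a real dataset.
    import networkx as nx
    toy_args = {'partition_method': 'random', 'num_shards': 2, 'dataset_name': 'toy'}
    toy_graph = nx.path_graph(6)
    shards = PartitionRandom(toy_args, toy_graph).partition()
    print(shards)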
| 472 | 28.5625 | 82 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/partition.py | import numpy as np
class Partition:
def __init__(self, args, graph, dataset=None):
self.args = args
self.graph = graph
self.dataset = dataset
self.partition_method = self.args['partition_method']
self.num_shards = self.args['num_shards']
self.dataset_name = self.args['dataset_name']
def idx2id(self, idx_dict, node_list):
ret_dict = {}
for com, idx in idx_dict.items():
ret_dict[com] = node_list[list(idx)]
return ret_dict
def id2idx(self, id_dict, node_list):
ret_dict = {}
for com, id in id_dict.items():
ret_dict[com] = np.searchsorted(node_list, id)
        return ret_dict
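if __name__ == '__main__':
    # Added hedged sketch: idx2id/id2idx translate between positions in a sorted
    # node-id array and the original node ids; the toy values are illustrative only.
    node_list = np.array([10, 20, 30, 40])
    toy_args = {'partition_method': 'none', 'num_shards': 2, 'dataset_name': 'toy'}
    partition = Partition(toy_args, graph=None)
    id_dict = partition.idx2id({0: [0, 2], 1: [1, 3]}, node_list)
    print(id_dict, partition.id2idx(id_dict, node_list))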
| 738 | 26.37037 | 61 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/hungarian_1.py | '''
reference: https://www.topcoder.com/community/competitive-programming/tutorials/assignment-problem-and-hungarian-algorithm/
'''
import numpy as np
#max weight assignment
class KMMatcher:
## weights : nxm weight matrix (numpy , float), n <= m
def __init__(self, weights):
weights = np.array(weights).astype(np.float32)
self.weights = weights
self.n, self.m = weights.shape
assert self.n <= self.m
# init label
self.label_x = np.max(weights, axis=1)
self.label_y = np.zeros((self.m, ), dtype=np.float32)
self.max_match = 0
        self.xy = -np.ones((self.n,), dtype=int)
        self.yx = -np.ones((self.m,), dtype=int)
def do_augment(self, x, y):
self.max_match += 1
while x != -2:
self.yx[y] = x
ty = self.xy[x]
self.xy[x] = y
x, y = self.prev[x], ty
def find_augment_path(self):
        self.S = np.zeros((self.n,), bool)
        self.T = np.zeros((self.m,), bool)
        self.slack = np.zeros((self.m,), dtype=np.float32)
        self.slackyx = -np.ones((self.m,), dtype=int)  # l[slackyx[y]] + l[y] - w[slackx[y], y] == slack[y]
        self.prev = -np.ones((self.n,), int)
queue, st = [], 0
root = -1
for x in range(self.n):
if self.xy[x] == -1:
queue.append(x);
root = x
self.prev[x] = -2
self.S[x] = True
break
self.slack = self.label_y + self.label_x[root] - self.weights[root]
self.slackyx[:] = root
while True:
while st < len(queue):
x = queue[st]; st+= 1
is_in_graph = np.isclose(self.weights[x], self.label_x[x] + self.label_y)
nonzero_inds = np.nonzero(np.logical_and(is_in_graph, np.logical_not(self.T)))[0]
for y in nonzero_inds:
if self.yx[y] == -1:
return x, y
self.T[y] = True
queue.append(self.yx[y])
self.add_to_tree(self.yx[y], x)
self.update_labels()
queue, st = [], 0
is_in_graph = np.isclose(self.slack, 0)
nonzero_inds = np.nonzero(np.logical_and(is_in_graph, np.logical_not(self.T)))[0]
for y in nonzero_inds:
x = self.slackyx[y]
if self.yx[y] == -1:
return x, y
self.T[y] = True
if not self.S[self.yx[y]]:
queue.append(x)
self.add_to_tree(self.yx[y], x)
def solve(self, verbose = False):
while self.max_match < self.n:
x, y = self.find_augment_path()
self.do_augment(x, y)
sum = 0.
for x in range(self.n):
if verbose:
print('match {} to {}, weight {:.4f}'.format(x, self.xy[x], self.weights[x, self.xy[x]]))
sum += self.weights[x, self.xy[x]]
self.best = sum
if verbose:
print('ans: {:.4f}'.format(sum))
return self.xy, sum
def add_to_tree(self, x, prevx):
self.S[x] = True
self.prev[x] = prevx
better_slack_idx = self.label_x[x] + self.label_y - self.weights[x] < self.slack
self.slack[better_slack_idx] = self.label_x[x] + self.label_y[better_slack_idx] - self.weights[x, better_slack_idx]
self.slackyx[better_slack_idx] = x
def update_labels(self):
delta = self.slack[np.logical_not(self.T)].min()
self.label_x[self.S] -= delta
self.label_y[self.T] += delta
self.slack[np.logical_not(self.T)] -= delta
if __name__ == '__main__':
matcher = KMMatcher([
[2., 3., 0., 3.],
[0., 4., 4., 0.],
[5., 6., 0., 0.],
[0., 0., 7., 0.]
])
best = matcher.solve(verbose=True)
print(best)
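    # Added hedged example: rectangular weight matrices are also supported, as long
    # as there are no more rows than columns (the n <= m assertion in __init__).
    rect_matcher = KMMatcher([
        [1., 5., 3.],
        [4., 2., 6.]])
    print(rect_matcher.solve(verbose=False))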
| 3,953 | 31.146341 | 123 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gnn_base.py | import logging
import pickle
import torch
class GNNBase:
def __init__(self):
self.logger = logging.getLogger('gnn')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = torch.device('cpu')
self.model = None
self.embedding_dim = 0
self.data = None
self.subgraph_loader = None
def save_model(self, save_path):
self.logger.info('saving model')
torch.save(self.model.state_dict(), save_path)
def load_model(self, save_path):
self.logger.info('loading model')
device = torch.device('cpu')
self.model.load_state_dict(torch.load(save_path, map_location=device))
def save_paras(self, save_path):
self.logger.info('saving paras')
self.paras = {
'embedding_dim': self.embedding_dim
}
pickle.dump(self.paras, open(save_path, 'wb'))
def load_paras(self, save_path):
self.logger.info('loading paras')
return pickle.load(open(save_path, 'rb'))
def count_parameters(self):
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def posterior(self):
self.model.eval()
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
posteriors = self.model(self.data)
for _, mask in self.data('test_mask'):
posteriors = posteriors[mask]
return posteriors.detach()
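if __name__ == '__main__':
    # Added hedged sketch: the save/load/count helpers only assume self.model is a
    # torch module, illustrated here with a stand-in Linear layer and a temp file.
    import tempfile
    demo = GNNBase()
    demo.model = torch.nn.Linear(4, 2)
    checkpoint = tempfile.NamedTemporaryFile(suffix='.pt', delete=False).name
    demo.save_model(checkpoint)
    demo.load_model(checkpoint)
    print(demo.count_parameters())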
| 1,482 | 28.078431 | 82 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/node_classifier.py | import logging
import os
import torch
from sklearn.model_selection import train_test_split
torch.cuda.empty_cache()
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from torch_geometric.nn.conv.gcn_conv import gcn_norm
import numpy as np
import config
from lib_gnn_model.gat.gat_net_batch import GATNet
from lib_gnn_model.gin.gin_net_batch import GINNet
from lib_gnn_model.gcn.gcn_net_batch import GCNNet
from lib_gnn_model.graphsage.graphsage_net import SageNet
from lib_gnn_model.gnn_base import GNNBase
from parameter_parser import parameter_parser
from lib_utils import utils
class NodeClassifier(GNNBase):
def __init__(self, num_feats, num_classes, args, data=None):
super(NodeClassifier, self).__init__()
self.args = args
self.logger = logging.getLogger('node_classifier')
self.target_model = args['target_model']
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = 'cpu'
self.model = self.determine_model(num_feats, num_classes).to(self.device)
self.data = data
def determine_model(self, num_feats, num_classes):
self.logger.info('target model: %s' % (self.args['target_model'],))
if self.target_model == 'SAGE':
self.lr, self.decay = 0.01, 0.001
return SageNet(num_feats, 256, num_classes)
elif self.target_model == 'GAT':
self.lr, self.decay = 0.01, 0.001
return GATNet(num_feats, num_classes)
elif self.target_model == 'GCN':
self.lr, self.decay = 0.05, 0.0001
return GCNNet(num_feats, num_classes)
elif self.target_model == 'GIN':
self.lr, self.decay = 0.01, 0.0001
return GINNet(num_feats, num_classes)
else:
raise Exception('unsupported target model')
def train_model(self):
self.logger.info("training model")
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.data.y = self.data.y.squeeze().to(self.device)
self._gen_train_loader()
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.decay)
for epoch in range(self.args['num_epochs']):
self.logger.info('epoch %s' % (epoch,))
for batch_size, n_id, adjs in self.train_loader:
# self.logger.info("batch size: %s"%(batch_size))
# `adjs` holds a list of `(edge_index, e_id, size)` tuples.
adjs = [adj.to(self.device) for adj in adjs]
test_node = np.nonzero(self.data.test_mask.cpu().numpy())[0]
intersect = np.intersect1d(test_node, n_id.numpy())
optimizer.zero_grad()
if self.target_model == 'GCN':
out = self.model(self.data.x[n_id], adjs, self.edge_weight)
else:
out = self.model(self.data.x[n_id], adjs)
loss = F.nll_loss(out, self.data.y[n_id[:batch_size]])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info(f'Train: {train_acc:.4f}, Test: {test_acc:.4f}')
@torch.no_grad()
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_test_loader()
if self.target_model == 'GCN':
out = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
out = self.model.inference(self.data.x, self.test_loader, self.device)
y_true = self.data.y.cpu().unsqueeze(-1)
y_pred = out.argmax(dim=-1, keepdim=True)
results = []
for mask in [self.data.train_mask, self.data.test_mask]:
results += [int(y_pred[mask].eq(y_true[mask]).sum()) / int(mask.sum())]
return results
def posterior(self):
self.logger.debug("generating posteriors")
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.model.eval()
self._gen_test_loader()
if self.target_model == 'GCN':
posteriors = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
posteriors = self.model.inference(self.data.x, self.test_loader, self.device)
for _, mask in self.data('test_mask'):
posteriors = F.log_softmax(posteriors[mask], dim=-1)
return posteriors.detach()
def generate_embeddings(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_test_loader()
if self.target_model == 'GCN':
logits = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
logits = self.model.inference(self.data.x, self.test_loader, self.device)
return logits
def _gen_train_loader(self):
self.logger.info("generate train loader")
train_indices = np.nonzero(self.data.train_mask.cpu().numpy())[0]
edge_index = utils.filter_edge_index(self.data.edge_index, train_indices, reindex=False)
if edge_index.shape[1] == 0:
edge_index = torch.tensor([[1, 2], [2, 1]])
self.train_loader = NeighborSampler(
edge_index, node_idx=self.data.train_mask,
sizes=[5, 5], num_nodes=self.data.num_nodes,
batch_size=self.args['batch_size'], shuffle=True,
num_workers=0)
if self.target_model == 'GCN':
_, self.edge_weight = gcn_norm(self.data.edge_index, edge_weight=None, num_nodes=self.data.x.shape[0],
add_self_loops=False)
self.logger.info("generate train loader finish")
def _gen_test_loader(self):
test_indices = np.nonzero(self.data.train_mask.cpu().numpy())[0]
if not self.args['use_test_neighbors']:
edge_index = utils.filter_edge_index(self.data.edge_index, test_indices, reindex=False)
else:
edge_index = self.data.edge_index
if edge_index.shape[1] == 0:
edge_index = torch.tensor([[1, 3], [3, 1]])
self.test_loader = NeighborSampler(
edge_index, node_idx=None,
sizes=[-1], num_nodes=self.data.num_nodes,
# sizes=[5], num_nodes=self.data.num_nodes,
batch_size=self.args['test_batch_size'], shuffle=False,
num_workers=0)
if self.target_model == 'GCN':
_, self.edge_weight = gcn_norm(self.data.edge_index, edge_weight=None, num_nodes=self.data.x.shape[0],
add_self_loops=False)
if __name__ == '__main__':
os.chdir('../')
args = parameter_parser()
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
train_indices, test_indices = train_test_split(np.arange((data.num_nodes)), test_size=0.2, random_state=100)
data.train_mask, data.test_mask = torch.zeros(data.num_nodes, dtype=torch.bool), torch.zeros(data.num_nodes,
dtype=torch.bool)
data.train_mask[train_indices] = True
data.test_mask[test_indices] = True
graphsage = NodeClassifier(dataset.num_features, dataset.num_classes, args, data)
graphsage.train_model()
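    # Added hedged follow-up: posterior() returns log-probabilities for the test
    # nodes only, so its first dimension matches the size of the test split.
    posteriors = graphsage.posterior()
    print(posteriors.shape)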
| 7,966 | 38.636816 | 114 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid, Reddit
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gin.gin_net import GINNet
import config
class GIN(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GIN, self).__init__()
self.logger = logging.getLogger('gin')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GINNet(num_feats, num_classes).to(self.device)
self.data = data
def train_model(self, num_epochs=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epochs):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
# loss = F.nll_loss(output, self.data.y.squeeze(1)[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'citeseer'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gin = GIN(dataset.num_features, dataset.num_classes, data)
gin.train_model()
| 2,338 | 31.943662 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin_net.py | import torch
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv
class GINNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GINNet, self).__init__()
dim = 32
nn1 = Sequential(Linear(num_feats, dim), ReLU(), Linear(dim, dim))
self.conv1 = GINConv(nn1)
self.bn1 = torch.nn.BatchNorm1d(dim)
nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv2 = GINConv(nn2)
self.bn2 = torch.nn.BatchNorm1d(dim)
nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv3 = GINConv(nn3)
self.bn3 = torch.nn.BatchNorm1d(dim)
nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv4 = GINConv(nn4)
self.bn4 = torch.nn.BatchNorm1d(dim)
nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv5 = GINConv(nn5)
self.bn5 = torch.nn.BatchNorm1d(dim)
self.fc1 = Linear(dim, dim)
self.fc2 = Linear(dim, num_classes)
def forward(self, data, batch=None):
x = F.relu(self.conv1(data.x, data.edge_index))
x = self.bn1(x)
x = F.relu(self.conv2(x, data.edge_index))
x = self.bn2(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
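if __name__ == '__main__':
    # Added hedged sketch: a forward pass on a toy two-node graph, mainly to show
    # the expected Data fields (x, edge_index) and the log-probability output shape.
    from torch_geometric.data import Data
    toy_data = Data(x=torch.randn(2, 8), edge_index=torch.tensor([[0, 1], [1, 0]]))
    net = GINNet(num_feats=8, num_classes=3)
    net.eval()
    print(net(toy_data).shape)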
| 1,558 | 30.18 | 74 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GATNet(torch.nn.Module):
def __init__(self, num_feats, num_classes, dropout=0.6):
super(GATNet, self).__init__()
self.dropout = dropout
self.conv1 = GATConv(num_feats, 8, heads=8, dropout=self.dropout, add_self_loops=False)
# On the Pubmed dataset, use heads=8 in conv2.
self.conv2 = GATConv(8 * 8, num_classes, heads=1, concat=False, dropout=self.dropout, add_self_loops=False)
# self.conv2 = GATConv(8 * 8, num_classes, heads=8, concat=False, dropout=self.dropout, add_self_loops=False)
self.reset_parameters()
def forward(self, data):
x = F.dropout(data.x, p=self.dropout, training=self.training)
x = F.elu(self.conv1(x, data.edge_index))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv2(x, data.edge_index)
return F.log_softmax(x, dim=1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 1,074 | 36.068966 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat.py | import logging
import os
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
import config
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gat.gat_net import GATNet
class GAT(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GAT, self).__init__()
self.logger = logging.getLogger('gat')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GATNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.005, weight_decay=0.0001)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
# self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gat = GAT(dataset.num_features, dataset.num_classes, data)
gat.train_model()
# gat.evaluate_model()
| 2,273 | 31.028169 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from lib_gnn_model.graphsage.graphsage_net import SageNet
from lib_gnn_model.gnn_base import GNNBase
import config
class SAGE(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(SAGE, self).__init__()
self.logger = logging.getLogger('graphsage')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = torch.device('cpu')
self.model = SageNet(num_feats, 256, num_classes).to(self.device)
self.data = data
def train_model(self, num_epochs=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.data.y = self.data.y.squeeze().to(self.device)
self._gen_train_loader()
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01, weight_decay=0.001)
for epoch in range(num_epochs):
self.logger.info('epoch %s' % (epoch,))
for batch_size, n_id, adjs in self.train_loader:
# `adjs` holds a list of `(edge_index, e_id, size)` tuples.
adjs = [adj.to(self.device) for adj in adjs]
optimizer.zero_grad()
out = self.model(self.data.x[n_id], adjs)
loss = F.nll_loss(out, self.data.y[n_id[:batch_size]])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info(f'Train: {train_acc:.4f}, Test: {test_acc:.4f}')
@torch.no_grad()
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
out = self.model.inference(self.data.x, self.subgraph_loader, self.device)
y_true = self.data.y.cpu().unsqueeze(-1)
y_pred = out.argmax(dim=-1, keepdim=True)
results = []
for mask in [self.data.train_mask, self.data.test_mask]:
results += [int(y_pred[mask].eq(y_true[mask]).sum()) / int(mask.sum())]
return results
def posterior(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
posteriors = self.model.inference(self.data.x, self.subgraph_loader, self.device)
for _, mask in self.data('test_mask'):
posteriors = F.log_softmax(posteriors[mask], dim=-1)
return posteriors.detach()
def generate_embeddings(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
logits = self.model.inference(self.data.x, self.subgraph_loader, self.device)
return logits
def _gen_train_loader(self):
if self.data.edge_index.shape[1] == 0:
self.data.edge_index = torch.tensor([[1, 2], [2, 1]])
self.train_loader = NeighborSampler(self.data.edge_index, node_idx=self.data.train_mask,
# sizes=[25, 10], batch_size=128, shuffle=True,
# sizes=[25, 10], num_nodes=self.data.num_nodes,
sizes=[10, 10], num_nodes=self.data.num_nodes,
# sizes=[5, 5], num_nodes=self.data.num_nodes,
# batch_size=128, shuffle=True,
batch_size=64, shuffle=True,
num_workers=0)
def _gen_subgraph_loader(self):
self.subgraph_loader = NeighborSampler(self.data.edge_index, node_idx=None,
# sizes=[-1], num_nodes=self.data.num_nodes,
sizes=[10], num_nodes=self.data.num_nodes,
# batch_size=128, shuffle=False,
batch_size=64, shuffle=False,
num_workers=0)
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
graphsage = SAGE(dataset.num_features, dataset.num_classes, data)
graphsage.train_model()
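    # Added hedged follow-up: generate_embeddings runs layer-wise inference over
    # the whole graph and returns one logit row per node.
    embeddings = graphsage.generate_embeddings()
    print(embeddings.shape)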
| 4,883 | 39.363636 | 96 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
class SageNet(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super(SageNet, self).__init__()
self.num_layers = 2
self.convs = torch.nn.ModuleList()
self.convs.append(SAGEConv(in_channels, hidden_channels))
self.convs.append(SAGEConv(hidden_channels, out_channels))
def forward(self, x, adjs):
# `train_loader` computes the k-hop neighborhood of a batch of nodes,
# and returns, for each layer, a bipartite graph object, holding the
# bipartite edges `edge_index`, the index `e_id` of the original edges,
# and the size/shape `size` of the bipartite graph.
# Target nodes are also included in the source nodes so that one can
# easily apply skip-connections or add self-loops.
for i, (edge_index, _, size) in enumerate(adjs):
x_target = x[:size[1]] # Target nodes are always placed first.
x = self.convs[i]((x, x_target), edge_index)
if i != self.num_layers - 1:
x = F.relu(x)
x = F.dropout(x, p=0.5, training=self.training)
return F.log_softmax(x, dim=-1)
def inference(self, x_all, subgraph_loader, device):
# Compute representations of nodes layer by layer, using *all*
# available edges. This leads to faster computation in contrast to
# immediately computing the final representations of each batch.
for i in range(self.num_layers):
xs = []
for batch_size, n_id, adj in subgraph_loader:
edge_index, _, size = adj.to(device)
x = x_all[n_id].to(device)
x_target = x[:size[1]]
x = self.convs[i]((x, x_target), edge_index)
if i != self.num_layers - 1:
x = F.relu(x)
xs.append(x.cpu())
x_all = torch.cat(xs, dim=0)
return x_all
def reset_parameters(self):
for i in range(self.num_layers):
self.convs[i].reset_parameters()
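if __name__ == '__main__':
    # Added hedged sketch (assuming the same torch_geometric version used elsewhere
    # in this repository): forward() expects the per-layer bipartite `adjs` produced
    # by NeighborSampler, so a minimal driver looks like this.
    from torch_geometric.data import NeighborSampler
    x = torch.randn(6, 8)
    edge_index = torch.tensor([[0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0]])
    loader = NeighborSampler(edge_index, node_idx=None, sizes=[2, 2], num_nodes=6,
                             batch_size=3, shuffle=False)
    net = SageNet(in_channels=8, hidden_channels=16, out_channels=3)
    for batch_size, n_id, adjs in loader:
        print(net(x[n_id], adjs).shape)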
| 2,154 | 37.482143 | 79 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class GCNNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GCNNet, self).__init__()
self.conv1 = GCNConv(num_feats, 16, cached=True, add_self_loops=False)
self.conv2 = GCNConv(16, num_classes, cached=True, add_self_loops=False)
def forward(self, data):
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index, edge_weight)
return F.log_softmax(x, dim=-1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 781 | 31.583333 | 80 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gcn.gcn_net import GCNNet
import config
class GCN(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GCN, self).__init__()
self.logger = logging.getLogger('gcn')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GCNNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gcn = GCN(dataset.num_features, dataset.num_classes, data)
gcn.train_model() | 2,221 | 31.202899 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlp.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.mlp.mlpnet import MLPNet
import config
class MLP(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(MLP, self).__init__()
self.logger = logging.getLogger(__name__)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = MLPNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data.x)[self.data.train_mask]
# loss = F.nll_loss(output, self.data.y[self.data.train_mask])
            loss = torch.nn.CrossEntropyLoss()(output, self.data.y[self.data.train_mask].squeeze())
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data.x), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
def posterior(self):
self.model.eval()
posteriors = self.model(self.data.x)
for _, mask in self.data('test_mask'):
posteriors = posteriors[mask]
return posteriors
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'Cora'
dataset = Planetoid(config.RAW_DATA_PATH + dataset_name, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gcn = MLP(dataset.num_features, dataset.num_classes, data)
gcn.train_model() | 2,518 | 31.294872 | 107 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlpnet.py | from torch import nn
import torch.nn.functional as F
class MLPNet(nn.Module):
def __init__(self, input_size, num_classes):
super(MLPNet, self).__init__()
self.xent = nn.CrossEntropyLoss()
self.layers = nn.Sequential(
nn.Linear(input_size, 250),
nn.Linear(250, 100),
nn.Linear(100, num_classes)
)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.layers(x)
return F.softmax(x, dim=1)
def loss(self, nodes, labels):
scores = self.forward(nodes)
return self.xent(scores, labels.squeeze())
def reset_parameters(self):
return 0
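if __name__ == '__main__':
    # Added hedged sketch: loss() feeds the (already softmaxed) forward output into
    # CrossEntropyLoss, with labels given as class indices; toy tensors only.
    import torch
    net = MLPNet(input_size=10, num_classes=3)
    features = torch.randn(4, 10)
    labels = torch.randint(0, 3, (4,))
    print(net.loss(features, labels))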
| 668 | 23.777778 | 50 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_graph_partition.py | import logging
import time
import torch
from sklearn.model_selection import train_test_split
import numpy as np
from torch_geometric.data import Data
import torch_geometric as tg
import networkx as nx
from exp.exp import Exp
from lib_utils.utils import connected_component_subgraphs
from lib_graph_partition.graph_partition import GraphPartition
from lib_utils import utils
class ExpGraphPartition(Exp):
def __init__(self, args):
super(ExpGraphPartition, self).__init__(args)
self.logger = logging.getLogger('exp_graph_partition')
self.load_data()
self.train_test_split()
self.gen_train_graph()
self.graph_partition()
self.generate_shard_data()
def load_data(self):
self.data = self.data_store.load_raw_data()
def train_test_split(self):
if self.args['is_split']:
self.logger.info('splitting train/test data')
self.train_indices, self.test_indices = train_test_split(np.arange((self.data.num_nodes)), test_size=self.args['test_ratio'], random_state=100)
self.data_store.save_train_test_split(self.train_indices, self.test_indices)
self.data.train_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.train_indices))
self.data.test_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.test_indices))
else:
self.train_indices, self.test_indices = self.data_store.load_train_test_split()
self.data.train_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.train_indices))
self.data.test_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.test_indices))
def gen_train_graph(self):
        # delete a ratio of the edges and update the train graph
if self.args['ratio_deleted_edges'] != 0:
self.logger.debug("Before edge deletion. train data #.Nodes: %f, #.Edges: %f" % (
self.data.num_nodes, self.data.num_edges))
# self._ratio_delete_edges()
self.data.edge_index = self._ratio_delete_edges(self.data.edge_index)
# decouple train test edges.
edge_index = self.data.edge_index.numpy()
test_edge_indices = np.logical_or(np.isin(edge_index[0], self.test_indices),
np.isin(edge_index[1], self.test_indices))
train_edge_indices = np.logical_not(test_edge_indices)
edge_index_train = edge_index[:, train_edge_indices]
self.train_graph = nx.Graph()
self.train_graph.add_nodes_from(self.train_indices)
# use largest connected graph as train graph
if self.args['is_prune']:
self._prune_train_set()
# reconstruct a networkx train graph
for u, v in np.transpose(edge_index_train):
self.train_graph.add_edge(u, v)
self.logger.debug("After edge deletion. train graph #.Nodes: %f, #.Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
self.logger.debug("After edge deletion. train data #.Nodes: %f, #.Edges: %f" % (
self.data.num_nodes, self.data.num_edges))
self.data_store.save_train_data(self.data)
self.data_store.save_train_graph(self.train_graph)
def graph_partition(self):
if self.args['is_partition']:
self.logger.info('graph partitioning')
start_time = time.time()
partition = GraphPartition(self.args, self.train_graph, self.data)
self.community_to_node = partition.graph_partition()
partition_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % partition_time)
self.data_store.save_community_data(self.community_to_node)
else:
self.community_to_node = self.data_store.load_community_data()
def generate_shard_data(self):
self.logger.info('generating shard data')
self.shard_data = {}
for shard in range(self.args['num_shards']):
train_shard_indices = list(self.community_to_node[shard])
shard_indices = np.union1d(train_shard_indices, self.test_indices)
x = self.data.x[shard_indices]
y = self.data.y[shard_indices]
edge_index = utils.filter_edge_index_1(self.data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))
self.shard_data[shard] = data
self.data_store.save_shard_data(self.shard_data)
def _prune_train_set(self):
        # extract the maximum connected component
self.logger.debug("Before Prune... #. of Nodes: %f, #. of Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
self.train_graph = max(connected_component_subgraphs(self.train_graph), key=len)
self.logger.debug("After Prune... #. of Nodes: %f, #. of Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
# self.train_indices = np.array(self.train_graph.nodes)
def _ratio_delete_edges(self, edge_index):
edge_index = edge_index.numpy()
unique_indices = np.where(edge_index[0] < edge_index[1])[0]
unique_indices_not = np.where(edge_index[0] > edge_index[1])[0]
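        # Edges are sampled among the (u < v) directions only; the matching reverse
        # (v, u) entries are recovered below by encoding every edge as
        # min(u, v) * 2E + max(u, v), so both directions of a kept undirected edge
        # survive the deletion.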
remain_indices = np.random.choice(unique_indices,
int(unique_indices.shape[0] * (1.0 - self.args['ratio_deleted_edges'])),
replace=False)
remain_encode = edge_index[0, remain_indices] * edge_index.shape[1] * 2 + edge_index[1, remain_indices]
unique_encode_not = edge_index[1, unique_indices_not] * edge_index.shape[1] * 2 + edge_index[0, unique_indices_not]
sort_indices = np.argsort(unique_encode_not)
remain_indices_not = unique_indices_not[sort_indices[np.searchsorted(unique_encode_not, remain_encode, sorter=sort_indices)]]
remain_indices = np.union1d(remain_indices, remain_indices_not)
# self.data.edge_index = torch.from_numpy(edge_index[:, remain_indices])
return torch.from_numpy(edge_index[:, remain_indices])
| 6,423 | 44.560284 | 155 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_attack_unlearning.py | import logging
import time
from collections import defaultdict
import numpy as np
import torch
import torch_geometric as tg
from torch_geometric.data import Data
from scipy.spatial import distance
import config
from exp.exp import Exp
from lib_graph_partition.graph_partition import GraphPartition
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
from lib_utils import utils
class ExpAttackUnlearning(Exp):
def __init__(self, args):
super(ExpAttackUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_attack_unlearning')
# 1. respond to the unlearning requests
self.load_preprocessed_data()
# self.graph_unlearning_request_respond()
if self.args['repartition']:
with open(config.MODEL_PATH + self.args['dataset_name'] + '/' + self.args['target_model']+"_unlearned_indices") as file:
node_unlearning_indices = [line.rstrip() for line in file]
for unlearned_node in node_unlearning_indices:
self.graph_unlearning_request_respond(int(unlearned_node))
else:
self.graph_unlearning_request_respond()
        # 2. evaluate the attack performance
self.attack_graph_unlearning()
def load_preprocessed_data(self):
self.shard_data = self.data_store.load_shard_data()
self.raw_data = self.data_store.load_raw_data()
self.train_data = self.data_store.load_train_data()
self.train_graph = self.data_store.load_train_graph()
self.train_indices, self.test_indices = self.data_store.load_train_test_split()
self.community_to_node = self.data_store.load_community_data()
num_feats = self.train_data.num_features
num_classes = len(self.train_data.y.unique())
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def graph_unlearning_request_respond(self, node_unlearning_request=None):
# reindex the node ids
node_to_com = self.data_store.c2n_to_n2c(self.community_to_node)
train_indices_prune = list(node_to_com.keys())
        if node_unlearning_request is None:
# generate node unlearning requests
node_unlearning_indices = np.random.choice(train_indices_prune, self.args['num_unlearned_nodes'])
else:
node_unlearning_indices = np.array([node_unlearning_request])
        self.num_unlearned_edges = 0
unlearning_indices = defaultdict(list)
for node in node_unlearning_indices:
unlearning_indices[node_to_com[node]].append(node)
# delete a list of revoked nodes from train_graph
self.train_graph.remove_nodes_from(node_unlearning_indices)
# delete the revoked nodes from train_data
# by building unlearned data from unlearned train_graph
self.train_data.train_mask = torch.from_numpy(np.isin(np.arange(self.train_data.num_nodes), self.train_indices))
self.train_data.test_mask = torch.from_numpy(np.isin(np.arange(self.train_data.num_nodes), np.append(self.test_indices, node_unlearning_indices)))
# delete the revoked nodes from shard_data
self.shard_data_after_unlearning = {}
self.affected_shard=[]
for shard in range(self.args["num_shards"]):
train_shard_indices = list(self.community_to_node[shard])
# node unlearning
train_shard_indices = np.setdiff1d(train_shard_indices, unlearning_indices[shard])
shard_indices = np.union1d(train_shard_indices, self.test_indices)
x = self.train_data.x[shard_indices]
y = self.train_data.y[shard_indices]
edge_index = utils.filter_edge_index_1(self.train_data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))
self.shard_data_after_unlearning[shard] = data
self.num_unlearned_edges += self.shard_data[shard].num_edges - self.shard_data_after_unlearning[shard].num_edges
# find the affected shard model
if self.shard_data_after_unlearning[shard].num_nodes != self.shard_data[shard].num_nodes:
self.affected_shard.append(shard)
self.data_store.save_unlearned_data(self.train_graph, 'train_graph')
self.data_store.save_unlearned_data(self.train_data, 'train_data')
self.data_store.save_unlearned_data(self.shard_data_after_unlearning, 'shard_data')
        # retrain the corresponding shard model
if not self.args['repartition']:
for shard in self.affected_shard:
suffix = "unlearned_"+str(node_unlearning_indices[0])
self._train_shard_model(shard, suffix)
# (if re-partition, re-partition the remaining graph)
# re-train the shard model, save model and optimal weight score
if self.args['repartition']:
suffix="_repartition_unlearned_" + str(node_unlearning_indices[0])
self._repartition(suffix)
for shard in range(self.args["num_shards"]):
self._train_shard_model(shard, suffix)
def _repartition(self, suffix):
# load unlearned train_graph and train_data
train_graph = self.data_store.load_unlearned_data('train_graph')
train_data = self.data_store.load_unlearned_data('train_data')
# repartition
start_time = time.time()
partition = GraphPartition(self.args, train_graph, train_data)
community_to_node = partition.graph_partition()
partition_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % partition_time)
# save the new partition and shard
self.data_store.save_community_data(community_to_node, suffix)
self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, self.test_indices)
def _generate_unlearned_repartitioned_shard_data(self, train_data, community_to_node, test_indices):
self.logger.info('generating shard data')
shard_data = {}
for shard in range(self.args['num_shards']):
train_shard_indices = list(community_to_node[shard])
shard_indices = np.union1d(train_shard_indices, test_indices)
x = self.train_data.x[shard_indices]
y = self.train_data.y[shard_indices]
edge_index = utils.filter_edge_index_1(train_data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, test_indices))
shard_data[shard] = data
# self.data_store.save_unlearned_data(shard_data, 'shard_data_repartition')
return shard_data
def _train_shard_model(self, shard, suffix="unlearned"):
self.logger.info('training target models, shard %s' % shard)
# load shard data
self.target_model.data = self.shard_data_after_unlearning[shard]
# retrain shard model
self.target_model.train_model()
# replace shard model
device=torch.device("cpu")
self.target_model.device = device
self.data_store.save_target_model(0, self.target_model, shard, suffix)
# self.data_store.save_unlearned_target_model(0, self.target_model, shard, suffix)
def attack_graph_unlearning(self):
# load unlearned indices
with open(config.MODEL_PATH + self.args['dataset_name'] + "/" + self.args['target_model'] +"_unlearned_indices") as file:
unlearned_indices = [line.rstrip() for line in file]
# member sample query, label as 1
positive_posteriors = self._query_target_model(unlearned_indices, unlearned_indices)
# non-member sample query, label as 0
negative_posteriors = self._query_target_model(unlearned_indices, self.test_indices)
# evaluate attack performance, train multiple shadow models, or calculate posterior entropy, or directly calculate AUC.
self.evaluate_attack_performance(positive_posteriors, negative_posteriors)
def _query_target_model(self, unlearned_indices, test_indices):
# load unlearned data
train_data = self.data_store.load_unlearned_data('train_data')
# load optimal weight score
# optimal_weight=self.data_store.load_optimal_weight(0)
# calculate the final posterior, save as attack feature
self.logger.info('aggregating submodels')
posteriors_a, posteriors_b, posteriors_c =[],[],[]
for i in unlearned_indices:
community_to_node = self.data_store.load_community_data('')
shard_data = self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, int(i))
posteriors_a.append(self._generate_posteriors(shard_data, ''))
suffix="unlearned_" + str(i)
posteriors_b.append(self._generate_posteriors_unlearned(shard_data, suffix, i))
if self.args['repartition']:
suffix = "_repartition_unlearned_" + str(i)
community_to_node = self.data_store.load_community_data(suffix)
shard_data = self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, int(i))
suffix = "__repartition_unlearned_" + str(i)
posteriors_c.append(self._generate_posteriors(shard_data, suffix))
return posteriors_a, posteriors_b, posteriors_c
def _generate_posteriors_unlearned(self, shard_data, suffix, unlearned_indice):
import glob
model_path=glob.glob(config.MODEL_PATH+self.args['dataset_name']+"/*_1unlearned_"+str(unlearned_indice))
if not model_path:
self.logger.info("No corresponding unlearned shard model for node %s" % str(unlearned_indice))
return torch.tensor([0]*6)
else:
affected_shard = int(model_path[0].split('/')[-1].split('_')[-4])
posteriors = []
for shard in range(self.args['num_shards']):
if shard == affected_shard:
# load the retrained the shard model
self.data_store.load_target_model(0, self.target_model, shard, suffix)
else:
# self.target_model.model.reset_parameters()
# load unaffected shard model
self.data_store.load_target_model(0, self.target_model, shard, '')
self.device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
self.target_model.model = self.target_model.model.to(self.device)
self.target_model.data = shard_data[shard].to(self.device)
posteriors.append(self.target_model.posterior())
return torch.mean(torch.cat(posteriors, dim=0), dim=0)
def _generate_posteriors(self, shard_data, suffix):
posteriors = []
for shard in range(self.args['num_shards']):
# self.target_model.model.reset_parameters()
self.data_store.load_target_model(0, self.target_model, shard, suffix)
self.device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
self.target_model.model = self.target_model.model.to(self.device)
self.target_model.data = shard_data[shard].to(self.device)
posteriors.append(self.target_model.posterior())
return torch.mean(torch.cat(posteriors, dim=0), dim=0)
def evaluate_attack_performance(self, positive_posteriors, negative_posteriors):
        # construct attack data
        label = torch.cat((torch.ones(len(positive_posteriors[0])), torch.zeros(len(negative_posteriors[0]))))
        data = {}
        # posteriors from the repartitioned models (index 2) only exist when repartition is enabled
        for i in range(3 if self.args['repartition'] else 2):
            data[i] = torch.cat((torch.stack(positive_posteriors[i]), torch.stack(negative_posteriors[i])), 0)
# calculate l2 distance
model_b_distance = self._calculate_distance(data[0], data[1])
# directly calculate AUC with feature and labels
attack_auc_b = self.evaluate_attack_with_AUC(model_b_distance, label)
        self.logger.info("Attack_Model_B AUC: %s" % attack_auc_b)
        if self.args['repartition']:
            model_c_distance = self._calculate_distance(data[0], data[2])
            attack_auc_c = self.evaluate_attack_with_AUC(model_c_distance, label)
            self.logger.info("Attack_Model_C AUC: %s" % attack_auc_c)
def evaluate_attack_with_AUC(self, data, label):
from sklearn.metrics import roc_auc_score
self.logger.info("Directly calculate the attack AUC")
return roc_auc_score(label, data.reshape(-1, 1))
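    # Illustrative note (editor's sketch with toy numbers): roc_auc_score treats the distance
    # as a membership score, e.g.
    #   roc_auc_score([1, 1, 0, 0], [0.9, 0.8, 0.3, 0.2]) == 1.0
    # since every member (label 1) receives a larger distance than every non-member.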
    def _calculate_distance(self, data0, data1, distance='l2_norm'):
        if distance == 'l2_norm':
            return np.array([np.linalg.norm(data0[i] - data1[i]) for i in range(len(data0))])
        elif distance == 'direct_diff':
return data0 - data1
else:
raise Exception("Unsupported distance")
| 13,321 | 48.895131 | 154 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp.py | import logging
from lib_dataset.data_store import DataStore
class Exp:
def __init__(self, args):
self.logger = logging.getLogger('exp')
self.args = args
self.data_store = DataStore(args)
def load_data(self):
pass
| 258 | 16.266667 | 46 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_node_edge_unlearning.py | import logging
import pickle
import time
from collections import defaultdict
import numpy as np
import torch
from torch_geometric.data import Data
import config
from exp.exp import Exp
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_gnn_model.gat.gat import GAT
from lib_gnn_model.gin.gin import GIN
from lib_gnn_model.gcn.gcn import GCN
from lib_gnn_model.mlp.mlp import MLP
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
from lib_utils import utils
class ExpNodeEdgeUnlearning(Exp):
def __init__(self, args):
super(ExpNodeEdgeUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_node_edge_unlearning')
self.target_model_name = self.args['target_model']
self.load_data()
self.determine_target_model()
self.run_exp()
def run_exp(self):
# unlearning efficiency
run_f1 = np.empty((0))
unlearning_time = np.empty((0))
for run in range(self.args['num_runs']):
self.logger.info("Run %f" % run)
self.train_target_models(run)
aggregate_f1_score = self.aggregate(run)
# node_unlearning_time = self.unlearning_time_statistic()
node_unlearning_time = 0
run_f1 = np.append(run_f1, aggregate_f1_score)
unlearning_time = np.append(unlearning_time, node_unlearning_time)
self.num_unlearned_edges = 0
# model utility
self.f1_score_avg = np.average(run_f1)
self.f1_score_std = np.std(run_f1)
self.unlearning_time_avg = np.average(unlearning_time)
self.unlearning_time_std = np.std(unlearning_time)
self.logger.info(
"%s %s %s %s" % (self.f1_score_avg, self.f1_score_std, self.unlearning_time_avg, self.unlearning_time_std))
def load_data(self):
self.shard_data = self.data_store.load_shard_data()
self.raw_data = self.data_store.load_raw_data()
self.train_data = self.data_store.load_train_data()
self.unlearned_shard_data = self.shard_data
def determine_target_model(self):
num_feats = self.train_data.num_features
num_classes = len(self.train_data.y.unique())
if not self.args['is_use_batch']:
if self.target_model_name == 'SAGE':
self.target_model = SAGE(num_feats, num_classes)
elif self.target_model_name == 'GCN':
self.target_model = GCN(num_feats, num_classes)
elif self.target_model_name == 'GAT':
self.target_model = GAT(num_feats, num_classes)
elif self.target_model_name == 'GIN':
self.target_model = GIN(num_feats, num_classes)
else:
raise Exception('unsupported target model')
else:
if self.target_model_name == 'MLP':
self.target_model = MLP(num_feats, num_classes)
else:
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def train_target_models(self, run):
if self.args['is_train_target_model']:
self.logger.info('training target models')
self.time = {}
for shard in range(self.args['num_shards']):
self.time[shard] = self._train_model(run, shard)
def aggregate(self, run):
self.logger.info('aggregating submodels')
# posteriors, true_label = self.generate_posterior()
aggregator = Aggregator(run, self.target_model, self.train_data, self.unlearned_shard_data, self.args)
aggregator.generate_posterior()
self.aggregate_f1_score = aggregator.aggregate()
self.logger.info("Final Test F1: %s" % (self.aggregate_f1_score,))
return self.aggregate_f1_score
def _generate_unlearning_request(self, num_unlearned="assign"):
node_list = []
for key, value in self.community_to_node.items():
# node_list.extend(value.tolist())
node_list.extend(value)
if num_unlearned == "assign":
num_of_unlearned_nodes = self.args['num_unlearned_nodes']
elif num_unlearned == "ratio":
num_of_unlearned_nodes = int(self.args['ratio_unlearned_nodes'] * len(node_list))
if self.args['unlearning_request'] == 'random':
unlearned_nodes_indices = np.random.choice(node_list, num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'top1':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=True)
unlearned_nodes_indices = np.random.choice(sorted_shards[0][1], num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'adaptive':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=True)
candidate_list = np.concatenate([sorted_shards[i][1] for i in range(int(self.args['num_shards']/2)+1)], axis=0)
unlearned_nodes_indices = np.random.choice(candidate_list, num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'last5':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=False)
candidate_list = np.concatenate([sorted_shards[i][1] for i in range(int(self.args['num_shards']/2)+1)], axis=0)
unlearned_nodes_indices = np.random.choice(candidate_list, num_of_unlearned_nodes, replace=False)
return unlearned_nodes_indices
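    # Illustrative note (editor's sketch, hypothetical shard sizes): for the 'adaptive'
    # request with num_shards=4, shards are sorted by size (largest first) and the candidate
    # pool is the union of the top int(4/2)+1 = 3 shards, from which
    # np.random.choice(..., replace=False) draws the nodes to unlearn.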
def unlearning_time_statistic(self):
if self.args['is_train_target_model'] and self.args['num_shards'] != 1:
# random sample 5% nodes, find their belonging communities
unlearned_nodes = self._generate_unlearning_request(num_unlearned="ratio")
belong_community = []
for sample_node in range(len(unlearned_nodes)):
for community, node in self.community_to_node.items():
if np.in1d(unlearned_nodes[sample_node], node).any():
belong_community.append(community)
# calculate the total unlearning time and group unlearning time
group_unlearning_time = []
node_unlearning_time = []
for shard in range(self.args['num_shards']):
if belong_community.count(shard) != 0:
group_unlearning_time.append(self.time[shard])
node_unlearning_time.extend([float(self.time[shard]) for j in range(belong_community.count(shard))])
return node_unlearning_time
elif self.args['is_train_target_model'] and self.args['num_shards'] == 1:
return self.time[0]
else:
return 0
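    # Illustrative note (editor's sketch): np.in1d is used above as a membership test, e.g.
    # np.in1d(3, [1, 3, 5]).any() -> True, so each sampled node is attributed to the
    # community whose node list contains it.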
def _train_model(self, run, shard):
self.logger.info('training target models, run %s, shard %s' % (run, shard))
start_time = time.time()
self.target_model.data = self.unlearned_shard_data[shard]
self.target_model.train_model()
train_time = time.time() - start_time
self.data_store.save_target_model(run, self.target_model, shard)
return train_time
| 7,194 | 42.606061 | 123 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_unlearning.py | import logging
import time
import numpy as np
from exp.exp import Exp
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_gnn_model.gat.gat import GAT
from lib_gnn_model.gin.gin import GIN
from lib_gnn_model.gcn.gcn import GCN
from lib_gnn_model.mlp.mlp import MLP
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
class ExpUnlearning(Exp):
def __init__(self, args):
super(ExpUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_unlearning')
self.target_model_name = self.args['target_model']
self.num_opt_samples = self.args['num_opt_samples']
self.load_data()
self.determine_target_model()
run_f1 = np.empty((0))
unlearning_time = np.empty((0))
for run in range(self.args['num_runs']):
self.logger.info("Run %f" % run)
self.train_target_models(run)
aggregate_f1_score = self.aggregate(run)
node_unlearning_time = self.unlearning_time_statistic()
run_f1 = np.append(run_f1, aggregate_f1_score)
unlearning_time = np.append(unlearning_time, node_unlearning_time)
self.f1_score_avg = np.average(run_f1)
self.f1_score_std = np.std(run_f1)
self.unlearning_time_avg = np.average(unlearning_time)
self.unlearning_time_std = np.std(unlearning_time)
self.logger.info("%s %s %s %s" % (self.f1_score_avg, self.f1_score_std, self.unlearning_time_avg, self.unlearning_time_std))
def load_data(self):
self.shard_data = self.data_store.load_shard_data()
self.data = self.data_store.load_raw_data()
def determine_target_model(self):
num_feats = self.data.num_features
num_classes = len(self.data.y.unique())
if not self.args['is_use_batch']:
if self.target_model_name == 'SAGE':
self.target_model = SAGE(num_feats, num_classes)
elif self.target_model_name == 'GCN':
self.target_model = GCN(num_feats, num_classes)
elif self.target_model_name == 'GAT':
self.target_model = GAT(num_feats, num_classes)
elif self.target_model_name == 'GIN':
self.target_model = GIN(num_feats, num_classes)
else:
raise Exception('unsupported target model')
else:
if self.target_model_name == 'MLP':
self.target_model = MLP(num_feats, num_classes)
else:
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def train_target_models(self, run):
if self.args['is_train_target_model']:
self.logger.info('training target models')
self.time = {}
for shard in range(self.args['num_shards']):
self.time[shard] = self._train_model(run, shard)
def aggregate(self, run):
self.logger.info('aggregating submodels')
start_time = time.time()
aggregator = Aggregator(run, self.target_model, self.data, self.shard_data, self.args)
aggregator.generate_posterior()
self.aggregate_f1_score = aggregator.aggregate()
aggregate_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % aggregate_time)
self.logger.info("Final Test F1: %s" % (self.aggregate_f1_score,))
return self.aggregate_f1_score
def unlearning_time_statistic(self):
if self.args['is_train_target_model'] and self.args['num_shards'] != 1:
self.community_to_node = self.data_store.load_community_data()
node_list = []
for key, value in self.community_to_node.items():
node_list.extend(value)
# random sample 5% nodes, find their belonging communities
sample_nodes = np.random.choice(node_list, int(0.05 * len(node_list)))
belong_community = []
for sample_node in range(len(sample_nodes)):
for community, node in self.community_to_node.items():
if np.in1d(sample_nodes[sample_node], node).any():
belong_community.append(community)
# calculate the total unlearning time and group unlearning time
group_unlearning_time = []
node_unlearning_time = []
for shard in range(self.args['num_shards']):
if belong_community.count(shard) != 0:
group_unlearning_time.append(self.time[shard])
node_unlearning_time.extend([float(self.time[shard]) for j in range(belong_community.count(shard))])
return node_unlearning_time
elif self.args['is_train_target_model'] and self.args['num_shards'] == 1:
return self.time[0]
else:
return 0
def _train_model(self, run, shard):
self.logger.info('training target models, run %s, shard %s' % (run, shard))
start_time = time.time()
self.target_model.data = self.shard_data[shard]
self.target_model.train_model()
train_time = time.time() - start_time
self.data_store.save_target_model(run, self.target_model, shard)
self.logger.info("Model training time: %s" % (train_time))
return train_time
| 5,345 | 39.195489 | 132 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_dataset/data_store.py | import os
import pickle
import logging
import shutil
import numpy as np
import torch
from torch_geometric.datasets import Planetoid, Coauthor
import torch_geometric.transforms as T
import config
class DataStore:
def __init__(self, args):
self.logger = logging.getLogger('data_store')
self.args = args
self.dataset_name = self.args['dataset_name']
self.num_features = {
"cora": 1433,
"pubmed": 500,
"citeseer": 3703,
"Coauthor_CS": 6805,
"Coauthor_Phys": 8415
}
self.partition_method = self.args['partition_method']
self.num_shards = self.args['num_shards']
self.target_model = self.args['target_model']
self.determine_data_path()
def determine_data_path(self):
embedding_name = '_'.join(('embedding', self._extract_embedding_method(self.partition_method),
str(self.args['ratio_deleted_edges'])))
community_name = '_'.join(('community', self.partition_method, str(self.num_shards),
str(self.args['ratio_deleted_edges'])))
shard_name = '_'.join(('shard_data', self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
target_model_name = '_'.join((self.target_model, self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
optimal_weight_name = '_'.join((self.target_model, self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
processed_data_prefix = config.PROCESSED_DATA_PATH + self.dataset_name + "/"
self.train_test_split_file = processed_data_prefix + "train_test_split" + str(self.args['test_ratio'])
self.train_data_file = processed_data_prefix + "train_data"
self.train_graph_file = processed_data_prefix + "train_graph"
self.embedding_file = processed_data_prefix + embedding_name
self.community_file = processed_data_prefix + community_name
self.shard_file = processed_data_prefix + shard_name
        self.unlearned_file = processed_data_prefix + '_'.join(('unlearned', str(self.args['num_unlearned_nodes'])))
self.target_model_file = config.MODEL_PATH + self.dataset_name + '/' + target_model_name
self.optimal_weight_file = config.ANALYSIS_PATH + 'optimal/' + self.dataset_name + '/' + optimal_weight_name
self.posteriors_file = config.ANALYSIS_PATH + 'posteriors/' + self.dataset_name + '/' + target_model_name
dir_lists = [s + self.dataset_name for s in [config.PROCESSED_DATA_PATH,
config.MODEL_PATH,
config.ANALYSIS_PATH + 'optimal/',
config.ANALYSIS_PATH + 'posteriors/']]
for dir in dir_lists:
self._check_and_create_dirs(dir)
def _check_and_create_dirs(self, folder):
if not os.path.exists(folder):
try:
self.logger.info("checking directory %s", folder)
os.makedirs(folder, exist_ok=True)
self.logger.info("new directory %s created", folder)
except OSError as error:
self.logger.info("deleting old and creating new empty %s", folder)
shutil.rmtree(folder)
os.mkdir(folder)
self.logger.info("new empty directory %s created", folder)
else:
self.logger.info("folder %s exists, do not need to create again.", folder)
def load_raw_data(self):
self.logger.info('loading raw data')
if not self.args['is_use_node_feature']:
self.transform = T.Compose([
T.OneHotDegree(-2, cat=False) # use only node degree as node feature.
])
else:
self.transform = None
if self.dataset_name in ["cora", "pubmed", "citeseer"]:
dataset = Planetoid(config.RAW_DATA_PATH, self.dataset_name, transform=T.NormalizeFeatures())
labels = np.unique(dataset.data.y.numpy())
elif self.dataset_name in ["Coauthor_CS", "Coauthor_Phys"]:
if self.dataset_name == "Coauthor_Phys":
dataset = Coauthor(config.RAW_DATA_PATH, name="Physics", pre_transform=self.transform)
else:
dataset = Coauthor(config.RAW_DATA_PATH, name="CS", pre_transform=self.transform)
else:
raise Exception('unsupported dataset')
data = dataset[0]
return data
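    # Illustrative usage (editor's sketch, assuming the default PyG folder layout):
    #   from torch_geometric.datasets import Planetoid
    #   import torch_geometric.transforms as T
    #   data = Planetoid(config.RAW_DATA_PATH, "cora", transform=T.NormalizeFeatures())[0]
    # returns a Data object with data.x of shape (num_nodes, 1433) and integer labels in
    # data.y, matching the "cora" entry of self.num_features above.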
def save_train_data(self, train_data):
self.logger.info('saving train data')
pickle.dump(train_data, open(self.train_data_file, 'wb'))
def load_train_data(self):
self.logger.info('loading train data')
return pickle.load(open(self.train_data_file, 'rb'))
def save_train_graph(self, train_data):
self.logger.info('saving train graph')
pickle.dump(train_data, open(self.train_graph_file, 'wb'))
def load_train_graph(self):
self.logger.info('loading train graph')
return pickle.load(open(self.train_graph_file, 'rb'))
def save_train_test_split(self, train_indices, test_indices):
self.logger.info('saving train test split data')
pickle.dump((train_indices, test_indices), open(self.train_test_split_file, 'wb'))
def load_train_test_split(self):
self.logger.info('loading train test split data')
return pickle.load(open(self.train_test_split_file, 'rb'))
def save_embeddings(self, embeddings):
self.logger.info('saving embedding data')
pickle.dump(embeddings, open(self.embedding_file, 'wb'))
def load_embeddings(self):
self.logger.info('loading embedding data')
return pickle.load(open(self.embedding_file, 'rb'))
def save_community_data(self, community_to_node, suffix=''):
self.logger.info('saving community data')
pickle.dump(community_to_node, open(self.community_file + suffix, 'wb'))
def load_community_data(self, suffix=''):
        self.logger.info('loading community data from: %s' % (self.community_file + suffix))
return pickle.load(open(self.community_file + suffix, 'rb'))
def c2n_to_n2c(self, community_to_node):
node_list = []
for i in range(self.num_shards):
node_list.extend(list(community_to_node.values())[i])
node_to_community = {}
for comm, nodes in dict(community_to_node).items():
for node in nodes:
# Map node id back to original graph
# node_to_community[node_list[node]] = comm
node_to_community[node] = comm
return node_to_community
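    # Illustrative note (editor's sketch with a toy mapping): c2n_to_n2c inverts the
    # community dict, e.g. {0: [1, 4], 1: [2]} becomes {1: 0, 4: 0, 2: 1}.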
def save_shard_data(self, shard_data):
self.logger.info('saving shard data')
pickle.dump(shard_data, open(self.shard_file, 'wb'))
def load_shard_data(self):
self.logger.info('loading shard data')
return pickle.load(open(self.shard_file, 'rb'))
def load_unlearned_data(self, suffix):
file_path = '_'.join((self.unlearned_file, suffix))
self.logger.info('loading unlearned data from %s' % file_path)
return pickle.load(open(file_path, 'rb'))
def save_unlearned_data(self, data, suffix):
self.logger.info('saving unlearned data %s' % suffix)
pickle.dump(data, open('_'.join((self.unlearned_file, suffix)), 'wb'))
def save_target_model(self, run, model, shard, suffix=''):
if self.args["exp"] in ["node_edge_unlearning", "attack_unlearning"]:
model_path = '_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))) + suffix
model.save_model(model_path)
else:
model.save_model(self.target_model_file + '_' + str(shard) + '_' + str(run))
# model.save_model(self.target_model_file + '_' + str(shard))
def load_target_model(self, run, model, shard, suffix=''):
if self.args["exp"] == "node_edge_unlearning":
model.load_model(
'_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))))
elif self.args["exp"] == "attack_unlearning":
model_path = '_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))) + suffix
print("loading target model from:" + model_path)
device = torch.device('cpu')
model.load_model(model_path)
            model.device = device
else:
# model.load_model(self.target_model_file + '_' + str(shard) + '_' + str(run))
model.load_model(self.target_model_file + '_' + str(shard) + '_' + str(0))
def save_optimal_weight(self, weight, run):
torch.save(weight, self.optimal_weight_file + '_' + str(run))
def load_optimal_weight(self, run):
return torch.load(self.optimal_weight_file + '_' + str(run))
def save_posteriors(self, posteriors, run, suffix=''):
torch.save(posteriors, self.posteriors_file + '_' + str(run) + suffix)
def load_posteriors(self, run):
return torch.load(self.posteriors_file + '_' + str(run))
def _extract_embedding_method(self, partition_method):
return partition_method.split('_')[0]
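    # Illustrative note (editor's sketch, hypothetical value): a partition_method such as
    # "sage_km" would map to the embedding method "sage", since only the token before the
    # first underscore is kept.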
| 9,583 | 44.421801 | 129 | py |