#!/usr/bin/python
# Source: EtiCui/Msc-UdeS, dataAnalysis/msd.py (MIT license)
""" Functions to calculate the mean-square displacement from a LAMMPS trajectory
Usage:
#Must be in pythonpath or working directory
from msd import msd
msd_df = msd(atom_type,first_frame,last_frame)
Requirement:
python2
numpy
dump_dataframe.py
pandas
TODO:
Parallelisation
Add a function for a trajectory in a single file
"""
from dump_dataframe import read_dump
import numpy as np
import pandas as pd
from glob import glob
def msd(atom_type=3, first_frame=-1000, last_frame=-1):
    """ Function to calculate the mean-square displacement (in each direction and the total MSD)
    of a trajectory. Reads all the dumps to create an array with the time evolution of
    the positions for each particle of an atom_type
Args:
----
    atom_type(int): The atom type of the atoms for which to calculate the MSD
first_frame(int): The first frame to start the msd
last_frame(int): The last frame for the msd
Returns:
----
    msd(dataframe): A dataframe with the time as index and msd x, msd y, msd z and total as columns
"""
# List of all the dump in the trajectory
complete_trajectory = glob("*dump*")
# sort the list according to the number in the filename
complete_trajectory.sort(key=lambda f: int(filter(str.isdigit, f)))
# consider only the desired frames
desired_trajectory = complete_trajectory[first_frame:last_frame]
# Initialize the lists for the positions and timestep
x = []
y = []
z = []
timesteps = []
for step in desired_trajectory:
        # read the dump for each step
dump = read_dump(step, wrap=False)
timestep = dump["step"]
atom_df = dump["atom_df"]
        # select only the useful columns
msd_col_list = ["type", "xu", "yu", "zu"]
msd_df = atom_df[msd_col_list]
# choose only the wanted atom_type
msd_df = msd_df[msd_df["type"] == atom_type]
# drop the now useless type column
msd_df = msd_df.drop(["type"], axis=1)
        # append each value to the lists
timesteps.append(timestep)
x.append(msd_df.xu.values.tolist())
y.append(msd_df.yu.values.tolist())
z.append(msd_df.zu.values.tolist())
    # Convert lists to arrays and transpose them, so the rows will be the particles
    # and the columns the steps
timesteps = np.array(timesteps).T
x = np.array(x).T
y = np.array(y).T
z = np.array(z).T
msd = []
n = 1
while n < len(desired_trajectory):
# calculate the delta_t
delta_t = timesteps[n] - timesteps[0]
# calculate (x(t+n)-x(t))**2 and the mean over all the particles and
# the same delta_t
x_diff = x[:, n:] - x[:, :-n]
msd_x = np.mean(x_diff**2)
y_diff = y[:, n:] - y[:, :-n]
msd_y = np.mean(y_diff**2)
z_diff = z[:, n:] - z[:, :-n]
msd_z = np.mean(z_diff**2)
msd.append([delta_t, msd_x, msd_y, msd_z, msd_x + msd_y + msd_z])
n += 1
msd = np.array(msd)
msd_df = pd.DataFrame(msd[:, 1:], index=msd[:, 0],
columns=["x", "y", "z", "total"])
msd_df.index.name = "temps"
return msd_df
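# A minimal usage sketch (not part of the original module), assuming the LAMMPS
# dump files matching *dump* are present in the working directory as described
# in the module docstring; the output filename is arbitrary:
if __name__ == "__main__":
    msd_df = msd(atom_type=3, first_frame=-1000, last_frame=-1)
    msd_df.to_csv("msd.csv")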
# Source: CamDavidsonPilon/lifelines, lifelines/datasets/dfcv_dataset.py (MIT license)
# -*- coding: utf-8 -*-
import pandas as pd
from lifelines.utils import add_covariate_to_timeline
from lifelines.utils import to_long_format
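# This module builds `dfcv`, a small long-format survival dataset with a
# time-varying covariate `z`: `df` holds one row per subject (id, event time,
# event flag, group) and is converted to start/stop form with to_long_format,
# while `cv` supplies the value of `z` for each subject at each observation
# time; add_covariate_to_timeline merges the two into `dfcv`.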
df = pd.DataFrame(
[
[1, 3, True, 1],
[6, 4, False, 0],
[3, 5, True, 1],
[2, 5, False, 1],
[4, 6, True, 1],
[7, 7, True, 0],
[8, 8, False, 0],
[5, 8, False, 1],
[9, 9, True, 0],
[10, 10, True, 0],
],
columns=["id", "time", "event", "group"],
)
df = to_long_format(df, "time")
cv = pd.DataFrame.from_records(
[
{"id": 1, "z": 0, "time": 0},
{"id": 6, "z": 1, "time": 0},
{"id": 3, "z": 1, "time": 0},
{"id": 2, "z": 0, "time": 0},
{"id": 4, "z": 0, "time": 0},
{"id": 7, "z": 0, "time": 0},
{"id": 8, "z": 0, "time": 0},
{"id": 5, "z": 0, "time": 0},
{"id": 9, "z": 0, "time": 0},
{"id": 10, "z": 0, "time": 0},
{"id": 1, "z": 0, "time": 3},
{"id": 6, "z": 1, "time": 3},
{"id": 3, "z": 1, "time": 3},
{"id": 2, "z": 0, "time": 3},
{"id": 4, "z": 0, "time": 3},
{"id": 7, "z": 0, "time": 3},
{"id": 8, "z": 0, "time": 3},
{"id": 5, "z": 0, "time": 3},
{"id": 9, "z": 0, "time": 3},
{"id": 10, "z": 1, "time": 3},
{"id": 6, "z": 1, "time": 4},
{"id": 3, "z": 1, "time": 4},
{"id": 2, "z": 0, "time": 4},
{"id": 4, "z": 0, "time": 4},
{"id": 7, "z": 0, "time": 4},
{"id": 8, "z": 0, "time": 4},
{"id": 5, "z": 0, "time": 4},
{"id": 9, "z": 0, "time": 4},
{"id": 10, "z": 1, "time": 4},
{"id": 3, "z": 1, "time": 5},
{"id": 2, "z": 0, "time": 5},
{"id": 4, "z": 0, "time": 5},
{"id": 7, "z": 1, "time": 5},
{"id": 8, "z": 0, "time": 5},
{"id": 5, "z": 0, "time": 5},
{"id": 9, "z": 1, "time": 5},
{"id": 10, "z": 1, "time": 5},
{"id": 4, "z": 0, "time": 6},
{"id": 7, "z": 1, "time": 6},
{"id": 8, "z": 0, "time": 6},
{"id": 5, "z": 1, "time": 6},
{"id": 9, "z": 1, "time": 6},
{"id": 10, "z": 1, "time": 6},
{"id": 7, "z": 1, "time": 7},
{"id": 8, "z": 0, "time": 7},
{"id": 5, "z": 1, "time": 7},
{"id": 9, "z": 1, "time": 7},
{"id": 10, "z": 1, "time": 7},
{"id": 8, "z": 0, "time": 8},
{"id": 5, "z": 1, "time": 8},
{"id": 9, "z": 1, "time": 8},
{"id": 10, "z": 1, "time": 8},
{"id": 9, "z": 1, "time": 9},
{"id": 10, "z": 1, "time": 9},
]
)
dfcv = add_covariate_to_timeline(df, cv, "id", "time", "event", add_enum=False)
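# A hedged usage sketch (not part of the original file): `dfcv` is typically
# passed to lifelines' CoxTimeVaryingFitter. The column names below assume the
# start/stop long format produced above and may differ between lifelines versions.
# from lifelines import CoxTimeVaryingFitter
# ctv = CoxTimeVaryingFitter()
# ctv.fit(dfcv, id_col="id", event_col="event", start_col="start", stop_col="stop")
# ctv.print_summary()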
# Source: hahnicity/pytrader, pytrader/algorithms/large_move_diff.py (GPL-2.0 license)
from collections import namedtuple
from getpass import getpass
import matplotlib.pyplot as plt
from numpy import append, array, diff
from pandas import DataFrame
from redis import StrictRedis
from sklearn.ensemble import RandomForestClassifier
from zipline.api import order_percent, order_target, record
from pytrader.exceptions import RecordsNotFoundError
from pytrader.gatherer import gather_data_with_single_process_client
from pytrader.main import get_authenticated_data_impl
from pytrader.storage import pull_from_redis, push_to_redis
def _calc_return(new, old):
return (new - old) / old
def get_x_point(context, data, ticker, move_return):
date = data[ticker]["dt"].strftime("%Y-%m-%d")
pytrader_data = context.pytrader_data[ticker].loc[date].values
return append(pytrader_data, move_return)
def initialize(context):
context.pytrader_data = {}
context.model = RandomForestClassifier()
context.StockTuple = namedtuple(
"StockTuple", ["ticker", "days_after", "close", "move_return", "prediction"]
)
context.x = []
context.y = []
context.yesterday_price = {}
context.number_days_after = 1
context.data_points_necessary = 50
context.data_countdowns = []
context.to_terminate = []
context.threshold = .05
context.predictions = []
def calculate_diffs(df):
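    """Return the day-over-day fractional change of every column of df, as 'diff_'-prefixed columns (first row padded with 0)."""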
diff_cols = ["diff_{}".format(key) for key in df.keys()]
diff_vals = [append(0, diff(df[key].values) / df[key].values[:-1]) for key in df.keys()]
return DataFrame(array(diff_vals).transpose(), index=df.index.values, columns=diff_cols)
def post_initialize(context, data):
"""
    Since `initialize` doesn't actually let us see when our simulation is
    starting/ending, we need to create a `post_initialize` function to handle
    gathering extra data from pytrader.
"""
if context.sim_params.period_start == data[data.keys()[0]]["dt"]:
redis = StrictRedis(host="localhost", port=6379, db=0)
data_impl = get_authenticated_data_impl("grehm87@gmail.com", getpass())
# Since we are currently using YCharts our data schema is a little weird
start_date = context.sim_params.period_start.strftime("%Y-%m-%d")
end_date = context.sim_params.period_end.strftime("%Y-%m-%d")
for ticker in data.keys():
try:
pytrader_data = pull_from_redis(redis, ticker, start_date, end_date)
except RecordsNotFoundError:
pytrader_data = gather_data_with_single_process_client(data_impl, ticker, None, start_date, end_date)
push_to_redis(redis, pytrader_data, ticker)
del pytrader_data["price"] # Delete the price col, we don't need it
pytrader_data = calculate_diffs(pytrader_data)
context.pytrader_data[ticker] = pytrader_data
def handle_countdowns(context, data):
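    """Decrement the waiting period of each pending large move; once it reaches zero, store a training sample (features, whether the price rose) and score any prediction that was made for it."""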
countdown_idx_to_remove = []
for idx, stock_tuple in enumerate(context.data_countdowns):
ticker = stock_tuple.ticker
countdown = stock_tuple.days_after - 1
if countdown == 0:
context.x.append(get_x_point(context, data, stock_tuple.ticker, stock_tuple.move_return))
context.y.append(_calc_return(data[ticker]["close"], stock_tuple.close) > 0)
countdown_idx_to_remove.append(idx)
if stock_tuple.prediction is not None:
context.predictions.append(
(_calc_return(data[ticker]["close"], stock_tuple.close) > 0) ==
stock_tuple.prediction
)
else:
context.data_countdowns[idx] = context.StockTuple(
stock_tuple.ticker, countdown, *stock_tuple[2:]
)
context.data_countdowns = [
stock_tuple for idx, stock_tuple in enumerate(context.data_countdowns)
if idx not in countdown_idx_to_remove
]
def handle_price_histories(context, data):
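    """Track each ticker's previous close and queue any daily move larger than context.threshold for follow-up after context.number_days_after days."""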
for ticker, stock_data in data.items():
if ticker not in context.yesterday_price:
context.yesterday_price[ticker] = stock_data["close"]
elif abs(_calc_return(stock_data["close"], context.yesterday_price[ticker])) > context.threshold:
context.data_countdowns.append(context.StockTuple(
ticker,
context.number_days_after,
stock_data["close"],
_calc_return(stock_data["close"], context.yesterday_price[ticker]),
None
))
context.yesterday_price[ticker] = stock_data["close"]
else:
context.yesterday_price[ticker] = stock_data["close"]
def handle_terminations(context):
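    """Close out open positions whose holding period has elapsed."""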
idx_to_remove = []
for idx, position in enumerate(context.to_terminate):
ticker = position[0]
countdown = position[1] - 1
if countdown == 0:
order_target(ticker, 0)
idx_to_remove.append(idx)
else:
context.to_terminate[idx] = (ticker, countdown)
context.to_terminate = [
data for idx, data in enumerate(context.to_terminate) if idx not in idx_to_remove
]
def handle_data(context, data):
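    """Per-bar driver: update bookkeeping and, once enough samples have accumulated, fit the classifier and trade any newly detected large moves."""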
post_initialize(context, data)
handle_terminations(context)
handle_countdowns(context, data)
old_data_counts = len(context.data_countdowns)
handle_price_histories(context, data)
new_data_counts = len(context.data_countdowns)
if len(context.x) > context.data_points_necessary and new_data_counts > old_data_counts:
context.model.fit(context.x, context.y)
new_counts = context.data_countdowns[old_data_counts:]
for idx, stock_tuple in enumerate(new_counts):
prediction = context.model.predict(
get_x_point(context, data, stock_tuple.ticker, stock_tuple.move_return)
)
record(prediction={1: 1, 0: -1}[int(prediction)])
order_percent(stock_tuple.ticker, {1: 1, 0: -1}[int(prediction)] * (1.0 / len(data)))
context.to_terminate.append((stock_tuple.ticker, context.number_days_after))
tmp_tuple = context.data_countdowns[old_data_counts + idx]
tmp_tuple = context.StockTuple(
tmp_tuple[0], tmp_tuple[1], tmp_tuple[2], tmp_tuple[3], bool(prediction)
)
context.data_countdowns[old_data_counts + idx] = tmp_tuple
def analyze(context, perf):
perf.portfolio_value.plot()
plt.ylabel('portfolio value in $')
plt.legend(loc=0)
plt.show()
# Source: ChristfriedBalizou/jeamsql, adapters/tabulate/tabulate.py (MIT license)
# -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
try:  # Python 3.3+ moved the container ABCs to collections.abc
    from collections.abc import Iterable
except ImportError:  # Python 2
    from collections import Iterable
from platform import python_version_tuple
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
basestring = str
import io
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.8.0"
# minimum extra space in headers
MIN_PADDING = 2
# Whether or not to preserve leading/trailing whitespace in data.
PRESERVE_WHITESPACE = False
_DEFAULT_FLOATFMT="g"
_DEFAULT_MISSINGVAL=""
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _textile_row_with_attrs(cell_values, colwidths, colaligns):
cell_values[0] += ' '
alignment = { "left": "<.", "right": ">.", "center": "=.", "decimal": ">." }
values = (alignment.get(a, '') + v for a, v in zip(colaligns, cell_values))
return '|' + '|'.join(values) + '|'
def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
# this table header will be suppressed if there is a header row
return "\n".join(["<table>", "<tbody>"])
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
rowhtml = "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
if celltag == "th": # it's a header row, create a new table header
rowhtml = "\n".join(["<table>",
"<thead>",
rowhtml,
"</thead>",
"<tbody>"])
return rowhtml
def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=''):
alignment = { "left": '',
"right": '<style="text-align: right;">',
"center": '<style="text-align: center;">',
"decimal": '<style="text-align: right;">' }
values_with_attrs = ["{0}{1} {2} ".format(celltag,
alignment.get(a, ''),
header+c+header)
for c, a in zip(cell_values, colaligns)]
return "".join(values_with_attrs)+"||"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
    return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
                      "\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
def escape_char(c):
return escrules.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
def _rst_escape_first_column(rows, headers):
def escape_empty(val):
        if isinstance(val, (_text_type, _binary_type)) and val.strip() == "":
return ".."
else:
return val
new_headers = list(headers)
new_rows = []
if headers:
new_headers[0] = escape_empty(headers[0])
for row in rows:
new_row = list(row)
if new_row:
new_row[0] = escape_empty(row[0])
new_rows.append(new_row)
return new_rows, new_headers
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"jira":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("||", "||", "||"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"moinmoin":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=partial(_moin_row_with_attrs,"||",header="'''"),
datarow=partial(_moin_row_with_attrs,"||"),
padding=1, with_header_hide=None),
"youtrack":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|| ", " || ", " || "),
datarow=DataRow("| ", " | ", " |"),
padding=1, with_header_hide=None),
"html":
TableFormat(lineabove=_html_begin_table_without_header,
linebelowheader="",
linebetweenrows=None,
linebelow=Line("</tbody>\n</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=["lineabove"]),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_raw":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=partial(_latex_row, escrules={}),
datarow=partial(_latex_row, escrules={}),
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None),
"textile":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("|_. ", "|_.", "|"),
datarow=_textile_row_with_attrs,
padding=1, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or\
(isinstance(string, _binary_type) or isinstance(string, _text_type))\
and\
_isconvertible(inttype, string)
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return type(string) is _bool_type or\
(isinstance(string, (_binary_type, _text_type))\
and\
string in ("True", "False"))
def _type(string, has_invisible=True, numparse=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isbool(string):
return _bool_type
elif _isint(string) and numparse:
return int
elif _isint(string, _long_type) and numparse:
return int
elif _isnumber(string) and numparse:
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
fmt = "{0:>%ds}" % width
return fmt.format(s)
def _padright(width, s):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:<%ds}" % width
return fmt.format(s)
def _padboth(width, s):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:^%ds}" % width
return fmt.format(s)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
# optional wide-character support
if wcwidth is not None and WIDE_CHARS_MODE:
len_fn = wcwidth.wcswidth
else:
len_fn = len
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len_fn(_strip_invisible(s))
else:
return len_fn(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padright
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if has_invisible:
width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
width_fn = wcwidth.wcswidth
else:
width_fn = len
s_lens = list(map(len, strings))
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
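    """Return the more generic of two types (e.g. int and float give float).
    >>> _more_generic(int, float) is float
    True
    >>> _more_generic(int, _text_type) is _text_type
    True
    """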
types = { _none_type: 0, _bool_type: 1, int: 2, float: 3, _binary_type: 4, _text_type: 5 }
invtypes = { 5: _text_type, 4: _binary_type, 3: float, 2: int, 1: _bool_type, 0: _none_type }
moregeneric = max(types.get(type1, 5), types.get(type2, 5))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True, numparse=True):
"""The least generic type all column values are convertible to.
>>> _column_type([True, False]) is _bool_type
True
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible, numparse) for s in strings ]
return reduce(_more_generic, types, _bool_type)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width):
"Pad string header to width chars given known visible_width of the header."
width += len(header) - visible_width
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v]+list(row) for v,row in zip(index, rows)]
return rows
def _bool(val):
"A wrapper around standard bool() which doesn't throw on NumPy arrays"
try:
return bool(val)
except ValueError: # val is likely to be a numpy array with many elements
return False
def _normalize_tabular_data(tabular_data, headers, showindex="default"):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
If showindex="default", show row indices of the pandas.DataFrame.
If showindex="always", show row indices for all types of data.
If showindex="never", don't show row indices for all types of data.
If showindex is an iterable, show its values as row indices.
"""
try:
bool(headers)
is_headers2bool_broken = False
except ValueError: # numpy.ndarray, pandas.core.index.Index, ...
is_headers2bool_broken = True
headers = list(headers)
index = None
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data)
if tabular_data.index.name is not None:
if isinstance(tabular_data.index.name, list):
keys[:0] = tabular_data.index.name
else:
keys[:0] = [tabular_data.index.name]
vals = tabular_data.values # values matrix doesn't need to be transposed
# for DataFrames add an index per default
index = list(tabular_data.index)
rows = [list(row) for row in vals]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a usual an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and not rows):
# an empty table (issue #81)
headers = []
elif (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif (headers == "keys"
and hasattr(tabular_data, "description")
and hasattr(tabular_data, "fetchone")
and hasattr(tabular_data, "rowcount")):
# Python Database API cursor object (PEP 0249)
# print tabulate(cursor, headers='keys')
headers = [column[0] for column in tabular_data.description]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
if index is not None:
headers = [index[0]] + list(rows[0])
index = index[1:]
else:
headers = rows[0]
headers = list(map(_text_type, headers)) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# add or remove an index column
showindex_is_a_str = type(showindex) in [_text_type, _binary_type]
if showindex == "default" and index is not None:
rows = _prepend_row_index(rows, index)
elif isinstance(showindex, Iterable) and not showindex_is_a_str:
rows = _prepend_row_index(rows, list(showindex))
elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str):
if index is None:
index = list(range(len(rows)))
rows = _prepend_row_index(rows, index)
elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str):
pass
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default", disable_numparse=False):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
By default, pandas.DataFrame data have an additional column called
row index. To add a similar column to all other types of data,
use `showindex="always"` or `showindex=True`. To suppress row indices
for all types of data, pass `showindex="never" or `showindex=False`.
To add a custom row index column, pass `showindex=some_iterable`.
>>> print(tabulate([["F",24],["M",19]], showindex="always"))
- - --
0 F 24
1 M 19
- - --
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point. This can also be
a list or tuple of format strings, one per column.
`None` values are replaced with a `missingval` string (like
`floatfmt`, this can also be a list of values for different
columns):
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', 'latex_raw' and 'latex_booktabs'. Variable `tabulate_formats`
contains the list of currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<thead>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
</thead>
<tbody>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</tbody>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_raw" is similar to "latex", but doesn't escape special characters,
    such as backslash and underscore, so LaTeX commands may be embedded into
cells' values:
>>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw"))
\\begin{tabular}{lr}
\\hline
spam$_9$ & 41.9999 \\\\
\\emph{eggs} & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\end{tabular}
Number parsing
--------------
By default, anything which can be parsed as a number is a number.
This ensures numbers represented as strings are aligned properly.
This can lead to weird results for particular strings such as
specific git SHAs e.g. "42992e1" will be parsed into the number
429920 and aligned as such.
To completely disable number parsing (and alignment), use
    `disable_numparse=True`. For more fine-grained control, a list of column
    indices can be used to disable number parsing only on those columns,
e.g. `disable_numparse=[0, 2]` would disable number parsing only on the
first and third columns.
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(list_of_lists, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if has_invisible:
width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
width_fn = wcwidth.wcswidth
else:
width_fn = len
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): #old version
float_formats = len(cols) * [floatfmt] # just duplicate the string to use in each column
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend( (len(cols)-len(float_formats)) * [_DEFAULT_FLOATFMT] )
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend( (len(cols)-len(missing_vals)) * [_DEFAULT_MISSINGVAL] )
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats, missing_vals)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h))
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are False,
and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
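    >>> _expand_numparse([1], 3)
    [True, False, True]
    >>> _expand_numparse(True, 2)
    [False, False]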
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
if headers or rows:
return "\n".join(lines)
else: # a completely empty table
return ""
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_raw,
latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "h1o:s:F:f:",
                                   ["help", "header", "output=", "sep=", "float=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = _DEFAULT_FLOATFMT
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
print(tabulate(table, headers, tablefmt, floatfmt=floatfmt), file=file)
if __name__ == "__main__":
_main()
# Source: mikofski/pvlib-python, pvlib/tests/iotools/test_srml.py (BSD-3-Clause license)
from numpy import isnan
import pandas as pd
import pytest
from pvlib.iotools import srml
from conftest import DATA_DIR, RERUNS, RERUNS_DELAY
srml_testfile = DATA_DIR / 'SRML-day-EUPO1801.txt'
def test_read_srml():
srml.read_srml(srml_testfile)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_read_srml_remote():
srml.read_srml('http://solardat.uoregon.edu/download/Archive/EUPO1801.txt')
def test_read_srml_columns_exist():
data = srml.read_srml(srml_testfile)
assert 'ghi_0' in data.columns
assert 'ghi_0_flag' in data.columns
assert 'dni_1' in data.columns
assert 'dni_1_flag' in data.columns
assert '7008' in data.columns
assert '7008_flag' in data.columns
def test_read_srml_nans_exist():
data = srml.read_srml(srml_testfile)
assert isnan(data['dni_0'][1119])
assert data['dni_0_flag'][1119] == 99
@pytest.mark.parametrize('url,year,month', [
('http://solardat.uoregon.edu/download/Archive/EUPO1801.txt',
2018, 1),
('http://solardat.uoregon.edu/download/Archive/EUPO1612.txt',
2016, 12),
])
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_read_srml_dt_index(url, year, month):
data = srml.read_srml(url)
start = pd.Timestamp(f'{year:04d}{month:02d}01 00:00')
start = start.tz_localize('Etc/GMT+8')
end = pd.Timestamp(f'{year:04d}{month:02d}31 23:59')
end = end.tz_localize('Etc/GMT+8')
assert data.index[0] == start
assert data.index[-1] == end
assert (data.index[59::60].minute == 59).all()
assert str(year) not in data.columns
@pytest.mark.parametrize('column,expected', [
('1001', 'ghi_1'),
('7324', '7324'),
('2001', '2001'),
('2017', 'dni_7')
])
def test_map_columns(column, expected):
assert srml.map_columns(column) == expected
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_read_srml_month_from_solardat():
url = 'http://solardat.uoregon.edu/download/Archive/EUPO1801.txt'
file_data = srml.read_srml(url)
requested = srml.read_srml_month_from_solardat('EU', 2018, 1)
assert file_data.equals(requested)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_15_minute_dt_index():
data = srml.read_srml_month_from_solardat('TW', 2019, 4, 'RQ')
start = pd.Timestamp('20190401 00:00')
start = start.tz_localize('Etc/GMT+8')
end = pd.Timestamp('20190430 23:45')
end = end.tz_localize('Etc/GMT+8')
assert data.index[0] == start
assert data.index[-1] == end
assert (data.index[3::4].minute == 45).all()
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_hourly_dt_index():
data = srml.read_srml_month_from_solardat('CD', 1986, 4, 'PH')
start = pd.Timestamp('19860401 00:00')
start = start.tz_localize('Etc/GMT+8')
end = pd.Timestamp('19860430 23:00')
end = end.tz_localize('Etc/GMT+8')
assert data.index[0] == start
assert data.index[-1] == end
assert (data.index.minute == 0).all()
# Source: DimensionalScoop/kautschuk, AP_SS16/US1/PythonSkript.py
##################################################### Import system libraries ######################################################
import matplotlib as mpl
mpl.rcdefaults()
mpl.rcParams.update(mpl.rc_params_from_file('meine-matplotlibrc'))
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import (
nominal_values as noms,
std_devs as stds,
)
################################################ Finish importing system libraries #################################################
################################################ Adding subfolder to system's path #################################################
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"python_custom_scripts")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
############################################# Finish adding subfolder to system's path #############################################
##################################################### Import custom libraries ######################################################
from curve_fit import ucurve_fit
from table import (
make_table,
make_full_table,
make_composed_table,
make_SI,
write,
)
from regression import (
reg_linear,
reg_quadratic,
reg_cubic
)
from error_calculation import(
MeanError
)
################################################ Finish importing custom libraries #################################################
################################ FREQUENTLY USED CODE ################################
#
########## IMPORT ##########
# t, U, U_err = np.genfromtxt('data.txt', unpack=True)
# t *= 1e-3
########## ERRORS ##########
# R_unc = ufloat(R[0],R[2])
# U = 1e3 * unp.uarray(U, U_err)
# Rx_mean = np.mean(Rx)                      # mean value and systematic error
# Rx_mean_err = MeanError(noms(Rx))          # error of the mean
#
## Relative errors for later comparison in the discussion
# RelFehler_G = (G_mess - G_lit) / G_lit
# RelFehler_B = (B_mess - B_lit) / B_lit
# write('build/RelFehler_G.tex', make_SI(RelFehler_G*100, r'\percent', figures=1))
# write('build/RelFehler_B.tex', make_SI(RelFehler_B*100, r'\percent', figures=1))
########## CURVE FIT ##########
# def f(t, a, b, c, d):
# return a * np.sin(b * t + c) + d
#
# params = ucurve_fit(f, t, U, p0=[1, 1e3, 0, 0]) # p0 gives the initial guesses for the fit parameters
# params = ucurve_fit(reg_linear, x, y) # linear fit
# params = ucurve_fit(reg_quadratic, x, y) # quadratic fit
# params = ucurve_fit(reg_cubic, x, y) # cubic fit
# a, b = params
# write('build/parameter_a.tex', make_SI(a * 1e-3, r'\kilo\volt', figures=1)) # type in the number of significant digits
# write('build/parameter_b.tex', make_SI(b * 1e-3, r'\kilo\hertz', figures=2)) # type in the number of significant digits
########## PLOTTING ##########
# plt.clf # clear actual plot before generating a new one
#
## automatically choosing limits with existing array T1
# t_plot = np.linspace(np.amin(T1), np.amax(T1), 100)
# plt.xlim(t_plot[0]-1/np.size(T1)*(t_plot[-1]-t_plot[0]), t_plot[-1]+1/np.size(T1)*(t_plot[-1]-t_plot[0]))
#
## hard coded limits
# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3
#
## standard plotting
# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')
# plt.plot(t * 1e3, U * 1e3, 'rx', label='Messdaten')
## plt.errorbar(B * 1e3, noms(y) * 1e5, fmt='rx', yerr=stds(y) * 1e5, label='Messdaten') # mit Fehlerbalken
## plt.xscale('log') # logarithmische x-Achse
# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
# plt.ylabel(r'$U \:/\: \si{\kilo\volt}$')
# plt.legend(loc='best')
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/aufgabenteil_a_plot.pdf')
########## WRITING TABLES ##########
### IF THERE IS ONLY ONE COLUMN IN A TABLE (workaround):
## a=np.array([Wert_d[0]])
## b=np.array([Rx_mean])
## c=np.array([Rx_mean_err])
## d=np.array([Lx_mean*1e3])
## e=np.array([Lx_mean_err*1e3])
#
# write('build/Tabelle_b.tex', make_table([a,b,c,d,e],[0, 1, 0, 1, 1])) # Jeder fehlerbehaftete Wert bekommt zwei Spalten
# write('build/Tabelle_b_texformat.tex', make_full_table(
# 'Messdaten Kapazitätsmessbrücke.',
# 'table:A2',
# 'build/Tabelle_b.tex',
# [1,2,3,4,5], # Hier aufpassen: diese Zahlen bezeichnen diejenigen resultierenden Spaltennummern,
# # die Multicolumns sein sollen
# ['Wert',
# r'$C_2 \:/\: \si{\nano\farad}$',
# r'$R_2 \:/\: \si{\ohm}$',
# r'$R_3 / R_4$', '$R_x \:/\: \si{\ohm}$',
# r'$C_x \:/\: \si{\nano\farad}$']))
#
## splitting tables in case they are too long
# t1, t2 = np.array_split(t * 1e3, 2)
# U1, U2 = np.array_split(U * 1e-3, 2)
# write('build/loesung-table.tex', make_table([t1, U1, t2, U2], [3, None, 3, None])) # type in the number of decimal places
#
## merging tables (raw data only, the number of rows must be identical)
# write('build/Tabelle_b_composed.tex', make_composed_table(['build/Tabelle_b_teil1.tex','build/Tabelle_b_teil2.tex']))
########## ARRAY FUNCTIONS ##########
# np.arange(2,10) # creates an ascending array from 2 to 10
# np.zeros(15) # creates an array of 15 zeros
# np.ones(15) # creates an array of 15 ones
#
# np.amin(array) # returns the smallest value within an array
# np.argmin(array) # returns the index of the minimum of an array
# np.amax(array) # returns the largest value within an array
# np.argmax(array) # returns the index of the maximum of an array
#
# a1,a2 = np.array_split(array, 2) # split an array into two halves
# np.size(array) # get the number of elements of an array
########## ARRAY INDEXING ##########
# y[n - 1::n] # returns every n-th value of an array, as an array
########## DIFFERENT STUFF ##########
# R = const.physical_constants["molar gas constant"] # Array of value, unit, error
### VORARBEITEN ####
h_zylinder, t_zylinder = np.genfromtxt('messdaten/a.txt', unpack=True)
h_zylinder = h_zylinder*10**(-3)
t_zylinder = t_zylinder*10**(-6)
##### a #####
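# Pulse-echo method: the pulse traverses the sample twice (down and back),
# so the measured transit time corresponds to 2*h and hence v = 2*h/t.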
v_zylinder = 2*h_zylinder/t_zylinder
write('build/Tabelle_0.tex', make_table([h_zylinder*10**3, t_zylinder*10**6, v_zylinder],[2, 1, 2])) # Jeder fehlerbehaftete Wert bekommt zwei Spalten
write('build/Tabelle_0_texformat.tex', make_full_table(
'Bestimmung der Schallgeschwindigkeit mittels Impuls-Echo-Verfahren.',
'tab:0',
'build/Tabelle_0.tex',
[], # Hier aufpassen: diese Zahlen bezeichnen diejenigen resultierenden Spaltennummern,
# die Multicolumns sein sollen
[r'$h_{\text{zylinder}} \:/\: 10^{-3} \si{\metre}$',
r'$\increment t \:/\: 10^{-6} \si{\second} $',
r'$c_\text{Acryl} \:/\:\si{\metre\per\second} $']))
c_arcyl_1 = ufloat(np.mean(v_zylinder), np.std(v_zylinder))
write('build/c_acryl_1.tex', make_SI(c_arcyl_1, r'\metre\per\second', figures=2)) # type in Anz. signifikanter Stellen
params = ucurve_fit(reg_linear, 0.5*t_zylinder, h_zylinder) # linearer Fit
a, b = params
write('build/parameter_a.tex', make_SI(a, r'\metre\per\second', figures=1)) # type in Anz. signifikanter Stellen
write('build/parameter_b.tex', make_SI(b, r'\metre', figures=2)) # type in Anz. signifikanter Stellen
v_lit = 2730
v_rel_3 = abs(np.mean(a)-v_lit)/v_lit *100
write('build/v_rel_3.tex', make_SI(v_rel_3, r'\percent', figures=2))
t_plot = np.linspace(0.9*np.amin(0.5*t_zylinder), np.amax(0.5*t_zylinder)*1.1, 100)
plt.plot(t_plot, t_plot*a.n+b.n, 'b-', label='Linearer Fit')
plt.plot(0.5*t_zylinder, h_zylinder, 'rx', label='Messdaten')
# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3
#
## standard plotting
# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')
# plt.plot(t * 1e3, U * 1e3, 'rx', label='Messdaten')
## plt.errorbar(B * 1e3, noms(y) * 1e5, fmt='rx', yerr=stds(y) * 1e5, label='Messdaten') # mit Fehlerbalken
## plt.xscale('log') # logarithmische x-Achse
# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
plt.xlim(t_plot[0], t_plot[-1])
plt.xlabel(r'$\frac{1}{2} t \:/\: \si{\second}$')
plt.ylabel(r'$h \:/\: \si{\metre}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/ausgleich.pdf')
v_rel_1 = abs(np.mean(v_zylinder)-v_lit)/v_lit *100
write('build/v_rel_1.tex', make_SI(v_rel_1, r'\percent', figures=2))
write('build/v_lit.tex', make_SI(v_lit, r'\metre\per\second', figures=0))
##############Durchschallungs-Methode####################
h_zylinder, t_zylinder = np.genfromtxt('messdaten/b.txt', unpack=True)
h_zylinder = h_zylinder*10**(-3)
t_zylinder = t_zylinder*10**(-6)/2
v_zylinder = h_zylinder/t_zylinder
write('build/Tabelle_1.tex', make_table([h_zylinder*10**3, t_zylinder*10**6, v_zylinder],[2, 1, 2])) # Jeder fehlerbehaftete Wert bekommt zwei Spalten
write('build/Tabelle_1_texformat.tex', make_full_table(
'Bestimmung der Schallgeschwindigkeit mittels Durchschallungs-Methode.',
'tab:1',
'build/Tabelle_1.tex',
[], # Hier aufpassen: diese Zahlen bezeichnen diejenigen resultierenden Spaltennummern,
# die Multicolumns sein sollen
[r'$h_{\text{zylinder}} \:/\: 10^{-3} \si{\metre}$',
r'$\increment t \:/\: 10^{-6} \si{\second} $',
r'$c_\text{Acryl} \:/\: \si{\metre\per\second} $']))
c_arcyl_2 = ufloat(np.mean(v_zylinder), np.std(v_zylinder))
write('build/c_acryl_2.tex', make_SI(c_arcyl_2, r'\metre\per\second', figures=2)) # type in Anz. signifikanter Stellen
v_rel_2 = abs(np.mean(v_zylinder)-v_lit)/v_lit *100
write('build/v_rel_2.tex', make_SI(v_rel_2, r'\percent', figures=2))
################Abschwächungskoeffizient################
U_1 = 1.214
U_2 = 1.105
t_1 = 1.3 * 10**(-6)
t_2 = 46.2 * 10**(-6)
alpha = np.log(U_1/U_2)/(t_1-t_2)
write('build/alpha.tex', make_SI(alpha, r'\second\tothe{-1}', figures=1))
################Auge##################
t_auge = np.genfromtxt('messdaten/auge.txt', unpack=True)
t_auge = t_auge*10**(-6)
c_linse = 2500
c_gk = 1410
s_12 = (t_auge[1]-t_auge[0])*c_gk
s_23 = (t_auge[2]-t_auge[1])*c_linse
s_34 = (t_auge[3]-t_auge[2])*c_linse
s_45 = (t_auge[4]-t_auge[3])*c_linse
s_36 = (t_auge[5]-t_auge[2])*c_gk
write('build/c_linse.tex', make_SI(c_linse, r'\metre\per\second', figures=0))
write('build/c_gk.tex', make_SI(c_gk, r'\metre\per\second', figures=0))
write('build/s_12.tex', make_SI(s_12, r'\metre', figures=3))
write('build/s_23.tex', make_SI(s_23, r'\metre', figures=3))
write('build/s_34.tex', make_SI(s_34, r'\metre', figures=3))
write('build/s_45.tex', make_SI(s_45, r'\metre', figures=3))
write('build/s_36.tex', make_SI(s_36, r'\metre', figures=3))
### FFT - For Fucks... Time?####
fft = np.genfromtxt('messdaten/fft.txt', unpack=True)
write('build/fft_1.tex', make_SI(fft[0], r'\mega\hertz', figures=2))
write('build/fft_2.tex', make_SI(fft[1], r'\mega\hertz', figures=2))
write('build/fft_3.tex', make_SI(fft[2], r'\mega\hertz', figures=2))
write('build/fft_4.tex', make_SI(fft[3], r'\mega\hertz', figures=2))
write('build/fft_5.tex', make_SI(fft[4], r'\mega\hertz', figures=2))
write('build/fft_6.tex', make_SI(fft[5], r'\mega\hertz', figures=2))
fft = fft * 10**6
delta_f = np.array([ fft[1]-fft[0], fft[2]-fft[1], fft[3]-fft[2], fft[4]-fft[3], fft[5]-fft[4] ])
mean_delta_f = np.mean(delta_f)
std_delta_f = np.std(delta_f)
delta_f = ufloat(mean_delta_f, std_delta_f)
s_probe = 2730/delta_f
write('build/s_probe.tex', make_SI(s_probe, r'\metre', figures=2))
### Cepstrum ###
f_cep = 10/4.9 * 0.8 + 10 #Peak in µs
s_cep = f_cep * 2730 * 10**(-6)
write('build/s_cep.tex', make_SI(s_cep, r'\metre', figures=3))
| mit | -3,034,378,349,200,479,000 | 40.815182 | 154 | 0.579006 | false |
haiweiosu/Optical-Character-Recognition-using-Template-Matching-Object-Detection-in-Images | task2_2.py | 1 | 1206 | import numpy as np
import cv2
from matplotlib import pyplot as plt
from config import roadtemp, road1, road2, road3, road4
img1 = cv2.imread(roadtemp,0) # queryImage
img2 = cv2.imread(road1,0) # trainImage
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in xrange(len(matches))]
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
if m.distance < 0.7*n.distance:
matchesMask[i]=[1,0]
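# matchesMask now flags (as [1, 0]) the knn pairs that pass the 0.7 ratio
# test; only those match lines are drawn by drawMatchesKnn below.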
draw_params = dict(matchColor = (0,255,0),
singlePointColor = (255,0,0),
matchesMask = matchesMask,
flags = 0)
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)
plt.imshow(img3,),plt.show() | apache-2.0 | 3,490,889,863,178,243,000 | 28.439024 | 71 | 0.686567 | false |
mattsolo1/hmmerclust | build/lib/hmmerclust/hmmerclust.py | 1 | 40472 | '''
The MIT License (MIT)
Copyright (c) 2015 Matthew Solomonson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from Bio import SeqIO, SearchIO
import subprocess
from multiprocessing import cpu_count
from collections import OrderedDict
from pandas import DataFrame
import pprint
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pylab
from random import random
import os
processors = cpu_count()
def fetch_gbwithparts(list_of_NC_accessions, email, folder):
from Bio import Entrez
from time import sleep
print 'downloading genomes... please wait'
for item in list_of_NC_accessions:
Entrez.email = email
handle = Entrez.efetch(db="nuccore",
id=item,
retmode='full',
rettype='gbwithparts')
data = handle.read()
if not os.path.exists(folder):
os.makedirs(folder)
with open('%s/%s.gb' % (folder,item), 'w') as textfile:
textfile.write(data)
print 'done downloading %s' % item
sleep(2)
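# Illustrative call (accession, e-mail address and output folder are example
# values only):
#   fetch_gbwithparts(['NC_000962'], 'you@example.com', 'genomes')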
class OrganismDB:
"""
    Receives a list of genomes,
Makes a list of seqRecord objects,
Generates a list of Organism objects.
"""
def __init__(self, database_name, genome_list, genome_dir, freshfasta=False, search=None):
self.database_name = database_name
self.genome_list = genome_list
self.genome_dir = genome_dir
self.combined_proteome_file_name = None
if freshfasta==True:
self.generate_combined_fasta(self.genome_list, self.genome_dir)
self.organisms = [] #self.make_organisms(self.seq_record_list)
#self.seq_record_list = self.make_seq_record(self.genome_list, self.genome_dir)
self.search = search
self.df = None
self.group_dict = None
self.make_organisms(self.genome_list, self.genome_dir)
self.rRNA16SDB = rRNA16SDB(self)
def generate_combined_fasta(self, genome_list, genome_dir):
fasta = []
for genome in genome_list:
full_path = genome_dir + genome
handle = open(full_path, "rU")
print 'making combined fasta for', genome
try:
seq_record = SeqIO.read(handle, 'genbank')
org_accession = seq_record.name
except AssertionError,e:
print str(e), genome
for feature in seq_record.features:
if feature.type == 'CDS':
try:
prot_accession = feature.qualifiers['protein_id'][0]
prot_translation = feature.qualifiers['translation'][0]
newfast = '>' + org_accession + ',' + prot_accession + \
'\n' + prot_translation + '\n'
#if newfast not in fasta:
fasta.append(newfast)
except AttributeError,e:
print "organism %s, protein %s did not have \
the right attributes" % (org_accession, prot_accession)
print str(e)
except KeyError,e:
print "organism %s, protein %s did not have \
the right key" % (org_accession, prot_accession)
print str(e)
handle.close()
print "%s proteins were added" % len(fasta)
set_fasta = set(fasta)
print "%s unique proteins were added -- dropping redundant ones" % len(set_fasta)
faastring = "".join(set_fasta)
write_fasta = open('combined_fasta', 'w')
write_fasta.write(faastring)
write_fasta.close()
return set_fasta
def make_organisms(self, genome_list, genome_dir):
for genome in genome_list:
genome_path = genome_dir + genome
handle = open(genome_path, "rU")
print 'Adding organism attributes for', genome
try:
seq_record = SeqIO.read(handle, "genbank")
self.organisms.append(Organism(seq_record, genome_path, self))
del(seq_record)
except ValueError,e:
print genome, str(e)
except AssertionError,e:
print genome, str(e)
except UnboundLocalError,e:
print genome, str(e)
handle.close()
def add_protein_to_organisms(self, orgprot_list):
'''
Takes a list of items in org_acc, prot_acc,
e.g. NC_015758,YP_004723756.1,
adds to the
'''
for org in self.organisms:
handle = open(org.genome_path, "rU")
print 'adding proteins to organism', org.accession
try:
seq_record = SeqIO.read(handle, "genbank")
feature_list = []
for id in orgprot_list:
org_id = id.split(',')[0]
prot_id = id.split(',')[1]
if org.accession == org_id:
for feature in seq_record.features:
if feature.type == 'CDS':
feat_prot_acc = feature.qualifiers['protein_id'][0]
if feat_prot_acc == prot_id:
#print 'appending', hit_prot_acc
org.proteins.append(Protein(feature))
del(seq_record)
except ValueError,e:
print 'error for ', org.accession, str(e)
except AssertionError,e:
print 'error for ', org.accession, str(e)
except UnboundLocalError,e:
print 'error for ', org.accession, str(e)
except KeyError,e:
print 'error for ', org.accession, str(e)
handle.close()
def add_hits_to_proteins(self, hmm_hit_list):
for org in self.organisms:
print "adding SearchIO hit objects for", org.accession
for hit in hmm_hit_list:
hit_org_id = hit.id.split(',')[0]
hit_prot_id = hit.id.split(',')[1]
if org.accession == hit_org_id:
for prot in org.proteins:
if prot.accession == hit_prot_id:
prot.hmm_hit_list.append(hit)
def cluster_number(self, data, maxgap):
data.sort()
groups = [[data[0]]]
for x in data[1:]:
if abs(x - groups[-1][-1]) <= maxgap:
groups[-1].append(x)
else:
groups.append([x])
return groups
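    # Worked example (not from the original source):
    #   cluster_number([1, 2, 50, 51, 120], maxgap=10) -> [[1, 2], [50, 51], [120]]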
def find_loci(self, cluster_size, maxgap, locusview=False, colordict=None, required=None):
'''
Pass the minimum number of locus members, the maximum basepair
gap between members.
kwargs:
locusview: whether or not a map is generated for the locus_parent_organism
colordict: pass a pre-made color scheme for identified proteins
        required: a list of hits the locus must contain
'''
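        # Hits are grouped on their start coordinates: a hit joins the current
        # cluster when its start lies within `maxgap` bp of the previously
        # added hit's start (see cluster_number above).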
if colordict != None:
self.search.protein_arrow_color_dict = colordict
for organism in self.organisms:
print 'finding loci for', organism.name
#reset loci if there is something in there already
organism.loci = []
orghits = []
for protein in organism.proteins:
if len(protein.hmm_hit_list) > 0:
orghits.append((organism.accession, protein.accession,
protein.start_bp, protein.end_bp, protein))
bp_start_pooled = [hit[2] for hit in orghits]
try:
clustered_data = self.cluster_number(bp_start_pooled, maxgap)
significant_cluster_list = []
for cluster in clustered_data:
if len(cluster) > cluster_size:
significant_cluster_list.append(cluster)
#print significant_cluster_list
for cluster in significant_cluster_list:
proteins_in_locus = []
cluster.sort()
for bp_start in cluster:
for hit in orghits:
if bp_start == hit[2]:
proteins_in_locus.append(hit[4])
organism.loci.append(Locus(proteins_in_locus,
organism,
self.search.query_names,
locusview))
except IndexError,e:
print 'Index error', str(e), organism.name
print 'total of', str(len(organism.loci)), 'found for', organism.name
def clear_loci():
for org in self.organisms:
org.loci=None
class Organism(object):
"""
Encapsulates data related to a single organism.
Args:
seq_record (SeqRecord): biopython seqrecord object
genome_path (str): The path to genome
OrganismDB (OrganismDB): the parent organism database
Attributes:
genome_path (str)
parent_db (OrganismDB)
"""
def __init__(self, seq_record, genome_path, OrganismDB):
self.genome_path = genome_path
self.parent_db = OrganismDB
self.accesion_version = seq_record.id
self.accession = seq_record.name
self.description = seq_record.description
self.name = seq_record.annotations['source']
self.taxonomy = seq_record.annotations['taxonomy']
self.species = " ".join(self.name.split(" ")[0:2])
try:
self.kingdom = self.taxonomy[0]
self.phylum = self.taxonomy[1]
self.clazz = self.taxonomy[2]
self.order = self.taxonomy[3]
self.family = self.taxonomy[4]
self.genus = self.taxonomy[5]
except:
print 'Unable to parse taxonomy for', self.accession
self.taxonomy = None
self.kingdom = None
self.phylum = None
self.clazz = None
self.order = None
self.family = None
self.genus = None
self.rRNA16S_sequence = None
self.tree_order = 0
self.proteins = []
self.genome_length = len(seq_record.seq)
#self.proteome = Proteome(hit_features_only)
#self.seq_record = seq_record
self.loci = [] # list of Locus objects
class Proteome:
'''
Takes a seq_record_features object
An object representing all the proteins from an Organism.
Has Protein objects.
Can generate a FASTA that can be queried by hmmsearch
A Proteome belongs to an Organism
'''
def __init__(self, seq_record_features):
#self.seq_record_features = seq_record_features
self.proteins = []
self.protein_count = len(self.proteins)
self.generate_proteome(seq_record_features)
def generate_proteome(self, seq_record_features):
for feature in seq_record_features:
if feature.type == 'CDS':
self.proteins.append(Protein(feature))
class Protein:
"""
Takes a SeqIO feature object
    An object representing an individual CDS from the Organism
With the data we care about
"""
def __init__(self, feature):
#self.seqrecord_feature = feature
self.accession = feature.qualifiers['protein_id'][0]
self.gi = feature.qualifiers['db_xref'][0].split(':')[1]
self.product = feature.qualifiers['product'][0]
#self.note = feature.qualifiers['note']
self.start_bp = feature.location.start.position
self.end_bp = feature.location.end.position
self.strand = feature.location.strand
self.translation = feature.qualifiers['translation'][0]
self.numb_residues = len(self.translation)
self.hmm_hit_list = []
self.hit_dataframe = None
self.hit_name_best = 'non-hit'
self.hit_evalue_best = 'non-hit'
self.hit_bitscore_best = 'non-hit'
self.hit_bias_best = 'non-hit'
self.hit_start_best = 'non-hit'
self.hit_end_best = 'non-hit'
self.is_in_locus = None
def __repr__(self):
return "%s - %s" % (self.accession, self.product)
def parse_hmm_hit_list(self, hmm_hit_list):
'''
take a list of hmm hit results, take needed info,
'''
tuplist = []
for hit in hmm_hit_list:
for hsp in hit.hsps:
tup = tuplist.append((hit._query_id.split('_')[0],
hit.bitscore,
hit.evalue,
hsp.bias,
hsp.env_start,
hsp.env_end))
cols = ['name','bitscore','evalue', 'bias', 'hsp_start','hsp_end']
df = DataFrame(tuplist, columns=cols)
df.set_index('name', inplace=True)
return df
class rRNA16SDB:
def __init__(self, OrganismDB):
#self.write_16S_rRNA_fasta(OrganismDB.organisms)
self.import_tree_order_from_file(OrganismDB, '16S_aligned.csv')
def write_16S_rRNA_fasta(self, org_list):
'''
Writes a fasta file containing 16S rRNA sequences
for a list of Organism objects,
The first 16S sequence found in the seq record object is used,
since it looks like there are duplicates
'''
fasta = []
for org in org_list:
handle = open(org.genome_path, "rU")
seq_record = SeqIO.read(handle, "genbank")
for feat in seq_record.features:
if feat.type == 'rRNA':
if '16S ribosomal' in feat.qualifiers['product'][0]:
start = feat.location.start.position
end = feat.location.end.position
if ((end - start) > 1400) & ((end - start) < 1700) :
print 'rRNA sequence extracted for', org.accession
fasta.append('>' + org.accession +
'\n' +
str(feat.extract(seq_record.seq)) +
'\n')
org.rRNA16S_sequence = str(feat.extract(seq_record.seq))
break
faastring = "".join(fasta)
filename = '16S-rRNA.fasta'
write_fasta = open(filename, 'w')
write_fasta.write(faastring)
write_fasta.close()
def import_tree_order_from_file(self, MyOrganismDB, filename):
'''
Import the accession list that has been ordered by position
in a phylogenetic tree. Get the index in the list, and
add this to the Organism object. Later we can use this position
to make a heatmap that matches up to a phylogenetic tree.
'''
tree_order = [acc.strip() for acc in open(filename)]
#print tree_order
for org in MyOrganismDB.organisms:
for tree_accession in tree_order:
#print tree_accession
if org.accession == tree_accession:
org.tree_order = tree_order.index(tree_accession)
class HmmSearch:
"""
Give alignment files, name them according to what the names
should be in the analysis.
First the hmm is built with Hmmbuild, and the hmm files output.
Then run Hmmsearch, parse the files, put each result in a list
"""
def __init__(self, OrganismDB, combined_fasta, freshbuild=True, freshsearch=True, ):
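        # Sketch of the intended call order (variable names and values below
        # are illustrative, not taken from the package):
        #   db = OrganismDB('mydb', genome_list, './genomes/', freshfasta=True)
        #   search = HmmSearch(db, 'combined_fasta')
        #   db.find_loci(cluster_size=3, maxgap=5000)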
self.alignment_dir = './alignments/'
self.alignment_list = [x for x in os.listdir(self.alignment_dir) if '.txt' in x]
self.query_names = []
self.hmm_dir = './hmm/'
if not os.path.exists(self.hmm_dir):
os.makedirs(self.hmm_dir)
self.combined_fasta = combined_fasta
self.hhsearch_result_folder = './hhsearch_results/'
if not os.path.exists(self.hhsearch_result_folder):
os.makedirs(self.hhsearch_result_folder)
self.hmm_result_list=[]
if freshbuild == True:
self.run_hmmbuild()
if freshsearch == True:
self.run_hmmsearch()
self.combined_hit_list = self.extract_hit_list_from_hmmsearch_results()
self.orgprot_list = list(set([x.id for x in self.combined_hit_list]))
OrganismDB.search = self
self.protein_arrow_color_dict = self.make_protein_arrow_color_dict(self.query_names)
OrganismDB.add_protein_to_organisms(self.orgprot_list)
OrganismDB.add_hits_to_proteins(self.combined_hit_list)
self.parse_proteins(OrganismDB)
self.set_best_hit_values_for_proteins(OrganismDB)
def run_hmmbuild(self):
'''
Generate hmm with hhbuild,
output to file. Also stores query names.
'''
for alignment in self.alignment_list:
print 'building Hmm for', alignment
alignment_full_path = self.alignment_dir + alignment
query_name = alignment.split("_")[0]
self.query_names.append(query_name)
new_hmm= self.hmm_dir + query_name + ".hmm"
hmmbuild_output = subprocess.call(["hmmbuild", new_hmm,
alignment_full_path])
print 'hhbuild complete for', self.query_names
def run_hmmsearch(self):
'''
'''
all_searches = []
for name in self.query_names:
print 'running HHsearch on', name
hmm_full_path = self.hmm_dir + name + '.hmm'
hmmsearch_output = subprocess.check_output(["hmmsearch",
"--cpu", str(processors), hmm_full_path,
self.combined_fasta])
hmm_result_file_name = self.hhsearch_result_folder + name + ".out"
self.hmm_result_list.append((name + ".out"))
f = open(hmm_result_file_name, 'w')
f.write(hmmsearch_output)
f.close()
def extract_hit_list_from_hmmsearch_results(self):
'''
Make a giant list of all the hit objects from
our search
'''
combined_list_of_hits = []
for result in self.hmm_result_list:
fullpath = self.hhsearch_result_folder + result
se = SearchIO.read(fullpath, 'hmmer3-text')
sublist = []
for hit in se:
combined_list_of_hits.append(hit)
sublist.append(hit.id)
print 'extracted', str(len(sublist)), 'hits for', result
return combined_list_of_hits
def make_protein_arrow_color_dict(self, query_names):
'''
Generates a random color for all proteins in query_names,
stores these in a dict.
'''
protein_arrow_color_dict = dict()
for protein in self.query_names:
protein_arrow_color_dict[protein] = (random(), random(), random())
return protein_arrow_color_dict
def make_hsps(self, hit):
hit_name = hit._query_id.split("_")[0]
hit_evalue = hit.evalue
hit_bitscore = hit.bitscore
def parse_proteins(self,OrganismDB):
'''
Iterate through all the proteins in the DB,
creates a hit_dataframe for each protein.
'''
for org in OrganismDB.organisms:
for prot in org.proteins:
if len(prot.hmm_hit_list) > 0:
try:
prot.hit_dataframe = prot.parse_hmm_hit_list(prot.hmm_hit_list)
except ValueError,e:
print 'error for', org.name, prot.accession, str(e)
def set_best_hit_values_for_proteins(self, OrganismDB):
'''
Iterate through all proteins in the DB,
drop duplicates in the hit_dataframe, then store the maximum
hit information as protein attributes.
'''
for org in OrganismDB.organisms:
print 'setting best hit values for', org.name
for prot in org.proteins:
if len(prot.hmm_hit_list) > 0:
try:
dd_df = prot.hit_dataframe.drop_duplicates(subset='bitscore')
try:
prot.hit_name_best = dd_df.bitscore.idxmax()
prot.hit_evalue_best = dd_df.ix[prot.hit_name_best].evalue
prot.hit_bitscore_best = dd_df.ix[prot.hit_name_best].bitscore
prot.hit_bias_best = dd_df.ix[prot.hit_name_best].bias
prot.hit_start_best = dd_df.ix[prot.hit_name_best].hsp_start
prot.hit_end_best = dd_df.ix[prot.hit_name_best].hsp_end
except:
print 'could not set best hit values for ', org.name
except AttributeError:
pass
class Locus:
'''
Accepts list of protein objects, typically clustered proteins
    generated by the find_loci() method. The first and last proteins in
the locus are defined as boundaries
Also add the Locus back to the OrganismDB
'''
def __init__(self, list_of_protein_objects, organism, query_proteins, locusview):
self.locus_hit_membership = list_of_protein_objects
for prot in self.locus_hit_membership:
prot.is_in_locus = self
self.locus_number_of_hits = len(self.locus_hit_membership)
self.locus_min_hit_boundary = self.locus_hit_membership[0].start_bp
self.locus_max_hit_boundary = self.locus_hit_membership[-1].end_bp
self.locus_bp_size = int(self.locus_hit_membership[-1].end_bp) - \
int(self.locus_hit_membership[0].start_bp)
self.locus_total_membership = self.get_total_membership(organism)
self.locus_number_in_total = len(self.locus_total_membership)
self.query_proteins = query_proteins
self.locus_parent_organism = organism
#print organism.proteome.proteins
if locusview == True:
LocusView(self)
self.write_out_locus_fasta()
def get_total_membership(self, organism):
handle = open(organism.genome_path, "rU")
total_membership_list = list(self.locus_hit_membership)
try:
seq_record = SeqIO.read(handle, "genbank")
for feature in seq_record.features:
if feature.type == 'CDS':
locus_hit_accs = [x.accession for x in self.locus_hit_membership]
if feature.qualifiers['protein_id'][0] not in locus_hit_accs:
featstart = feature.location.start.position
featend = feature.location.end.position
if ((featstart >= self.locus_min_hit_boundary) and
(featend <= self.locus_max_hit_boundary)):
newprot = (Protein(feature))
newprot.is_in_locus = self
total_membership_list.append(newprot)
organism.proteins.append(newprot)
del(seq_record)
except ValueError,e:
print str(e), organism.name
except AssertionError,e:
print str(e), organism.name
except UnboundLocalError,e:
print str(e), organism.name
handle.close()
total_membership_list = list(set(total_membership_list))
total_membership_list.sort(key=lambda x: x.start_bp)
return total_membership_list
def write_out_locus_fasta(self):
fasta=[]
for prot in self.locus_total_membership:
fasta.append('>' + prot.accession +
"," + prot.hit_name_best +
"," + prot.product +
'\n' +prot.translation + '\n')
faastring = "".join(fasta)
if not os.path.exists('./locus_fastas/'):
os.makedirs('./locus_fastas/')
filename = ('./locus_fastas/' +
self.locus_parent_organism.accession +
str(self.locus_min_hit_boundary) + '.fasta')
write_fasta = open(filename, 'w')
write_fasta.write(faastring)
write_fasta.close()
class LocusView:
def __init__(self, Locus, hit_detail_table=False, xlims=None):
self.generate_locus_view(Locus, xlims)
if hit_detail_table==True:
self.show_locus_hit_details(Locus)
def generate_locus_dataframe(self, Locus):
data_tuple_list = []
print '\n'
print '-'*70, '\n','-'*70
print "Organism: ", Locus.locus_parent_organism.name
print 'Locus id:', id(Locus)
for protein in Locus.locus_total_membership:
if len(protein.hmm_hit_list) > 0:
#print protein.hmm_hit_list[0].__dict__
protein_hit_query = protein.hit_name_best
protein_hit_evalue = protein.hit_evalue_best
protein_hit_bitscore = protein.hit_bitscore_best
protein_hit_bias = protein.hit_bias_best
#protein_hsps = protein.hmm_hit_list[0]._items
else:
protein_hit_query = '-'
protein_hit_evalue = '-'
protein_hit_bitscore = '-'
protein_hit_bias = '-'
data_tuple_list.append((protein.accession, protein.product[:22],
protein_hit_query,protein_hit_evalue,protein_hit_bitscore,
protein_hit_bias))
cols = ['accession', 'name', 'query', 'evalue', 'bitscore', 'bias']
df = DataFrame(data_tuple_list, columns=cols)
return df[['accession', 'query', 'evalue', 'bitscore', 'bias', 'name']]
def generate_locus_view(self, Locus, xlims):
#set_option('expand_frame_repr', False)
#figsize(20,5)
df = self.generate_locus_dataframe(Locus)
if xlims != None:
xmin_value = xlims[0]
xmax_value = xlims[1]
else:
xmin_value = Locus.locus_min_hit_boundary - Locus.locus_min_hit_boundary
xmax_value = Locus.locus_max_hit_boundary - Locus.locus_min_hit_boundary
ax = plt.axes()
#plt.figure(num=None, figsize=(20,5))
for n, protein in enumerate(Locus.locus_total_membership):
colordict = Locus.locus_parent_organism.parent_db.search.protein_arrow_color_dict
if protein.strand == 1:
arrow_start = protein.start_bp - Locus.locus_min_hit_boundary
arrow_end = protein.end_bp - protein.start_bp
else:
arrow_start = protein.end_bp - Locus.locus_min_hit_boundary
arrow_end = protein.start_bp - protein.end_bp
if len(protein.hmm_hit_list) != 0:
protname = protein.hit_name_best
arrow_color = colordict[protname]
else:
arrow_color = '#EEE7EB'
ax.arrow(arrow_start, 100, arrow_end, 0, head_width=30, width=20,
color=arrow_color, head_length=200, length_includes_head=True,
fill=True, ec="black", lw=1)
if len(protein.hmm_hit_list) > 0:
hitdf = protein.hit_dataframe.reset_index()
ystart = 80 if (n % 2 == 0) else 120
for i in range(0, len(hitdf)):
if protein.strand == 1:
line_start = protein.start_bp - Locus.locus_min_hit_boundary + hitdf.ix[i].hsp_start*3
line_end = protein.end_bp - Locus.locus_min_hit_boundary - (protein.numb_residues - hitdf.ix[i].hsp_end)*3
else:
line_start = protein.start_bp - Locus.locus_min_hit_boundary + (protein.numb_residues - hitdf.ix[i].hsp_end)*3
line_end = protein.end_bp - Locus.locus_min_hit_boundary - hitdf.ix[i].hsp_start*3
plt.plot([line_start, line_end], [ystart, ystart], 'k-', lw=2)
xtextpos = line_start
if (n % 2 == 0):
ytextpos = ystart - 8
else:
ytextpos = ystart+2
label = str(i) + " " + hitdf.ix[i]['name']# + " " + str(hitdf.ix[i]['evalue'])[:3]
plt.annotate(label, xy=(xtextpos, ytextpos))
if (n % 2 == 0):
ystart -= 10
else:
ystart +=10
plt.axis('on')
pylab.ylim([0,200])
pylab.xlim([xmin_value, xmax_value])
#savefig('image.svg', dpi=300, format='pdf')
plt.show()
print df
def show_locus_hit_details(self, Locus):
for hit in Locus.locus_hit_membership:
try:
print hit.hit_dataframe.sort(columns='bitscore', ascending=False)
except AttributeError,e:
print str(e), 'attribute error for ', hit
class FinalDataFrame:
'''
Package all data into pandas.DataFrame
'''
def __init__(self,OrganismDB):
self.df = self.make_df(OrganismDB)
def make_df(self, OrganismDB):
list_of_hit_dicts = []
for i in range(0, len(OrganismDB.organisms)):
organism = OrganismDB.organisms[i]
for j in range(0, len(organism.proteins)):
protein = organism.proteins[j]
if len(protein.hmm_hit_list) != 0:
hit_dict = OrderedDict()
hit_dict['org_name'] = organism.name
hit_dict['org_acc'] = organism.accession
hit_dict['org_phylum'] = organism.phylum
hit_dict['org_class'] = organism.clazz
hit_dict['org_order'] = organism.order
hit_dict['org_family'] = organism.family
hit_dict['org_genus'] = organism.genus
hit_dict['org_species'] = organism.species
hit_dict['org_tree_order'] = organism.tree_order
hit_dict['org_genome_length'] = organism.genome_length
hit_dict['org_prot_count'] = len(organism.proteins)
hit_dict['org_numb_loci'] = len(organism.loci)
hit_dict['prot_acc'] = protein.accession
hit_dict['prot_gi'] = protein.gi
hit_dict['prot_product'] = protein.product
hit_dict['prot_translation'] = protein.translation
hit_dict['prot_numb_of_res'] = protein.numb_residues
hit_dict['hit_query'] = protein.hit_name_best
hit_dict['hit_evalue'] = protein.hit_evalue_best
hit_dict['hit_bitscore'] = protein.hit_bitscore_best
hit_dict['hit_bias'] = protein.hit_bias_best
hit_dict['locus_id'] = protein.is_in_locus
list_of_hit_dicts.append(hit_dict)
df = DataFrame(list_of_hit_dicts)
print df.index
cols = ['org_name',
'org_acc',
'org_phylum',
'org_class',
'org_order',
'org_family',
'org_genus',
'org_species',
'org_tree_order',
'org_genome_length',
'org_prot_count',
'org_numb_loci',
'prot_acc',
'prot_gi',
'prot_product',
'prot_translation',
'prot_numb_of_res',
'hit_query',
'hit_evalue',
'hit_bitscore',
'hit_bias',
'locus_id']
df = df[cols]
return df
class HeatMap:
def __init__(self, DataFrame, by_locus=False, cols=None, subset=None, singleletters=None):
self.unstacked_df = self.unstack_df(DataFrame, by_locus, cols, subset)
self.heatmap = self.make_heatmap(self.unstacked_df, singleletters)
def unstack_df(self, DataFrame, by_locus, cols, subset):
if by_locus == True:
colheads = ['org_species', 'locus_id', 'org_tree_order', 'hit_query']
else:
colheads = ['org_species', 'org_tree_order', 'hit_query']
unstacked_df = DataFrame.groupby(colheads).size().unstack()
if subset != None:
unstacked_df = unstacked_df.dropna(subset=subset)
unstacked_df = unstacked_df.fillna(0).sortlevel('org_tree_order', ascending=False)
if cols != None:
unstacked_df=unstacked_df[cols]
return unstacked_df
def make_heatmap(self, unstacked_df, singleletters):
fig, ax = plt.subplots(num=None, figsize=(10,len(unstacked_df)/3), dpi=80, facecolor='w', edgecolor='k')
#heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.Reds, alpha=2, vmax = 5)
#heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.gist_ncar_r, alpha=20, vmax = 20)
#heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.YlGnBu, alpha=20, vmax = 2)
heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.jet, alpha=10, vmax = 5)
# ax.set_title('140616 - Esp distribution in actinobacteria')
#cb = plt.colorbar(heatmap)
#cb.set_label('# of copies')
species_names_only = ['%s locus:%s' % (x[0],str(x[1])[-12:]) for x in unstacked_df.index.values]
ax.set_aspect('equal')
ax.yaxis.set_ticks(range(0, len(unstacked_df.values)))
ax.xaxis.set_ticks(range(0, len(unstacked_df.columns)))
ax.set_xticklabels(unstacked_df.columns, rotation='90')
ax.set_yticklabels(species_names_only)
#ax.set_yticklabels(unstacked_df.index.values)
ax.tick_params(axis='both', left='off', right='off', bottom='off', top='off')
#ax.set_xticks(np.range(data.shape[0])+0.5, minor=False)
#ax.set_yticks(np.range(data.shape[1])+0.5, minor=False)
#ax.invert_yaxis()
#ax.xaxis.tick_top()
plt.grid(True, color='black', ls='-', linewidth=0.5)
        '''experimental: displaying text on the heatmap'''
if singleletters != None:
for y in range(unstacked_df.values.shape[0]):
for x in range(unstacked_df.values.shape[1]):
plt.text(x + 0.5, y + 0.5, '%.4s' % singleletters[(x)],
horizontalalignment='center',
verticalalignment='center',
)
plt.savefig("out.svg")
plt.show()
#print species_names_only
def make_heatmap_text(self, unstacked_df):
fig, ax = plt.subplots(num=None, figsize=(10,len(unstacked_df)/3), dpi=80, facecolor='w', edgecolor='k')
#heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.Reds, alpha=2, vmax = 5)
#heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.gist_ncar_r, alpha=20, vmax = 20)
#heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.YlGnBu, alpha=20, vmax = 2)
heatmap = ax.pcolor(unstacked_df, cmap=plt.cm.jet, alpha=10, vmax = 5)
# ax.set_title('140616 - Esp distribution in actinobacteria')
#cb = plt.colorbar(heatmap)
#cb.set_label('# of copies')
species_names_only = ['%s locus:%s' % (x[0],str(x[1])[-12:]) for x in unstacked_df.index.values]
ax.set_aspect('equal')
ax.yaxis.set_ticks(range(0, len(unstacked_df.values)))
ax.xaxis.set_ticks(range(0, len(unstacked_df.columns)))
ax.set_xticklabels(unstacked_df.columns, rotation='90')
ax.set_yticklabels(species_names_only)
#ax.set_yticklabels(unstacked_df.index.values)
ax.tick_params(axis='both', left='off', right='off', bottom='off', top='off')
#ax.set_xticks(np.range(data.shape[0])+0.5, minor=False)
#ax.set_yticks(np.range(data.shape[1])+0.5, minor=False)
#ax.invert_yaxis()
#ax.xaxis.tick_top()
plt.grid(True, color='black', ls='-', linewidth=0.5)
        '''experimental: displaying text on the heatmap'''
for y in range(unstacked_df.values.shape[0]):
for x in range(unstacked_df.values.shape[1]):
plt.text(x + 0.5, y + 0.5, '%.4s' % 'A',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
#print species_names_only
class RelatedProteinGroup:
'''
An object representing a group of related proteins
to be used for generating alignments, phylogeny, etc.
Input is a list of Protein objects, e.g. of the same type that were
    identified in the Hmm search & were found in a cluster.
Can output a fasta file for each group for making alignments & trees
'''
def __init__(self, input_df):
self.make_related_protein_fasta_from_dataframe(input_df)
def make_related_protein_fasta_from_dataframe(self, input_df):
'''
        The DataFrame should have at least the columns hit_query, org_name,
        prot_acc and prot_translation (as produced by FinalDataFrame).
'''
dirname = './group_fastas'
if not os.path.exists(dirname):
os.makedirs(dirname)
unique_hit_queries = set(input_df.hit_query)
for hq in unique_hit_queries:
fasta = []
subdf = input_df[input_df.hit_query==hq].reset_index()
for i in range(0, len(subdf)):
fasta.append('>' + subdf.ix[i].org_name.replace(" ", "-") +
"," + subdf.ix[i].hit_query +
"," + subdf.ix[i].prot_acc +
'\n' + subdf.ix[i].prot_translation + '\n')
faastring = "".join(fasta)
filename = './group_fastas/' + hq + '.fasta'
write_fasta = open(filename, 'w')
write_fasta.write(faastring)
write_fasta.close()
'''
def make_16S(OrganismDB):
for org in OrganismDB.genome_list:
hmmbuild_output = subprocess.call(["hmmbuild", './16S_rRNA/16S_rRNA.hmm',
'./16S_rRNA/16S_rRNA_alignment.fasta'])
hmmsearch_output = subprocess.check_output(["hmmsearch",
"--cpu", str(processors), './16S_rRNA/16S_rRNA.hmm',
'combined_fasta'])
f = open('./16S_rRNA/16S_rRNA_result.out', 'w')
f.write(hmmsearch_output)
f.close ()
''' | gpl-2.0 | -6,073,266,835,436,664,000 | 32.311111 | 134 | 0.537829 | false |
Pragmatismo/TimelapsePi-EasyControl | webcamcap_show_numpy_buttons.py | 1 | 20708 | #!/usr/bin/python
import time
import os
import sys
import pygame
import numpy
from PIL import Image, ImageDraw, ImageChops
#import matplotlib.pyplot as plt
#from scipy.misc import imread, imsave, imresize
from scipy import misc
from scipy.ndimage import gaussian_filter, median_filter
print("")
print("")
print(" USE l=3 to take a photo every 3 somethings, try a 1000 or 2")
print(" t to take triggered photos ")
print(" cap=/home/pi/folder/ to set caps path other than current dir")
print(" ")
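# Example invocations (values are illustrative):
#   python webcamcap_show_numpy_buttons.py l=1000 cap=/home/pi/caps/
#   python webcamcap_show_numpy_buttons.py t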
s_val = "10"
c_val = "2"
g_val = "10"
b_val = "15"
x_dim = 1600
y_dim = 896
additonal_commands = "-d/dev/video0 -w"
ypos = 50 #size of buttons
to_graph = False
#try:
# cappath = os.getcwd()
##except:
# print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
cappath = "./"
# print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
loc_settings = "./camera_settings.txt"
try:
with open(loc_settings, "r") as f:
for line in f:
s_item = line.split("=")
if s_item[0] == "s_val":
s_val = s_item[1].split("\n")[0]
elif s_item[0] == "c_val":
c_val = s_item[1].split("\n")[0]
elif s_item[0] == "g_val":
g_val = s_item[1].split("\n")[0]
elif s_item[0] == "b_val":
b_val = s_item[1].split("\n")[0]
elif s_item[0] == "x_dim":
x_dim = s_item[1].split("\n")[0]
elif s_item[0] == "y_dim":
y_dim = s_item[1].split("\n")[0]
elif s_item[0] == "additonal_commands":
additonal_commands = s_item[1].split("\n")[0]
except:
print("No config file for camera, using default")
print("Run cam_config.py to create one")
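# camera_settings.txt is read as one key=value pair per line; an illustrative
# file matching the defaults above would be:
#   s_val=10
#   c_val=2
#   g_val=10
#   b_val=15
#   x_dim=1600
#   y_dim=896
#   additonal_commands=-d/dev/video0 -w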
def photo():
# take and save photo
timenow = time.time()
timenow = str(timenow)[0:10]
filename= "cap_"+str(timenow)+".jpg"
#os.system("uvccapture "+additonal_commands+" -S"+s_val+" -C" + c_val + " -G"+ g_val +" -B"+ b_val +" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
cmd = str("uvccapture "+additonal_commands+" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
os.system(cmd)
print("Image taken and saved to "+cappath+filename)
return filename
loop = False
trig = False
for argu in sys.argv[1:]:
try:
thearg = str(argu).split('=')[0]
except:
thearg = str(argu)
if thearg == 'cap' or thearg =='cappath':
cappath = str(argu).split('=')[1]
elif thearg == 'l' or thearg == 'looped':
try:
wait_num = int(str(argu).split('=')[1])
except:
print("No speed supplied, taking every 10")
wait_num = 10
loop = True
elif thearg == 't' or thearg == 'TRIGGERED':
trig = True
print(" Saving files to, " + str(cappath))
pygame.init()
display_width = x_dim+75
display_height = y_dim+55
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Most recent image')
clock = pygame.time.Clock()
crashed = False
def show_pic(imgtaken, x=0,y=0):
gameDisplay.blit(imgtaken, (x,y))
def draw_menu():
#gameDisplay.blit(button, (but1x,but1y))
pygame.draw.rect(gameDisplay, (50,200,50), (10, ypos+10, 50,ypos), 5)
pygame.draw.rect(gameDisplay, (100,200,100), (10,((ypos+10)*2), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (100,100,200), (10,((ypos+10)*3), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (100,200,100), (10,((ypos+10)*4), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (100,100,200), (10,((ypos+10)*5), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (50,200,50), (10,((ypos+10)*6), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (200,100,200), (10,((ypos+10)*7), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (50,200,50), (10,((ypos+10)*8), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (50,200,50), (10,((ypos+10)*9), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (50,200,50), (10,((ypos+10)*10), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (50,200,50), (10,((ypos+10)*11), 50,ypos), 5)
pygame.draw.rect(gameDisplay, (50,200,50), (10,((ypos+10)*12), 50,ypos), 5)
box_Rect = pygame.Rect(10, ypos+10, 50, ypos)
box2_Rect = pygame.Rect(10, ((ypos+10)*2), 50, ypos)
box3_Rect = pygame.Rect(10, ((ypos+10)*3), 50, ypos)
box4_Rect = pygame.Rect(10, ((ypos+10)*4), 50, ypos)
box5_Rect = pygame.Rect(10, ((ypos+10)*5), 50, ypos)
box6_Rect = pygame.Rect(10, ((ypos+10)*6), 50, ypos)
box7_Rect = pygame.Rect(10, ((ypos+10)*7), 50, ypos)
box8_Rect = pygame.Rect(10, ((ypos+10)*8), 50, ypos)
box9_Rect = pygame.Rect(10, ((ypos+10)*9), 50, ypos)
box10_Rect = pygame.Rect(10, ((ypos+10)*10), 50, ypos)
box11_Rect = pygame.Rect(10, ((ypos+10)*11), 50, ypos)
box12_Rect = pygame.Rect(10, ((ypos+10)*12), 50, ypos)
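# The twelve outlined boxes along the left edge act as buttons; the click
# handler in the main loop maps box 1 to blanking the edited image, boxes 2/3
# to persistence up/down, 4/5 to margin up/down, 6 to the show style, 7 to the
# denoise mode and 8 to the mask mode (boxes 9-12 are drawn but currently
# unused).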
##
## Initial Image collection before loop starts;
##
gameDisplay.fill((255,255,255))
#current photo
c_photo = photo()
pil_c_photo = Image.open(c_photo)
numpy_pic = numpy.array(pil_c_photo)
#previous photo
b_photo = photo()
pil_b_photo = Image.open(b_photo)
numpy_pic_b = numpy.array(pil_b_photo)
#masks
mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
mask2 = numpy_pic_b < numpy_pic - 30
e_pic = numpy_pic.copy()
e_pic[:,:,:] = 0 #hash this out to start with a photo
margin = 25
num = 0
colour_values = []
font = pygame.font.SysFont("comicsansms", 72)
def t_impcent(part):
return 100 * float(part)/float(1096704000)
def s_impcent(part):
return 100 * float(part)/float(365568000)
but1x = 30
but1y = 50
persistant = 0
show_style = 0
denoise = 1
mask_option = 1 # used in menu
while not crashed:
gameDisplay.fill((240,255,240))
for eve in pygame.event.get():
#print("-------------")
#print event
#print("-------------")
if eve.type == pygame.QUIT:
crashed = True
elif eve.type == pygame.MOUSEBUTTONDOWN:
posx = int(eve.pos[0])
posy = int(eve.pos[1])
Mouse_Rect = pygame.Rect(posx, posy, 2, 2)
if box_Rect.contains(Mouse_Rect):
print("Blanking the edited pics")
pygame.draw.rect(gameDisplay, (200,50,200), (15,65, 40,40), 15)
e_pic[:,:,:] = 0
old_e[:,:,:] = 0
elif box2_Rect.contains(Mouse_Rect):
print("cycling persistance up")
persistant = persistant + 1
elif box3_Rect.contains(Mouse_Rect):
print("cycling persistance down")
persistant = persistant - 1
#if persistant <= -1:
# persistant = -1
elif box4_Rect.contains(Mouse_Rect):
print("increasing margin")
margin = margin + 1
elif box5_Rect.contains(Mouse_Rect):
print("margin going down...")
margin = margin - 1
if margin <= 0:
margin = 0
elif box6_Rect.contains(Mouse_Rect):
print("---SHOW STYLE---")
pygame.draw.rect(gameDisplay, (200,50,200), (15,365, 40,40), 15)
show_style = show_style + 1
if show_style >= 8:
show_style = 0
print show_style
elif box7_Rect.contains(Mouse_Rect):
print("denoise cycle...")
pygame.draw.rect(gameDisplay, (200,50,200), (15,425, 40,40), 15)
denoise = denoise + 1
if denoise >= 6:
denoise = 0
elif box8_Rect.contains(Mouse_Rect):
print("mask cycle...")
pygame.draw.rect(gameDisplay, (200,50,200), (15,485, 40,40), 15)
mask_option = mask_option + 1
if mask_option >= 9:
mask_option = 1
#print len(numpy_pic[3])
print "###"
#print numpy_pic[1:,1,1]
#a = np.arange(100)
print "##########"
#numpy_pic[1:500, range(0, len(numpy_pic[2]), 10), 1] = 0
#for x in numpy_pic[1:500, range(0, len(numpy_pic[2])), 1]:
# if x >= 100:
# x = 255
#for x in range(10,170,10):
# mask = numpy_pic < x
# numpy_pic[mask] = 255-x #numpy_pic[mask] + numpy_pic[mask]
#for x in range(200,255,5):
# mask = numpy_pic > x
# numpy_pic[mask] = 0+(x/10) # numpy_pic[mask] / numpy_pic[mask]+(numpy_pic[mask]/numpy_pic[mask])
#print numpy_pic[1:,1,1]
#print numpy_pic.min()
print "###"
#print numpy_pic.shape #Array dimensions
#print numpy_pic.ndim #Number of array dimensions
#print numpy_pic.dtype #Data type of array elements
#print numpy_pic.dtype.name #Name of data type
#print numpy_pic.mean()
#print numpy_pic.max()
#print numpy_pic.min()
#print numpy.info(numpy.ndarray.dtype)
#print numpy_pic.astype(int)
#mask = numpy_pic > numpy_pic_b
#mask = numpy_pic[:, :, 2] > 150
#numpy_pic[mask] = [0, 0, 255]
#mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
#mask2 = numpy_pic_b < numpy_pic - 30
#numpy_pic[mask] = [0, 0, 255]
# if show1 == '1':
# e_pic = ((e_pic/4) - (numpy_pic))*3
# e_pic = e_pic / 3 + old_e / 2
# elif show1 == 'tripsy':
# e_pic = ((e_pic/4) - (numpy_pic))*3
# e_pic = e_pic - old_e / 2
## elif show1 == 'waa':
# e_pic = ((e_pic/4) - (numpy_pic))*3
# #e_pic = old_e * 0.8 + e_pic * 0.2
# Image.fromarray(e_pic).save(e_photo)
#define, name and blank images
e_photo= "numpy_"+str(str(time.time()).split(".")[0])+".jpg" #name to save the edited photo as
num = num + 1 #frame count
b_photo = c_photo #current photo becomes before photo.
numpy_pic_b = numpy_pic.copy() #and for the numpy loaded image
c_photo = photo() #take a new photo for the current image
pil_c_photo = Image.open(c_photo) #load the just taken photo
numpy_pic = numpy.array(pil_c_photo) #turn it into a numpy array
#print numpy_pic.size
old_e = e_pic.copy() #current edited pic becomes old edited pic
e_pic[:,:,:] = 0 #current edited pic is blanked
mask_b_pic = e_pic.copy() #blank the b mask (brighter pixels)
mask_d_pic = e_pic.copy() #blank the d mask (darker pixels)
mask_m_pic = e_pic.copy() #blank the m mask (all that isn't mask)
pygame.display.set_caption(str(margin))
maskr = numpy_pic[:, :, 0] < numpy_pic_b[:, :, 0] - margin
maskg = numpy_pic[:, :, 1] < numpy_pic_b[:, :, 1] - margin
maskb = numpy_pic[:, :, 2] < numpy_pic_b[:, :, 2] - margin
maskr2 = numpy_pic[:, :, 0] > numpy_pic_b[:, :, 0] + margin
maskg2 = numpy_pic[:, :, 1] > numpy_pic_b[:, :, 1] + margin
maskb2 = numpy_pic[:, :, 2] > numpy_pic_b[:, :, 2] + margin
mask_all_b = numpy_pic[:, :, :] > numpy_pic_b[:, :, :] + margin
mask_all_d = numpy_pic[:, :, :] < numpy_pic_b[:, :, :] + margin
#mask_m options
if mask_option == 1:
mask_m_pic[mask_all_b] = 255
#m_mask = mask_b_pic[:, :, :] < 254
#mask_m_pic[m_mask] = 100
mask_text = "all b"
elif mask_option == 2:
mask_m_pic[mask_all_d] = 255
#m_mask = mask_d_pic[:, :, :] < 254
#mask_m_pic[m_mask] = 0
mask_text = "all d"
elif mask_option == 3:
mask_b_pic[maskr] = [255, 255, 255]
mask_b_pic[maskg] = [255, 255, 255]
mask_b_pic[maskb] = [255, 255, 255]
mask_d_pic[maskr2] = [0, 0, 0]
mask_d_pic[maskg2] = [0, 0, 0]
mask_d_pic[maskb2] = [0, 0, 0]
mask_m_pic = mask_b_pic + mask_d_pic
mask_text = "Two Tone Movement Mask"
elif mask_option == 4:
mask_b_pic[maskr] = [0, 0, 0]
mask_b_pic[maskg] = [0, 0, 0]
mask_b_pic[maskb] = [0, 0, 0]
mask_d_pic[maskr2] = [255, 255, 255]
mask_d_pic[maskg2] = [255, 255, 255]
mask_d_pic[maskb2] = [255, 255, 255]
mask_m_pic = mask_b_pic + mask_d_pic
mask_text = "Two Tone b Movement Mask"
elif mask_option == 5:
mask_b_pic[maskr] = [200, 10, 10]
mask_b_pic[maskg] = [10, 200, 10]
mask_b_pic[maskb] = [10, 10, 200]
mask_d_pic[maskr2] = [255, 255, 255]
mask_d_pic[maskg2] = [255, 255, 255]
mask_d_pic[maskb2] = [255, 255, 255]
mask_m_pic = mask_b_pic + mask_d_pic
mask_text = "RGB and Black Movement Mask"
elif mask_option == 6:
mask_b_pic[maskr] = [200, 10, 10]
mask_b_pic[maskg] = [10, 200, 10]
mask_b_pic[maskb] = [10, 10, 200]
mask_d_pic[maskr2] = [200, 10, 10]
mask_d_pic[maskg2] = [10, 200, 10]
mask_d_pic[maskb2] = [10, 10, 200]
mask_m_pic = mask_d_pic + mask_b_pic
mask_text = "RGB and RGB Movement Mask"
elif mask_option == 7:
mask_b_pic[maskr] = [188, 5, 5]
mask_b_pic[maskg] = [91, 217, 54]
mask_b_pic[maskb] = [213, 217, 32]
mask_d_pic[maskr2] = [188, 5, 5]
mask_d_pic[maskg2] = [91,217,54]
mask_d_pic[maskb2] = [213, 217, 32]
mask_m_pic = mask_d_pic
mask_text = "Rasta Movement Mask"
elif mask_option == 8:
mask_b_pic[maskr] = [188, 5, 5]
mask_b_pic[maskg] = [91, 217, 54]
mask_b_pic[maskb] = [213, 217, 32]
mask_d_pic[maskr2] = [188, 5, 5]
mask_d_pic[maskg2] = [91,217,54]
mask_d_pic[maskb2] = [213, 217, 32]
mask_m_pic = mask_d_pic / mask_b_pic
mask_text = "Rasta Movement Mask"
#change text
mask_text = font.render(str(mask_text), True, (0, 128, 0))
mask_button = font.render("M:"+str(mask_option), True, (50, 100, 50))
gameDisplay.blit(mask_button, (6, (ypos+10)*8))
#denoise option
denoise_text = ""
if denoise == 1:
e_pic = median_filter(e_pic, 3)
denoise_text = "Denoising edited image"
elif denoise == 2:
mask_m_pic = median_filter(mask_m_pic, 3)
denoise_text = "Denoising m mask"
elif denoise == 3:
mask_d_pic = median_filter(mask_d_pic, 3)
denoise_text = "Denoising d mask"
elif denoise == 4:
mask_b_pic = median_filter(mask_b_pic, 3)
denoise_text = "Denoising b mask"
elif denoise == 5:
denoise_text = ""
denoise_text = font.render(str(denoise_text), True, (0, 128, 0))
de_button = font.render(str(denoise), True, (50, 100, 50))
gameDisplay.blit(de_button, (12, (ypos+10)*7))
print denoise_text
#Persist option
if persistant == 0:
print("-no persist")
e_pic = mask_m_pic
elif persistant <= -1: #deducts multipul of prior edited image
e_pic = mask_m_pic - ((old_e * abs(persistant) / (abs(persistant)-1)))
elif persistant == 1: #simple addition with prior edited image
e_pic = mask_m_pic + old_e
elif persistant >= 2: #fade increases as number increases
e_pic = mask_m_pic + ((old_e/persistant)*(persistant-1))
text = font.render(str(persistant), True, (0, 128, 0))
gameDisplay.blit(text, (20, 120))
#show style option
if show_style == 0:
show_pic = e_pic + (numpy_pic/2)
text = ("Showing; edited pic and half the original image")
elif show_style == 2:
show_pic = e_pic
text = ("Showing; edit pic only")
elif show_style == 1:
show_pic = e_pic + numpy_pic
text = ("Showing; edit pic plus original pic")
elif show_style == 3:
show_pic = mask_m_pic
text = ("Showing; showing the current m mask")
elif show_style == 4:
show_pic = mask_b_pic
text = ("Showing; showing current b mask")
elif show_style == 5:
show_pic = mask_d_pic
        text = ("Showing; showing current d mask")
elif show_style == 6:
show_pic = mask_b_pic + mask_d_pic
text = ("Showing; show d mask plus b mask")
elif show_style == 7:
show_pic = numpy_pic
text = ("Showing; orginal image")
text = font.render(str(text), True, (50, 128, 0))
show_button = font.render(str(show_style), True, (50, 128, 0))
gameDisplay.blit(show_button, (15, (ypos+10)*6))
gameDisplay.blit(text, (2, y_dim+2))
#
#show_pic = imresize(show_pic, (500, 500))
#show_pic = gaussian_filter(show_pic, sigma=3)
#show_pic = median_filter(show_pic, 3)
#e_pic = e_pic/6 + old_e
#e_pic = e_pic/2 - ((mask_d_pic) + (mask_b_pic))
#e_pic = e_pic/2 + ((mask_d_pic) + (mask_b_pic))
#choose one of the following
#e_pic = mask_d_pic #shows when pixel is darker than it was
#e_pic = mask_b_pic #shows when pixel is lighter than prior
#e_pic = mask_d_pic - mask_b_pic #black execpt for movement
#e_pic = mask_b_pic / (mask_d_pic / 100) #odd
#e_pic = mask_d_pic + mask_b_pic #looks odd
#e_pic = mask_d_pic - (old_e/3)*2 #persists and looks cool
#e_pic = ((mask_d_pic + mask_b_pic) - (old_e/8)*2)
#e_pic = ( ((mask_d_pic + mask_b_pic) ) - (old_e)) #* 2
#show_pic = e_pic
Image.fromarray(show_pic).save(e_photo)
if to_graph == True:
r_sum = numpy_pic[:,:,0].sum()
g_sum = numpy_pic[:,:,1].sum()
b_sum = numpy_pic[:,:,2].sum()
tot_sum = r_sum + g_sum + b_sum
print " ---- Current Photo ---"
print "Red:" + str(r_sum) + " Green:" + str(g_sum) + " Blue:" + str(b_sum)
print "Total;" + str(tot_sum) #+ " also " + str(numpy_pic[:,:,:].sum())
#e_pic[:,:,:] = 255
e_r_sum = e_pic[:,:,0].sum()
e_g_sum = e_pic[:,:,1].sum()
e_b_sum = e_pic[:,:,2].sum()
e_tot_sum = e_r_sum + e_g_sum + e_b_sum
print " ---- Motion Edited Photo ---"
print "Red:" + str(e_r_sum) + " Green:" + str(e_g_sum) + " Blue:" + str(e_b_sum)
print "Total;" + str(e_tot_sum)
colour_values.append([num, r_sum, g_sum, b_sum, tot_sum, e_r_sum, e_g_sum, e_b_sum, e_tot_sum])
onscreen = pygame.image.load(e_photo)
gameDisplay.blit(onscreen, (75,0))
if to_graph == True:
for x in colour_values:
num = x[0]
r_graph = s_impcent(x[1]) #*2.5
g_graph = s_impcent(x[2]) #*2.5
b_graph = s_impcent(x[3]) #*2.5
tot_graph = t_impcent(x[4]) *3
e_r_graph = s_impcent(x[5]) #*2.5
e_g_graph = s_impcent(x[6]) #*2.5
e_b_graph = s_impcent(x[7]) #*2.5
e_tot_graph = t_impcent(x[8]) *2.5
pygame.draw.line(gameDisplay, (255,50,100), (num, 300-r_graph), (num, 300))
pygame.draw.line(gameDisplay, (50,255,100), (num, 300-(r_graph+g_graph)), (num, 300-r_graph))
pygame.draw.line(gameDisplay, (50,50,255), (num, 300-(r_graph+g_graph+b_graph)), (num, 300-(r_graph+g_graph)))
#e_red_graph = impcent(x[5]) *250)
#pygame.draw.line(gameDisplay, (255,50,50), (num,300-e_red_graph), (num, 300))
#pygame.draw.line(gameDisplay, (255,50,100), (num+10,300-tot_graph), (num+10, 300))
#pygame.draw.line(gameDisplay, (100,50,255), (num+200,600-e_tot_graph), (num+200, 600))
pygame.draw.line(gameDisplay, (255,50,100), (num, 600 - e_r_graph), (num, 600))
pygame.draw.line(gameDisplay, (50,255,100), (num, 600 - (e_r_graph + e_g_graph)), (num, 600-e_r_graph))
pygame.draw.line(gameDisplay, (50,50,255), (num, 600 - (e_r_graph + e_g_graph + e_b_graph)), (num, 600-(e_r_graph + e_g_graph)))
#print "graph point: " + str(num) + " , " + str(e_red_graph)
#print "graph point: " + str(num) + " - total of source pic: " + str(tot_graph)
#print "graph point: " + str(num) + " - total of edited pic: " + str(e_tot_graph)
#largeText = pygame.font.Font('freesansbold.ttf',115)
#TextSurf, TextRect = text_objects("A bit Racey", largeText)
#TextRect.center = ((display_width/2),(display_height/2))
#gameDisplay.blit(TextSurf, TextRect)
draw_menu()
gameDisplay.blit(denoise_text, (2, y_dim-55))
gameDisplay.blit(mask_text, (2, y_dim-95))
pygame.display.update()
if trig == True:
print("Waiting for input before taking next image...")
tp = raw_input("press return to take picture; ")
if tp == "q":
print("---bye!")
exit()
clock.tick(20)
if loop == True:
print("waiting; " + str(wait_num))
pygame.time.wait(wait_num)
clock.tick(20)
elif trig == False and loop == False:
crashed = True
#while True:
#pygame.time.wait(1000)
#clock.tick(20)
pygame.quit()
quit()
| gpl-2.0 | -6,358,611,939,327,247,000 | 35.846975 | 168 | 0.543896 | false |
jeremiedecock/snippets | python/matplotlib/multiple_y_axis.py | 1 | 1833 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make multiple plots that share an X-axis but have independent Y axes
See:
- http://matplotlib.org/examples/pylab_examples/multiple_yaxis_with_spines.html
- http://matplotlib.org/examples/subplots_axes_and_figures/fahrenheit_celsius_scales.html
"""
import numpy as np
import matplotlib.pyplot as plt
# Build datas ###############
x1 = 10 * np.random.standard_normal(10000) + 5.
x2 = 12 * np.random.standard_normal(10000)
nbins = 25
fig = plt.figure(figsize=(10.,6.))
# Plot two distributions on the same plot #####################################
ax = fig.add_subplot(111)
val_of_bins_x1, edges_of_bins_x1, patches_x1 = plt.hist(x1, nbins, range=(-50,50), histtype='step')
val_of_bins_x2, edges_of_bins_x2, patches_x2 = plt.hist(x2, nbins, range=(-50,50), histtype='step')
# Compute ratio ###############################################################
# This puts the ratio to zero if val_of_bins_x2 is zero
ratio = np.divide(val_of_bins_x1,
val_of_bins_x2,
where=(val_of_bins_x2 != 0))
# Compute the error on the ratio, null if it cannot be computed
error = np.divide(val_of_bins_x1 * np.sqrt(val_of_bins_x2) + val_of_bins_x2 * np.sqrt(val_of_bins_x1),
val_of_bins_x2 * val_of_bins_x2,
where=(val_of_bins_x2 != 0))
# Add the ratio on the existing plot
ax2 = ax.twinx() # <- !!!
bincenter = 0.5 * (edges_of_bins_x1[1:] + edges_of_bins_x1[:-1])
ax2.errorbar(bincenter, ratio, yerr=error, fmt='.', color='r', lw=2)
ax.set_xlabel("X")
ax.set_ylabel("Count")
ax2.set_ylabel("Ratio")
# Save file ###################################################################
plt.savefig("multiple_y_axis.png")
# Plot ########################################################################
plt.show()
| mit | -7,474,506,535,735,043,000 | 30.067797 | 101 | 0.553737 | false |
jeremymcrae/mupit | mupit/mutation_rates.py | 1 | 8652 | """
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
import tempfile
import urllib
import pandas
from mupit.gtf import convert_gtf
from mupit.util import is_url
def get_default_rates(rates_url="http://www.nature.com/ng/journal/v46/n9/extref/ng.3050-S2.xls",
gencode_url="ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_19/gencode.v19.annotation.gtf.gz"):
""" obtain the table of mutation rates from the Samocha et al paper
Rates for all genes can be obtained from the supplementary material of
Samocha et al. Nature Genetics 2014 doi:10.1038/ng.3050
Args:
rates_url: url to supplementary mutation rates table
gencode_url: url to gencode, or local path. This is required to identify
chromosomes for the genes in the rates data, since we need to know
the chromosome in order to correct rates on chromosome X.
Returns:
dataframe of mutation rates, with an extra column for summed lof rate
"""
rates = pandas.read_excel(rates_url, sheetname="mutation_probabilities")
# convert rates from log-scaled values, so we can later multiply by the
# number of transmissions
columns = ["syn", "mis", "non", "splice_site", "frameshift"]
rates[columns] = 10 ** rates[columns]
# sort out the required columns and names.
rates["hgnc"] = rates["gene"]
gencode = load_gencode(gencode_url)
recode = dict(zip(gencode["hgnc"], gencode["chrom"]))
rates["chrom"] = rates["hgnc"].map(recode)
rates = rates[["hgnc", "chrom", "syn", "mis", "splice_site", "frameshift", "non"]]
return rates
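# Minimal usage sketch for get_default_rates (the file names below are
# hypothetical local copies; by default the URLs in the signature are used):
#
#     rates = get_default_rates(rates_url="ng.3050-S2.xls",
#                               gencode_url="gencode.v19.annotation.gtf.gz")
#     print(rates.head())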
def load_gencode(path):
""" load gencode table with HGNC symbols and chromosome coordinates
Args:
path: path to gzipped tab-separated table of gencode gene entries. This
can be either a url, or local path.
Returns:
pandas dataframe of HGNC symbols and genome coordinates
"""
gencode = convert_gtf(path)
# restrict ourselves to protein coding genes (or genes which are protein
# coding in at least some individuals)
gencode = gencode[gencode["gene_type"].isin(["protein_coding",
"polymorphic_pseudogene"])]
gencode = gencode[gencode["feature"] == "gene"]
# get the required column names, and strip out all unnecessary columns
gencode["hgnc"] = gencode["gene_name"]
gencode["chrom"] = [ x.strip("chr") for x in gencode["seqname"].astype(str) ]
gencode = gencode[["hgnc", "chrom", "start", "end"]].copy()
return gencode
def get_expected_mutations(rates, male, female):
""" gets numbers of expected mutation per gene
Loads gene-based mutation rates in order to determine the expected number
of mutations per gene, given the number of studied probands, adjusting for
sex-chromosome transmissions.
This defaults to the gene-based mutation rates from Nature Genetics
46:944-950 (2014) doi:10.1038/ng.3050, but we can pass in other gene-based
mutation rate datasets.
Args:
rates: pandas dataframe containing per-gene mutation
male: number of male probands in the dataset
female: number of female probands in the dataset
Returns:
a dataframe of mutation rates for genes under different mutation
classes.
"""
if rates is None:
rates = get_default_rates()
autosomal = 2 * (male + female)
expected = rates[["hgnc", "chrom"]].copy()
# account for how different pandas versions sum series with only NA
kwargs = {}
if pandas.__version__ >= '0.22.0':
kwargs = {'min_count': 1}
# get the number of expected mutations, given the number of transmissions
expected["lof_indel"] = rates["frameshift"] * autosomal
expected["lof_snv"] = (rates[["non", "splice_site"]].sum(axis=1, skipna=True, **kwargs)) * autosomal
expected["missense_indel"] = (rates["frameshift"] / 9) * autosomal
expected["missense_snv"] = rates["mis"] * autosomal
expected["synonymous_snv"] = rates["syn"] * autosomal
# correct for the known ratio of indels to nonsense, and for transmissions
# on the X-chromosome
expected = adjust_indel_rates(expected)
expected = correct_for_x_chrom(expected, male, female)
# subset to the columns we need to estimate enrichment probabilities
expected = expected[["hgnc", "chrom", "lof_indel", "lof_snv",
"missense_indel", "missense_snv", "synonymous_snv"]]
return expected
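# Worked sketch of the scaling above (numbers are illustrative only): with
# male=100 and female=100 probands, autosomal = 2 * (100 + 100) = 400
# transmissions, so a gene with a combined nonsense + splice-site rate of
# 1e-6 gets an expected lof_snv count of 1e-6 * 400 = 4e-4, before the indel
# ratio and chrX corrections applied by the helper functions below.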
def correct_for_x_chrom(expected, male_n, female_n):
""" correct mutations rates for sex-chromosome transmission rates
Args:
expected: gene-based data frame, containing rates for different mutation
classes.
male_n: number of trios with male offspring
female_n: number of trios with female offspring
Returns:
a dataframe of mutation rates for genes under different mutation
classes.
"""
# Calculate the number of transmissions for autosomal, male and female
# transmissions. The number of transmissions from males is equal to the
# number of female probands (since only females receive a chrX from their
# fathers). Likewise, all offspring receive a chrX from their mothers, so
# the number of transmissions from females equals the number of probands.
autosomal = 2 * (male_n + female_n)
female_transmissions = male_n + female_n
male_transmissions = female_n
# get scaling factors using the alpha from the most recent SFHS (Scottish
# Family Health Study) phased de novo data.
alpha = 3.4
male_factor = 2 / (1 + (1 / alpha))
female_factor = 2 / (1 + alpha)
# correct the non-PAR chrX genes for fewer transmissions and lower rate
# (dependent on alpha)
chrX = expected["chrom"].isin(["X", "chrX"])
x_factor = ((male_transmissions * male_factor) + (female_transmissions * female_factor)) / autosomal
x_factor = pandas.Series([x_factor] * len(chrX), index=expected.index)
x_factor[~chrX] = 1
expected["missense_snv"] *= x_factor
expected["missense_indel"] *= x_factor
expected["lof_snv"] *= x_factor
expected["lof_indel"] *= x_factor
expected["synonymous_snv"] *= x_factor
return expected
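# Worked example of the chrX correction (illustrative numbers): with
# alpha = 3.4, male_factor = 2 / (1 + 1/3.4) ~= 1.55 and
# female_factor = 2 / (1 + 3.4) ~= 0.45. For 100 male and 100 female trios,
# male_transmissions = 100 and female_transmissions = 200, so
# x_factor = (100 * 1.55 + 200 * 0.45) / 400 ~= 0.61, i.e. non-PAR chrX
# rates are scaled to roughly 61% of the equivalent autosomal expectation.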
def adjust_indel_rates(expected):
""" adapt indel rates for lower rate estimate from validated de novos
The indel mutation rates from Samocha et al., Nature Genetics 46:944-950
assume that the overall indel mutation rate is 1.25-fold greater than the
overall nonsense mutation rate, ie there are 1.25 times as many frameshifts
as nonsense mutations. We have our own estimates for the ratio, derived from
our de novo validation efforts, which we shall apply in place of the Samocha
et al ratios.
Args:
rates: data frame of mutation rates.
Returns:
the rates data frame, with adjusted indel rates.
"""
# the following numbers were derived from the DDD 4K dataset.
nonsense_n = 411
frameshift_n = 610
ddd_ratio = frameshift_n / nonsense_n
samocha_ratio = 1.25 # Nature Genetics 46:944-950 frameshift to nonsense ratio
# correct back from using the Samocha et al. ratio
expected["missense_indel"] /= samocha_ratio
expected["lof_indel"] /= samocha_ratio
# adjust the indel rates for the DDD indel ratio
expected["missense_indel"] *= ddd_ratio
expected["lof_indel"] *= ddd_ratio
return expected
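# Net effect of the adjustment above: ddd_ratio = 610 / 411 ~= 1.48, so each
# indel rate is divided by 1.25 and then multiplied by ~1.48, an overall
# scaling of roughly 1.19 relative to the published Samocha et al. rates.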
| mit | 8,314,548,943,535,683,000 | 38.327273 | 113 | 0.681577 | false |
jackwluo/py-quantmod | tests_full.py | 1 | 1303 | # In[]:
import numpy as np
import quantmod as qm
import pandas as pd
import pandas_datareader as web
# In[]:
ticker = 'AAPL'
df = web.DataReader(ticker, data_source='yahoo', start='2016/01/01')
df = df.tail(365)
ch = qm.Chart(df, start='2015/01/01', end='2017/03/02')
ch.has_open
ch.has_high
ch.has_low
ch.has_close
ch.op
ch.df
ch.ind
ch.pri
ch.adjust(inplace=True)
ch.adjust_volume(inplace=True)
ch.has_OHLC
print(ch.has_OHLCV)
ch.add_MA()
ch.add_SMA()
ch.add_EMA()
ch.add_WMA()
ch.add_DEMA()
ch.add_TEMA()
ch.add_T3()
ch.add_KAMA()
ch.add_TRIMA()
ch.add_MAMA()
# ch.add_MAVP()
ch.add_MIDPOINT()
ch.add_BBANDS()
ch.add_SAR()
ch.add_SAREXT()
ch.add_HT_TRENDLINE()
ch.add_APO()
ch.add_AROON()
ch.add_AROONOSC()
ch.add_BOP()
ch.add_CCI()
ch.add_CMO()
ch.add_ADX()
ch.add_ADXR()
ch.add_DX()
ch.add_MINUS_DI()
ch.add_PLUS_DI()
ch.add_MINUS_DM()
ch.add_PLUS_DM()
ch.add_MACD()
ch.add_MACDEXT()
ch.add_MFI()
ch.add_MOM()
ch.add_PPO()
ch.add_ROC()
ch.add_ROCP()
ch.add_ROCR()
ch.add_ROCR100()
ch.add_RSI()
ch.add_STOCH()
ch.add_STOCHF()
ch.add_STOCHRSI()
ch.add_TRIX()
ch.add_ULTOSC()
ch.add_WILLR()
ch.plot(kind='candlestick', volume=True,
title='Full Test', filename='full_test')
import quantmod as qm
ch = qm.get_symbol('QQQ', start='01/01/2016')
ch.add_EMA(9)
ch.add_RSI(14)
ch.iplot()
| mit | 3,089,701,147,654,672,000 | 14.151163 | 68 | 0.669225 | false |
blancha/abcngspipelines | alignment/bwamem.py | 1 | 2998 | #!/usr/bin/env python3
# Version 1.1
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
import argparse
import glob
import os
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates bwa mem scripts.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory.", default="bwa")
parser.add_argument("-i", "--inputDirectory", help="Input directory with FASTQ files.", default="../data/FASTQ_files/untrimmed/")
parser.add_argument("-o", "--outputDirectory", help="Output directory with bwa results.", default="../results/bwa/")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
util.cdMainScriptsDirectory()
# Process the command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
genome = config.get("project", "genome")
genomeFile = config.get(genome, "genomeFile")
bwaIndex = config.get(genome, "bwaIndex")
threads = config.get("bwamem", "threads")
# Read samples file
samplesFile = util.readsamplesFile()
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Create output directories, if they do not exist yet.
if not os.path.exists(outputDirectory):
os.makedirs(outputDirectory)
# Cycle through all the samples and write the bwa scripts.
for index, row in samplesFile.iterrows():
sample = row["sample"]
if "lane" in samplesFile.columns:
sample= sample + "_lane_" + str(row["lane"])
# Create output directories
if not os.path.exists(outputDirectory + "/" + sample):
os.mkdir(outputDirectory +"/" + sample)
file_R1 = row["file_r1"]
file_R2 = row["file_r2"]
# Create script file.
scriptName = 'bwa_' + sample + '.sh'
script = open(scriptName, 'w')
if header:
util.writeHeader(script, config, "bwamem")
script.write("bwa mem -M" + " \\\n")
script.write("-t " + threads + " \\\n")
script.write("-R '@RG\\tID:" + sample + "\\tSM:" + row["sample"] + "\\tPL:Illumina\\tLB:lib1\\tPU:unit1'" + " \\\n");
script.write(bwaIndex + " \\\n")
script.write(inputDirectory + "/" + file_R1 + " \\\n")
script.write(inputDirectory + "/" + file_R2 + " \\\n")
script.write("1> " + outputDirectory + "/" + sample + "/" + sample + ".sam " + "\\\n")
script.write("2> " + scriptName + ".log")
script.close()
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
| gpl-3.0 | -1,189,374,541,164,608,500 | 37.435897 | 137 | 0.680787 | false |
huletlab/PyAbel | examples/example_basex_photoelectron.py | 1 | 2702 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import numpy as np
import matplotlib.pyplot as plt
import abel
# This example demonstrates a BASEX transform of an image obtained using a
# velocity map imaging (VMI) photoelectron spectrometer to record the
# photoelectron angular distribution resulting from above-threshold ionization (ATI)
# in xenon gas using a ~40 femtosecond, 800 nm laser pulse.
# This spectrum was recorded in 2012 in the Kapteyn-Murnane research group at
# JILA / The University of Colorado at Boulder
# by Dan Hickstein and co-workers (contact DanHickstein@gmail.com)
# http://journals.aps.org/prl/abstract/10.1103/PhysRevLett.109.073004
#
# Before you start your own transform, identify the central pixel of the image.
# It's nice to use a program like ImageJ for this.
# http://imagej.nih.gov/ij/
# Specify the path to the file
filename = os.path.join('data', 'Xenon_ATI_VMI_800_nm_649x519.tif')
# Name the output files
output_image = filename[:-4] + '_Abel_transform.png'
output_text = filename[:-4] + '_speeds.txt'
output_plot = filename[:-4] + '_comparison.pdf'
# Step 1: Load an image file as a numpy array
print('Loading ' + filename)
raw_data = plt.imread(filename).astype('float64')
# Step 2: Specify the center in y,x (vert,horiz) format
center = (245,340)
# or, use automatic centering
# center = 'com'
# center = 'gaussian'
# Step 3: perform the BASEX transform!
print('Performing the inverse Abel transform:')
recon = abel.Transform(raw_data, direction='inverse', method='basex',
center=center, transform_options={'basis_dir':'./'},
verbose=True).transform
speeds = abel.tools.vmi.angular_integration(recon)
# Set up some axes
fig = plt.figure(figsize=(15,4))
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
# Plot the raw data
im1 = ax1.imshow(raw_data,origin='lower',aspect='auto')
fig.colorbar(im1,ax=ax1,fraction=.1,shrink=0.9,pad=0.03)
ax1.set_xlabel('x (pixels)')
ax1.set_ylabel('y (pixels)')
# Plot the 2D transform
im2 = ax2.imshow(recon,origin='lower',aspect='auto',clim=(0,2000))
fig.colorbar(im2,ax=ax2,fraction=.1,shrink=0.9,pad=0.03)
ax2.set_xlabel('x (pixels)')
ax2.set_ylabel('y (pixels)')
# Plot the 1D speed distribution
ax3.plot(*speeds)
ax3.set_xlabel('Speed (pixel)')
ax3.set_ylabel('Yield (log)')
ax3.set_yscale('log')
ax3.set_ylim(1e2,1e5)
# Prettify the plot a little bit:
plt.subplots_adjust(left=0.06,bottom=0.17,right=0.95,top=0.89,wspace=0.35,hspace=0.37)
# Show the plots
plt.show()
| mit | 6,193,225,707,221,089,000 | 31.554217 | 86 | 0.710215 | false |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/io/tests/test_stata.py | 1 | 48444 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path, write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
with StataReader(self.dta1_114) as rdr:
with warnings.catch_warnings(record=True) as w: # noqa
parsed_114_data = rdr.data()
with StataReader(self.dta1_114) as rdr:
parsed_114_read = rdr.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
parsed_114 = self.read_dta(self.dta1_114)
parsed_117 = self.read_dta(self.dta1_117)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_115, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_117, expected,
check_datetimelike_compat=True)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category')
for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_dta18(self):
parsed_118 = self.read_dta(self.dta22_118)
parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
expected = DataFrame.from_records(
[['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan],
['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0],
['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4],
['', '', '', 0, 0.3332999, 'option a', 1 / 3.]
],
columns=['Things', 'Cities', 'Unicode_Cities_Strl',
'Ints', 'Floats', 'Bytes', 'Longs'])
expected["Floats"] = expected["Floats"].astype(np.float32)
for col in parsed_118.columns:
tm.assert_almost_equal(parsed_118[col], expected[col])
with StataReader(self.dta22_118) as rdr:
vl = rdr.variable_labels()
vl_expected = {u'Unicode_Cities_Strl':
u'Here are some strls with Ünicode chars',
u'Longs': u'long data',
u'Things': u'Here are some things',
u'Bytes': u'byte data',
u'Ints': u'int data',
u'Cities': u'Here are some cities',
u'Floats': u'float data'}
tm.assert_dict_equal(vl, vl_expected)
self.assertEqual(rdr.data_label, u'This is a Ünicode data label')
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original, check_index_type=False)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer',
'floating', 'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
# original.index is np.int32, the index read back is np.int64
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original, check_index_type=False)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5, 4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
with tm.ensure_clean() as path:
df.to_stata(path, write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode) # noqa
with tm.ensure_clean() as path:
encoded.to_stata(path, encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number',
'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number',
'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2 ** 9, dtype=np.int16)
s2 = Series(2 ** 17, dtype=np.int32)
s3 = Series(2 ** 33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col]._convert(datetime=True, numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(
expected['date_td'], errors='coerce')
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(
datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp,
data_label=data_label)
with StataReader(path) as reader:
parsed_time_stamp = dt.datetime.strptime(
reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(
KeyError, lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with tm.assert_produces_warning(PossiblePrecisionLoss):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path, {0: 'tc'})
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_105(self):
# Data obtained from:
# http://go.worldbank.org/ZXY29PVJ21
dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta')
df = pd.read_stata(dpath)
df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
df0 = pd.DataFrame(df0)
df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
df0['clustnum'] = df0["clustnum"].astype(np.int16)
df0['pri_schl'] = df0["pri_schl"].astype(np.int8)
df0['psch_num'] = df0["psch_num"].astype(np.int8)
df0['psch_dis'] = df0["psch_dis"].astype(np.float32)
tm.assert_frame_equal(df.head(3), df0)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
with StataReader(self.dta16_115) as rdr:
sr_115 = rdr.variable_labels()
with StataReader(self.dta16_117) as rdr:
sr_117 = rdr.variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k, v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len,
'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
with StataReader(path) as sr:
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len,
'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b', 'h', 'l')
df = DataFrame([[0.0]], columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
with StataReader(path) as rdr:
valid_range = rdr.VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
for t in types:
offset = valid_range[t][1]
for i in range(0, 27):
val = StataMissingValue(offset + 1 + i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
# Test extremes for floats
val = StataMissingValue(struct.unpack(
'<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack(
'<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = read_stata(self.dta17_117, convert_missing=True)
tm.assert_frame_equal(expected, parsed_113)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
# Fixes for weekly, quarterly,half,year
expected[2][2] = datetime(9999, 12, 24)
expected[2][3] = datetime(9999, 12, 1)
expected[2][4] = datetime(9999, 10, 1)
expected[2][5] = datetime(9999, 7, 1)
expected[4][2] = datetime(2262, 4, 16)
expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
expected[5][2] = expected[5][3] = expected[
5][4] = datetime(1677, 10, 1)
expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115,
check_datetimelike_compat=True)
tm.assert_frame_equal(expected, parsed_117,
check_datetimelike_compat=True)
date_conversion = dict((c, c[-2:]) for c in columns)
# {c : c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected,
check_datetimelike_compat=True)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
# See PR 10757
columns = ['int_', 'long_', 'byte_']
expected = expected[columns]
reordered = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, reordered)
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with tm.assertRaises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
expected['incompletely_labeled'] = expected[
'incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category')
for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w: # noqa
# Silence warnings
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), expected)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
with tm.ensure_clean() as path:
tm.assertRaises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
with warnings.catch_warnings(record=True) as w:
original.to_stata(path)
# should get a warning for mixed content
tm.assert_equal(len(w), 1)
def test_categorical_with_stata_missing_values(self):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), original)
def test_categorical_order(self):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
(True, 'reverse', ['a', 'b', 'c',
'd', 'e'], np.arange(5)[::-1]),
(True, 'noorder', ['a', 'b', 'c', 'd',
'e'], np.array([2, 1, 4, 0, 3])),
(True, 'floating', [
'a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
(True, 'float_missing', [
'a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
(False, 'nolabel', [
1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'],
np.arange(5))]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_items(cols)
# Read with and with out categoricals, ensure order is identical
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
parsed_115[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
parsed_115[col].cat.categories)
def test_categorical_sorting(self):
parsed_115 = read_stata(self.dta20_115)
parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
parsed_115 = parsed_115.sort_values("srh")
parsed_117 = parsed_117.sort_values("srh")
# Don't sort index
parsed_115.index = np.arange(parsed_115.shape[0])
parsed_117.index = np.arange(parsed_117.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
cat = pd.Categorical.from_codes(codes=codes, categories=categories)
expected = pd.Series(cat, name='srh')
tm.assert_series_equal(expected, parsed_115["srh"])
tm.assert_series_equal(expected, parsed_117["srh"])
def test_categorical_ordering(self):
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
parsed_115_unordered = read_stata(self.dta19_115,
order_categoricals=False)
parsed_117_unordered = read_stata(self.dta19_117,
order_categoricals=False)
for col in parsed_115:
if not is_categorical_dtype(parsed_115[col]):
continue
tm.assert_equal(True, parsed_115[col].cat.ordered)
tm.assert_equal(True, parsed_117[col].cat.ordered)
tm.assert_equal(False, parsed_115_unordered[col].cat.ordered)
tm.assert_equal(False, parsed_117_unordered[col].cat.ordered)
def test_read_chunks_117(self):
files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
self.dta4_117, self.dta14_117, self.dta15_117,
self.dta16_117, self.dta17_117, self.dta18_117,
self.dta19_117, self.dta20_117]
for fname in files_117:
for chunksize in 1, 2:
for convert_categoricals in False, True:
for convert_dates in False, True:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
itr = read_stata(
fname, iterator=True,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False,
check_datetimelike_compat=True)
pos += chunksize
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
itr = read_stata(fname, iterator=True)
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
itr = read_stata(fname, chunksize=5)
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
itr = read_stata(fname, iterator=True)
chunk = itr.get_chunk(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
itr = read_stata(fname, chunksize=5)
chunk = itr.get_chunk()
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
# GH12153
from_chunks = pd.concat(read_stata(fname, chunksize=4))
tm.assert_frame_equal(parsed, from_chunks)
def test_read_chunks_115(self):
files_115 = [self.dta2_115, self.dta3_115, self.dta4_115,
self.dta14_115, self.dta15_115, self.dta16_115,
self.dta17_115, self.dta18_115, self.dta19_115,
self.dta20_115]
for fname in files_115:
for chunksize in 1, 2:
for convert_categoricals in False, True:
for convert_dates in False, True:
# Read the whole file
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
# Compare to what we get when reading by chunk
itr = read_stata(
fname, iterator=True,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False,
check_datetimelike_compat=True)
pos += chunksize
def test_read_chunks_columns(self):
fname = self.dta3_117
columns = ['quarter', 'cpi', 'm1']
chunksize = 2
parsed = read_stata(fname, columns=columns)
itr = read_stata(fname, iterator=True)
pos = 0
for j in range(5):
chunk = itr.read(chunksize, columns=columns)
if chunk is None:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 | 3,143,999,181,759,587,300 | 42.555755 | 83 | 0.525499 | false |
amadeusproject/amadeuslms | reports/views.py | 1 | 37086 | """
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
This file is part of the Amadeus Learning Management System, or simply Amadeus LMS
Amadeus LMS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (FSF), in version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, under the title "LICENSE", along with this program; if not, write to the Free Software Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import copy
import math
import os
from collections import OrderedDict
from datetime import date, datetime, timedelta
from os.path import join
from typing import List
import django.views.generic as generic
import pandas as pd
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.forms import formset_factory
from django.http import HttpResponse, JsonResponse
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from amadeus import settings
from amadeus.permissions import has_subject_permissions
from chat.models import Conversation, TalkMessages
from log.models import Log
from mural.models import Comment, MuralVisualizations, SubjectPost
from subjects.models import Subject, Tag
from topics.models import Resource, Topic
from .forms import (BaseResourceAndTagFormset, CreateInteractionReportForm,
ResourceAndTagForm)
from .models import ReportCSV, ReportXLS
class ReportView(LoginRequiredMixin, generic.FormView):
template_name = "reports/create.html"
form_class = CreateInteractionReportForm
def dispatch(self, request, *args, **kwargs):
params = self.request.GET
subject = Subject.objects.get(id=params['subject_id'])
if not has_subject_permissions(request.user, subject):
return redirect(reverse('subjects:home'))
return super(ReportView, self).dispatch(request, *args, **kwargs)
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
initial = {}
params = self.request.GET
subject = Subject.objects.get(id=params['subject_id'])
topics = subject.topic_subject.all()
initial['subject'] = subject
initial['topic'] = topics
initial['end_date'] = date.today()
return initial
def get_context_data(self, **kwargs):
context = super(ReportView, self).get_context_data(**kwargs)
subject = Subject.objects.get(id=self.request.GET['subject_id'])
context['title'] = _('Interaction Data')
context['subject'] = subject
# set formset
resourceTagFormSet = formset_factory(ResourceAndTagForm, formset=BaseResourceAndTagFormset)
resourceTagFormSet = resourceTagFormSet()
context['resource_tag_formset'] = resourceTagFormSet
return context
def get_success_url(self):
messages.success(self.request, _("Report created successfully"))
get_params = "?"
# passing form data through GET
for key, value in self.form_data.items():
get_params += key + "=" + str(value) + "&"
for form_data in self.formset_data:
for key, value in form_data.items():
get_params += key + "=" + str(value) + "&"
# retrieving subject id for data purposes
for key, value in self.request.GET.items():
get_params += key + "=" + str(value)
return reverse('subjects:reports:view_report', kwargs={}) + get_params
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
form = self.get_form()
subject = Subject.objects.get(id=self.request.GET['subject_id'])
empty_choice_index = -1
topics = subject.topic_subject.all()
# get all resources associated with topics
tags = []
for topic in topics:
resources_set = topic.resource_topic.all()
for resource in resources_set:
for tag in resource.tags.all():
tags.append(tag)
t = Tag(name=" ")
t.id = empty_choice_index
tags.append(t)
resouce_subclasses_names = Resource.__subclasses__()
amount_of_forms = self.request.POST['form-TOTAL_FORMS']
initial_datum = {'class_name': resouce_subclasses_names, 'tag': tags}
initial_data = []
for i in range(int(amount_of_forms)):
initial_data.append(initial_datum)
resource_tag_form_set = formset_factory(ResourceAndTagForm, formset=BaseResourceAndTagFormset)
resources_formset = resource_tag_form_set(self.request.POST, initial=initial_data)
if form.is_valid() and resources_formset.is_valid():
self.form_data = form.cleaned_data
self.formset_data = resources_formset.cleaned_data
return self.form_valid(form)
else:
return self.form_invalid(form)
class ViewReportView(LoginRequiredMixin, generic.TemplateView):
template_name = "reports/view.html"
def get_context_data(self, **kwargs):
context = {}
params_data = self.request.GET
subject = Subject.objects.get(id=params_data['subject_id'])
context['title'] = _('Interaction Data')
context['subject_name'] = subject.name
if params_data['topic'] == _("All"):
context['topic_name'] = params_data['topic']
else:
context['topic_name'] = Topic.objects.get(
id=int(params_data['topic'])).name
context['init_date'] = params_data['init_date']
context['end_date'] = params_data['end_date']
context['subject'] = subject
#I used getlist method so it can get more than one tag and one resource class_name
resources = params_data.getlist('resource')
tags = params_data.getlist('tag')
self.from_mural = params_data['from_mural']
self.from_messages = params_data['from_messages']
report_parameters = self.get_report_parameters(subject,
params_data['topic'],
params_data['init_date'],
params_data['end_date'],
tags)
context['data'], context['header'] = self.get_mural_data(
subject, report_parameters['topics'], report_parameters['students'],
report_parameters['init_date'], report_parameters['end_date'],
report_parameters['header'], report_parameters['tags_id'], resources)
# this is to save the csv for further download
df = pd.DataFrame.from_dict(context['data'], orient='index')
df.columns = context['header']
        # ensure that no more than one CSV report exists for this user to download
if ReportCSV.objects.filter(user=self.request.user).count() > 0:
report = ReportCSV.objects.get(user=self.request.user)
report.delete()
report = ReportCSV(user=self.request.user, csv_data=df.to_csv())
report.save()
# for excel files
if ReportXLS.objects.filter(user=self.request.user).count() > 0:
report = ReportXLS.objects.get(user=self.request.user)
report.delete()
folder_path = join(settings.MEDIA_ROOT, 'files')
# check if the folder already exists
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
path = join(settings.MEDIA_ROOT, 'files', 'report' + str(self.request.user.id) + '.xls')
writer = pd.ExcelWriter(path)
df.to_excel(writer, sheet_name='first_sheet')
writer.save()
report = ReportXLS(user=self.request.user)
report.xls_data.name = path
report.save()
return context
def process_date(self, init_date, end_date):
response = {}
# so it accepts english and portuguese date formats
date_formats = ["%d/%m/%Y", "%m/%d/%Y", "%Y-%m-%d"]
for fmt in date_formats:
try:
init_date = datetime.strptime(init_date, fmt).date()
end_date = datetime.strptime(end_date, fmt).date()
except ValueError:
pass
# I use this so the system can gather data up to end_date 11h59 p.m.
end_date = end_date + timedelta(days=1)
response["init_date"] = init_date
response["end_date"] = end_date
return response
def get_report_parameters(self, subject, topics_query, init_date, end_date,
tags_id):
"""
        Process all the data to be brought by the report
Subject: subject where the report is being created
topics_query: it's either one of the topics or all of them
        init_date: When the report's date filter starts
        end_date: When the report's date filter ends
resources_type_names: resources subclasses name that were selected
tags_id = ID of tag objects that were selected
"""
parameter_data = {}
parameter_data["students"] = subject.students.all()
date_processed = self.process_date(init_date, end_date)
parameter_data["init_date"] = date_processed["init_date"]
parameter_data["end_date"] = date_processed["end_date"]
if topics_query == _("All"):
topics = subject.topic_subject.all()
else:
topics = Topic.objects.get(id=topics_query)
parameter_data["topics"] = topics
header = [str(_('User'))]
parameter_data["header"] = header
# so I can check whether we are dealing with multiple or
# single tags (empty option)
self.used_tags = copy.deepcopy(tags_id)
parameter_data["tags_id"] = tags_id
return parameter_data
def get_mural_data(self, subject, topics, students, init_date, end_date,
header, tags_id, resources_type_names):
row_data = {}
interactions = None
#For each student in the subject
for student in students:
row_data[student.id] = []
if len(student.social_name) > 0:
row_data[student.id].append(student.social_name)
else:
row_data[student.id].append(student.username)
interactions = OrderedDict()
if self.from_mural == "True":
help_posts_made_by_user = SubjectPost.objects.filter(action="help", space__id=subject.id, user=student,
create_date__range=(init_date, end_date))
# number of help posts created by the student
interactions[_('Number of help posts created by the user.')] = help_posts_made_by_user.count()
help_posts = SubjectPost.objects.filter(action="help", create_date__range=(init_date, end_date),
space__id=subject.id)
#comments count on help posts created by the student
interactions[_('Amount of comments on help posts created by the student.')] = Comment.objects\
.filter(post__in = help_posts.filter(user=student),
create_date__range=(init_date, end_date)).count()
#count the amount of comments made by the student on posts made by one of the professors
interactions[_('Amount of comments made by the student on teachers help posts.')] = Comment.objects\
.filter(post__in = help_posts.filter(user__in=subject.professor.all()),
create_date__range=(init_date, end_date), user=student).count()
#comments made by the user on other users posts
interactions[_('Amount of comments made by the student on other students help posts.')] = Comment\
.objects.filter(post__in=help_posts.exclude(user=student),
create_date__range=(init_date, end_date), user=student).count()
comments_by_teacher = Comment.objects.filter(user__in=subject.professor.all())
help_posts_ids = []
for comment in comments_by_teacher:
help_posts_ids.append(comment.post.id)
#number of help posts created by the user that the teacher commented on
interactions[_('Number of help posts created by the user that the teacher commented on.')] = help_posts\
.filter(user=student, id__in=help_posts_ids).count()
comments_by_others = Comment.objects.filter(user__in=subject.students.exclude(id=student.id))
help_posts_ids = []
                for comment in comments_by_others:
help_posts_ids.append(comment.post.id)
#number of help posts created by the user others students commented on
interactions[_('Number of help posts created by the user others students commented on.')] = help_posts\
.filter(user=student, id__in=help_posts_ids).count()
#Number of student visualizations on the mural of the subject
interactions[_('Number of student visualizations on the mural of the subject.')] = MuralVisualizations\
.objects.filter(post__in=SubjectPost.objects.filter(space__id=subject.id,
create_date__range=(init_date, end_date)),
user=student).count()
#variables from messages
if self.from_messages == "True":
message_data = self.get_messages_data(subject, student)
for key, value in message_data.items():
interactions[key] = value
            # VAR08 through VAR_019 of documentation:
if len(resources_type_names) > 0:
resources_data = self.get_resources_and_tags_data(resources_type_names, tags_id, student, subject,
topics, init_date, end_date)
for key, value in resources_data.items():
interactions[key] = value
#VAR20 - number of access to mural between 6 a.m to 12a.m.
interactions[_('Number of access to mural between 6 a.m to 12a.m. .')] = Log.objects\
.filter(action="access", resource="subject", user_id=student.id, context__contains=
{'subject_id': subject.id}, datetime__hour__range=(5, 11), datetime__range=(init_date, end_date)).count()
#VAR21 - number of access to mural between 0 p.m to 6p.m.
interactions[_('Number of access to mural between 0 p.m to 6p.m. .')] = Log.objects\
.filter(action="access", resource="subject", user_id=student.id, context__contains=
{'subject_id': subject.id}, datetime__hour__range=(11, 17), datetime__range=(init_date, end_date)).count()
#VAR22
interactions[_('Number of access to mural between 6 p.m to 12p.m. .')] = Log.objects\
.filter(action="access", resource="subject", user_id=student.id, context__contains=
{'subject_id': subject.id}, datetime__hour__range=(17, 23), datetime__range=(init_date, end_date)).count()
#VAR23
interactions[_('Number of access to mural between 0 a.m to 6a.m. .')] = Log.objects\
.filter(action="access", resource="subject", user_id=student.id, context__contains=
{'subject_id': subject.id}, datetime__hour__range=(23, 5), datetime__range=(init_date, end_date)).count()
#VAR24 through 30
day_numbers = [0, 1, 2, 3, 4, 5, 6]
day_names = [str(_("sunday")), str(_("monday")), str(_("tuesday")), str(_("wednesday")), str(_("thursday")),
str(_("friday")), str(_("saturday"))]
distinct_days = 0
for day_num in day_numbers:
                #day_num+1 because Django week-day lookups start at 1, while list indices start at 0
interactions[_('Number of access to the subject on ')+ day_names[day_num]] = Log.objects\
.filter(action="access", resource="subject", user_id=student.id, context__contains=
{'subject_id': subject.id}, datetime__week_day=day_num+1, datetime__range=(init_date, end_date)).count()
#to save the distinct days the user has accessed
if interactions[_('Number of access to the subject on ') + day_names[day_num]] > 0:
distinct_days += 1
interactions[_('Number of distinct days the user access the subject. ')] = distinct_days
interactions[_("Class")] = _("Undefined")
interactions[_("Performance")] = _("Undefined")
for value in interactions.values():
row_data[student.id].append(value)
if interactions is not None:
for key in interactions.keys():
header.append(key)
return row_data, header
def get_resources_and_tags_data(self, resources_types, tags, student, subject, topics, init_date, end_date):
data = OrderedDict()
new_tags = [] # tags will be replaced by this variable
for i in range(len(resources_types)):
if tags[i] == "-1": # it means I should select all of tags available for this kind of resource
new_tags = set()
if not isinstance(topics, Topic):
topics = subject.topic_subject.all()
for topic in topics:
resource_set = Resource.objects.select_related(resources_types[i].lower()).filter(topic=topic)
for resource in resource_set:
if resource._my_subclass == resources_types[i].lower():
for tag in resource.tags.all():
if tag.name != "":
new_tags.add(tag)
else:
topics = topics
resource_set = Resource.objects.select_related(resources_types[i].lower()).filter(topic=topics)
for resource in resource_set:
if resource._my_subclass == resources_types[i].lower():
for tag in resource.tags.all():
if tag.name != "":
new_tags.add(tag)
data = {}
new_tags = [tag.id for tag in new_tags]
tags[i] = new_tags
for i in range(len(resources_types)):
original_tags = copy.deepcopy(self.used_tags) #effectiving copy
if isinstance(topics, Topic):
if isinstance(tags[i], List):
resources = Resource.objects.select_related(resources_types[i].lower()).filter(tags__in=tags[i],
topic=topics)
else:
resources = Resource.objects.select_related(resources_types[i].lower()).filter(tags__in=[tags[i]],
topic=topics)
else:
if isinstance(tags[i], List):
resources = Resource.objects.select_related(resources_types[i].lower()).filter(tags__in=tags[i],
topic__in=topics)
else:
resources = Resource.objects.select_related(resources_types[i].lower()).filter(tags__in=[tags[i]],
topic__in=topics)
distinct_resources = 0
total_count = 0
# variables to handle distinct days report's variable
day_numbers = [0, 1, 2, 3, 4, 5, 6]
distinct_days = 0
hours_viewed = 0 # youtube video as well as webconference
for resource in resources:
if isinstance(topics, Topic):
#if it selected only one topic to work with
count = Log.objects.filter(action="view", resource=resources_types[i].lower(), user_id=student.id,
context__contains={'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id,
'topic_id': topics.id},
datetime__range=(init_date, end_date)).count()
if resources_types[i].lower() == "ytvideo":
watch_times = Log.objects.filter(action="watch", resource=resources_types[i].lower(),
user_id=student.id, context__contains=
{'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id},
datetime__range=(init_date, end_date))
if watch_times.count() > 0:
for watch_time in watch_times:
hours_viewed = calculateHoursViewedTimeDelta(hours_viewed, watch_time,
'timestamp_start', 'timestamp_end')
if resources_types[i].lower() == "webconference":
init_times = Log.objects.filter(action="initwebconference", resource=resources_types[i].lower(),
user_id=student.id, context__contains=
{'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id},
datetime__range=(init_date, end_date))
end_times = Log.objects.filter(action="participate", resource=resources_types[i].lower(),
user_id=student.id, context__contains={'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id}, datetime__range=(init_date, end_date))
if init_times.count() > 0:
                            for init_time, end_time in zip(init_times, end_times):
                                hours_viewed = calculateHoursViewed(hours_viewed, init_time, end_time,
                                                                    'webconference_init', 'webconference_finish')
for day_num in day_numbers:
count_temp = Log.objects.filter(action="view", resource=resources_types[i].lower(),
user_id=student.id, context__contains={'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id, 'topic_id': topics.id},
datetime__week_day=day_num+1,
datetime__range=(init_date, end_date)).count()
if count_temp > 0:
distinct_days += 1
else:
# or the user selected all
count = Log.objects.filter(action="view", resource=resources_types[i].lower(),
user_id=student.id, context__contains={'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id}, datetime__range=(init_date, end_date)).count()
for daynum in day_numbers:
count_temp = Log.objects.filter(action="view", resource=resources_types[i].lower(),
user_id=student.id, context__contains={'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id}, datetime__week_day=daynum+1,
datetime__range=(init_date, end_date)).count()
if count_temp > 0:
distinct_days += 1
if resources_types[i].lower() == "ytvideo":
watch_times = Log.objects.filter(action="watch", resource=resources_types[i].lower(),
                                                         user_id=student.id, context__contains=
{'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id},
datetime__range=(init_date, end_date))
if watch_times.count() > 0:
for watch_time in watch_times:
hours_viewed = calculateHoursViewedTimeDelta(hours_viewed, watch_time,
'timestamp_start', 'timestamp_end')
if resources_types[i].lower() == "webconference":
init_times = Log.objects.filter(action="initwebconference", resource=resources_types[i].lower(),
user_id=student.id, context__contains={'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id}, datetime__range=(init_date, end_date))
end_times = Log.objects.filter(action="participate", resource=resources_types[i].lower(),
user_id=student.id, context__contains={'subject_id': subject.id,
resources_types[i].lower()+'_id': resource.id}, datetime__range=(init_date, end_date))
if init_times.count() > 0:
                            for init_time, end_time in zip(init_times, end_times):
                                hours_viewed = calculateHoursViewed(hours_viewed, init_time, end_time,
                                                                    'webconference_init', 'webconference_finish')
if count > 0:
distinct_resources += 1
total_count += count
# mapping to translate class names
mapping = {
'pdffile': str(_('PDF File')),
'goals': str(_('Topic Goals')),
'link': str(_('Link to Website')),
'filelink': str(_('File Link')),
'webconference': str(_('Web Conference')),
'ytvideo': str(_('YouTube Video')),
'webpage': str(_('WebPage')),
'questionary': str(_('Questionary'))}
if original_tags[i] != "-1":
data[str(_("number of visualizations of ")) + mapping[str(resources_types[i])] + str(
_(" with tag ")) + Tag.objects.get(id=int(tags[i])).name] = total_count
data[str(_("number of visualizations of distintic ")) + mapping[str(resources_types[i])] + str(
_(" with tag ")) + Tag.objects.get(id=int(tags[i])).name] = distinct_resources
data[str(_("distintic days ")) + mapping[str(resources_types[i])] + str(
_(" with tag ")) + Tag.objects.get(id=int(tags[i])).name] = distinct_days
if resources_types[i].lower() in ["ytvideo", "webconference"]:
data[str(_("hours viewed of ")) + str(resources_types[i]) + str(_(" with tag ")) + Tag.objects.get(
id=int(tags[i])).name] = hours_viewed
else:
data[str(_("number of visualizations of ")) + mapping[str(resources_types[i])]] = total_count
data[str(_("number of visualizations of distintic ")) + mapping[
str(resources_types[i])]] = distinct_resources
data[str(_("distintic days ")) + mapping[str(resources_types[i])]] = distinct_days
if resources_types[i].lower() in ["ytvideo", "webconference"]:
data[str(_("hours viewed of ")) + str(resources_types[i])] = hours_viewed
return data
def get_messages_data(self, subject, student):
data = OrderedDict()
messages_sent_to_other_students = 0
distinct_students = 0
for other_student in subject.students.exclude(id=student.id):
conversations_with_other = Conversation.objects.filter(Q(user_one=student) & Q(user_two=other_student) |
Q(user_one=other_student) & Q(user_two=student))
messages_sent_other = TalkMessages.objects.filter(talk__in=conversations_with_other, user=student,
subject=subject)
messages_received_other = TalkMessages.objects.filter(talk__in=conversations_with_other, user=other_student,
subject=subject)
if data.get(_(" amount of messages sent to other students")):
data[_(" amount of messages sent to other students")] = messages_sent_other.count() \
+ data.get(
_(" amount of messages sent to other students"))
else:
data[_(" amount of messages sent to other students")] = messages_sent_other.count()
if data.get(_("amount of messages received from other students")):
data[_("amount of messages received from other students")] = messages_received_other.count()\
+ data.get(
_("amount of messages received from other students"))
else:
data[_("amount of messages received from other students")] = messages_received_other.count()
#check whether the other started a conversation or not
if messages_sent_other.count() > 0:
distinct_students += 1
data[_("amount of distinct students to whom sent messages")] = distinct_students
# calculate the amount of messages sent to and received from professor
messages_sent_professors = 0
messages_received_professors = 0
for professor in subject.professor.all():
conversations_with_professor = Conversation.objects.filter(Q(user_one=student) & Q(user_two=professor)
| Q(user_one=professor) & Q(user_two=student))
messages_sent_to_professors = TalkMessages.objects.filter(talk__in=conversations_with_professor,
user=student, subject=subject)
messages_received_from_professors = TalkMessages.objects.filter(talk__in=conversations_with_professor,
user=professor, subject=subject)
if data.get(_("amount messages sent to professors")):
data[_("amount messages sent to professors")] = messages_sent_to_professors.count() \
+ data.get(_("amount messages sent to professors"))
else:
data[_("amount messages sent to professors")] = messages_sent_to_professors.count()
if data.get(_("amount of messages received from professors")):
data[_("amount of messages received from professors")] = messages_received_from_professors.count() \
+ data.get(
_("amount of messages received from professors"))
else:
data[_("amount of messages received from professors")] = messages_received_from_professors.count()
return data
"""
Get all possible resource subclasses available for that topic selected
"""
def get_resources(request):
# get all possible resources
subject = Subject.objects.get(id=request.GET['subject_id'])
topic_choice = request.GET["topic_choice"]
if topic_choice.lower() == "all" or topic_choice.lower() == "todos":
topics = subject.topic_subject.all()
else:
topics = [Topic.objects.get(id=int(topic_choice))]
resources_class_names = []
for topic in topics:
resource_set = Resource.objects.filter(topic=topic)
for resource in resource_set:
resources_class_names.append(resource._my_subclass)
# remove duplicates
resources = set(resources_class_names)
mapping = {}
mapping['pdffile'] = str(_('PDF File'))
mapping['goals'] = str(_('Topic Goals'))
mapping['link'] = str(_('Link to Website'))
mapping['filelink'] = str(_('File Link'))
mapping['webconference'] = str(_('Web Conference'))
mapping['ytvideo'] = str(_('YouTube Video'))
mapping['webpage'] = str(_('WebPage'))
mapping['questionary'] = str(_('Questionary'))
data = {}
data['resources'] = [{'id': resource_type, 'name': mapping[resource_type]} for resource_type in resources]
return JsonResponse(data)
"""
This function returns all the tags associated
with a resource that is of the type of the resource_class_name provided.
"""
def get_tags(request):
resource_type = request.GET['resource_class_name']
subject = Subject.objects.get(id=request.GET['subject_id'])
topic_choice = request.GET["topic_choice"]
# Have to fix this to accept translated options
if topic_choice.lower() == "all" or topic_choice.lower() == "todos":
topics = subject.topic_subject.all()
else:
topics = [Topic.objects.get(id=int(topic_choice))]
data = {}
tags = set()
for topic in topics:
resource_set = Resource.objects.select_related(resource_type.lower()).filter(topic=topic)
for resource in resource_set:
if resource._my_subclass == resource_type.lower():
for tag in resource.tags.all():
if tag.name != "":
tags.add(tag)
    # adding empty tag for the purpose of giving the user this option for additional behavior
tags = list(tags)
# creating empty tag
t = Tag(name=" ")
    t.id = -1  # so I know the user chose the empty one
tags.append(t)
data['tags'] = [{'id': tag.id, 'name': tag.name} for tag in tags]
return JsonResponse(data)
def download_report_csv(request):
report = ReportCSV.objects.get(user=request.user)
response = HttpResponse(report.csv_data, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="report.csv"'
return response
def download_report_xls(request):
report = ReportXLS.objects.get(user=request.user)
response = HttpResponse(report.xls_data, content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename="report.xls"'
return response
def calculateHoursViewed(hours_viewed, init_time, end_time, resource_init_field, resource_end_time_field):
begin_time = int(init_time.context[resource_init_field])
end_time = int(end_time.context[resource_end_time_field])
time_delta = math.fabs(end_time - begin_time)
hours_viewed += time_delta / 3600
return hours_viewed
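# Worked example of the arithmetic above (the two log records and their context
# values are illustrative assumptions, not real data): if the init log carries
# context['webconference_init'] == 1000 and the end log carries
# context['webconference_finish'] == 8200 (seconds), the call adds
# abs(8200 - 1000) / 3600 = 2.0 hours to hours_viewed.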
def calculateHoursViewedTimeDelta(hours_viewed, watch_time, resource_init_field, resource_end_time_field):
begin_time = timedelta(microseconds=int(watch_time.context[resource_init_field]))
end_time = timedelta(microseconds=int(watch_time.context[resource_end_time_field]))
time_delta = end_time - begin_time
hours_viewed += time_delta.microseconds / 3600
return hours_viewed
| gpl-2.0 | 3,578,856,309,576,907,300 | 49.077027 | 231 | 0.550098 | false |
bnbowman/HlaTools | src/pbhla/arguments.py | 1 | 1400 | import os, argparse
from . import __VERSION__
NUM_PROC = 4
args = argparse.Namespace()
def parse_args():
"""
Parse the options for running the HLA pipeline and
"""
desc = "A pipeline for performing HLA haplotype sequencing."
parser = argparse.ArgumentParser( description=desc )
# Add the options and arguments
add = parser.add_argument
add("input_file",
metavar="INPUT",
help="A Fasta, BasH5 or FOFN of HLA data to haplotype.")
add("config_file",
metavar="CONFIG",
help="FOFN of reference fasta files and their associated loci")
add("--output",
metavar="DIR",
help="Destination folder for process results")
add("--nproc",
metavar='INT',
type=int,
default=NUM_PROC,
help="Number of processors to use for parallelization ({0})".format(NUM_PROC))
add("--version",
nargs=0,
action=PrintVersionAction)
# Parse the options and update a few of the variables as needed
parser.parse_args( namespace=args )
args.input_file = os.path.abspath( args.input_file )
if not args.output:
args.output = args.input_file.split('.')[0]
class PrintVersionAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print "\tHLA Analysis Pipeline version: %s" % __VERSION__
raise SystemExit | bsd-3-clause | 8,794,741,998,977,473,000 | 28.1875 | 86 | 0.635714 | false |
thp44/delphin_6_automation | data_process/extraction/dummy_extraction.py | 1 | 1715 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import numpy as np
import matplotlib.pyplot as plt
# RiBuild Modules
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def compute_cdf(mu, sigma):
curve = np.random.normal(mu, sigma, 200)
hist, edges = np.histogram(curve, density=True, bins=50)
dx = edges[1] - edges[0]
cdf = np.cumsum(hist) * dx
return edges[1:], cdf
# Filter 1
x1, y1 = compute_cdf(3, 0.5)
a1 = np.nonzero(x1 < 2.0)
height1 = y1[a1][-1]
print(height1)
plt.figure()
plt.plot(x1, y1)
plt.xlim(0, 4)
plt.title('Filter 1')
plt.xlabel('Mould Index')
plt.ylabel('Cumulated Probability')
plt.axvline(x=2.0, color='k', linestyle=':', linewidth=1)
plt.axhline(y=height1, color='k', linestyle=':', linewidth=1)
plt.show()
# Filter 2
x2, y2 = compute_cdf(2, 1)
a2 = np.nonzero(x2 < 2.0)
height2 = y2[a2][-1]
print(height2)
plt.figure()
plt.plot(x2, y2)
plt.xlim(0, 4)
plt.title('Filter 2')
plt.xlabel('Mould Index')
plt.ylabel('Cumulated Probability')
plt.axvline(x=2.0, color='k', linestyle=':', linewidth=1)
plt.axhline(y=height2, color='k', linestyle=':', linewidth=1)
plt.show()
# Filter 3
x3, y3 = compute_cdf(1.50, 0.3)
a3 = np.nonzero(x3 < 2.0)
height3 = y3[a3][-1]
print(height3)
plt.figure()
plt.plot(x3, y3)
plt.xlim(0, 4)
plt.title('Filter 3')
plt.xlabel('Mould Index')
plt.ylabel('Cumulated Probability')
plt.axvline(x=2.0, color='k', linestyle=':', linewidth=1)
plt.axhline(y=height3, color='k', linestyle=':', linewidth=1)
plt.show()
| mit | -3,010,998,486,810,298,000 | 23.855072 | 120 | 0.570845 | false |
muendelezaji/workload-automation | setup.py | 2 | 3310 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import warnings
from itertools import chain
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
wlauto_dir = os.path.join(os.path.dirname(__file__), 'wlauto')
sys.path.insert(0, os.path.join(wlauto_dir, 'core'))
from version import get_wa_version
# happens if falling back to distutils
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
try:
os.remove('MANIFEST')
except OSError:
pass
packages = []
data_files = {}
source_dir = os.path.dirname(__file__)
for root, dirs, files in os.walk(wlauto_dir):
rel_dir = os.path.relpath(root, source_dir)
data = []
if '__init__.py' in files:
for f in files:
if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
data.append(f)
package_name = rel_dir.replace(os.sep, '.')
package_dir = root
packages.append(package_name)
data_files[package_name] = data
else:
# use previous package name
filepaths = [os.path.join(root, f) for f in files]
data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
scripts = [os.path.join('scripts', s) for s in os.listdir('scripts')]
params = dict(
name='wlauto',
    description='A framework for automating workload execution and measurement collection on ARM devices.',
version=get_wa_version(),
packages=packages,
package_data=data_files,
scripts=scripts,
url='N/A',
license='Apache v2',
maintainer='ARM Architecture & Technology Device Lab',
maintainer_email='workload-automation@arm.com',
install_requires=[
'python-dateutil', # converting between UTC and local time.
        'pexpect>=3.3',  # Send/receive to/from device
'pyserial', # Serial port interface
'colorama', # Printing with colors
'pyYAML', # YAML-formatted agenda parsing
'requests', # Fetch assets over HTTP
],
extras_require={
'other': ['jinja2', 'pandas>=0.13.1'],
'test': ['nose'],
'mongodb': ['pymongo'],
'notify': ['notify2'],
'doc': ['sphinx'],
},
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
],
)
all_extras = list(chain(params['extras_require'].itervalues()))
params['extras_require']['everything'] = all_extras
setup(**params)
| apache-2.0 | 4,196,680,729,969,653,000 | 31.772277 | 106 | 0.655589 | false |
dtusar/coco | code-postprocessing/bbob_pproc/rungenericmany.py | 1 | 18089 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Process data to be included in a generic template.
Synopsis:
``python path_to_folder/bbob_pproc/rungenericmany.py [OPTIONS] FOLDER``
Help:
``python path_to_folder/bbob_pproc/rungenericmany.py -h``
"""
from __future__ import absolute_import
import os
import sys
import glob
import getopt
import pickle
import tarfile
from pdb import set_trace
import warnings
import numpy
import matplotlib
ftarget = 1e-8
target_runlength = 10 # used for ppfigs.main
if __name__ == "__main__":
matplotlib.use('Agg') # To avoid window popup and use without X forwarding
# matplotlib.use('pdf')
filepath = os.path.split(sys.argv[0])[0]
# Add the path to bbob_pproc/.. folder
sys.path.append(os.path.join(filepath, os.path.pardir))
try:
import bbob_pproc as cocopp
except ImportError:
import cocopp
res = cocopp.rungenericmany.main(sys.argv[1:])
sys.exit(res)
from . import genericsettings, ppfig
from . import dataoutput, pproc, pptex
from .pproc import DataSetList, processInputArgs
from .toolsdivers import prepend_to_file, strip_pathname1, str_to_latex
from .compall import pprldmany, pptables, ppfigs
from . import ppconverrorbars
import matplotlib.pyplot as plt
__all__ = ['main']
#CLASS DEFINITIONS
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
#FUNCTION DEFINITIONS
def usage():
print main.__doc__
def main(argv=None):
r"""Main routine for post-processing the data of multiple algorithms.
Provided with some data, this routine outputs figure and TeX files
in a folder needed for the compilation of the provided LaTeX templates
for comparing multiple algorithms (``*many.tex`` or ``*3*.tex``).
The used template file needs to be edited so that the commands
``\bbobdatapath`` points to the output folder created by this routine.
These output files will contain performance tables, performance
scaling figures and empirical cumulative distribution figures. On
subsequent executions, new files will be added to the output folder,
overwriting existing older files in the process.
Keyword arguments:
*argv* -- list of strings containing options and arguments. If not
provided, sys.argv is accessed.
*argv* must list folders containing BBOB data files.
The name of these folders will be used in the output figures and
tables to designate the algorithms. Therefore you should name the
folders with differentiating names.
Furthermore, argv can begin with facultative option flags listed
below.
-h, --help
displays this message.
-v, --verbose
verbose mode, prints out operations, warnings.
-o OUTPUTDIR, --output-dir=OUTPUTDIR
changes the default output directory (:file:`ppdatamany`) to
:file:`OUTPUTDIR`.
--noise-free, --noisy
processes only part of the data.
--settings=SETTINGS
changes the style of the output figures and tables. At the
moment, the only differences are in the colors of the
output figures. SETTINGS can be either "grayscale", "color"
or "black-white". The default setting is "color".
--tab-only, --rld-only, --fig-only
these options can be used to output respectively the
comparison TeX tables, the run lengths distributions or the
figures of ERT/dim vs dim only. A combination of any two or
more of these options results in no output.
--conv
if this option is chosen, additionally convergence
plots for each function and algorithm are generated.
--rld-single-fcts
generate also runlength distribution figures for each
single function.
--expensive
runlength-based f-target values and fixed display limits,
useful with comparatively small budgets. By default the
setting is based on the budget used in the data.
--not-expensive
expensive setting off.
--svg
generate also the svg figures which are used in html files
Exceptions raised:
*Usage* -- Gives back a usage message.
Examples:
* Calling the rungenericmany.py interface from the command line::
$ python bbob_pproc/rungenericmany.py -v AMALGAM BFGS BIPOP-CMA-ES
* Loading this package and calling the main from the command line
(requires that the path to this package is in python search path)::
$ python -m bbob_pproc.rungenericmany -h
This will print out this help message.
* From the python interpreter (requires that the path to this
package is in python search path)::
>> import bbob_pproc as bb
>> bb.rungenericmany.main('-o outputfolder folder1 folder2'.split())
This will execute the post-processing on the data found in
:file:`folder1` and :file:`folder2`.
The ``-o`` option changes the output folder from the default to
:file:`outputfolder`.
* Generate post-processing data for some algorithms with figures in
shades of gray::
$ python rungenericmany.py --settings grayscale NEWUOA NELDER LSSTEP
"""
if argv is None:
argv = sys.argv[1:]
try:
try:
opts, args = getopt.getopt(argv, genericsettings.shortoptlist, genericsettings.longoptlist)
except getopt.error, msg:
raise Usage(msg)
if not (args):
usage()
sys.exit()
#Process options
outputdir = genericsettings.outputdir
for o, a in opts:
if o in ("-v","--verbose"):
genericsettings.verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output-dir"):
outputdir = a
elif o == "--noisy":
genericsettings.isNoisy = True
elif o == "--noise-free":
genericsettings.isNoiseFree = True
#The next 3 are for testing purpose
elif o == "--tab-only":
genericsettings.isRLDistr = False
genericsettings.isFig = False
elif o == "--rld-single-fcts":
genericsettings.isRldOnSingleFcts = True
elif o == "--rld-only":
genericsettings.isTab = False
genericsettings.isFig = False
elif o == "--fig-only":
genericsettings.isRLDistr = False
genericsettings.isTab = False
elif o == "--settings":
genericsettings.inputsettings = a
elif o == "--conv":
genericsettings.isConv = True
elif o == "--runlength-based":
genericsettings.runlength_based_targets = True
elif o == "--expensive":
genericsettings.isExpensive = True # comprises runlength-based
elif o == "--not-expensive":
genericsettings.isExpensive = False
elif o == "--svg":
genericsettings.generate_svg_files = True
elif o == "--sca-only":
warnings.warn("option --sca-only will have no effect with rungenericmany.py")
elif o == "--los-only":
warnings.warn("option --los-only will have no effect with rungenericmany.py")
elif o == "--crafting-effort=":
warnings.warn("option --crafting-effort will have no effect with rungenericmany.py")
elif o in ("-p", "--pickle"):
warnings.warn("option --pickle will have no effect with rungenericmany.py")
else:
assert False, "unhandled option"
# from bbob_pproc import bbob2010 as inset # input settings
# TODO: conditional imports are NOT the way to go here
if genericsettings.inputsettings == "color":
from . import config, genericsettings as inset # input settings
config.config(False)
elif genericsettings.inputsettings == "grayscale":
# this settings strategy (by proving different settings files) is problematic,
# because it means copy-paste of the settings
# file and future changes have a great chance to make the pasted files incompatible
# as has most likely happened with grayscalesettings:
from . import config, grayscalesettings as inset # input settings
# better would be just adjust the previous settings, as config is doing it,
# so a config_grayscalesettings.py module seems the better approach to go
elif genericsettings.inputsettings == "black-white":
from . import config, bwsettings as inset # input settings
else:
txt = ('Settings: %s is not an appropriate ' % genericsettings.inputsettings
+ 'argument for input flag "--settings".')
raise Usage(txt)
if (not genericsettings.verbose):
warnings.filterwarnings('module', '.*', Warning, '.*') # same warning just once
            warnings.simplefilter('ignore')  # that is bad, but otherwise too many warnings appear
config.target_values(genericsettings.isExpensive)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use -h or --help"
return 2
if 1 < 3:
print ("Post-processing: will generate output " +
"data in folder %s" % outputdir)
print " this might take several minutes."
if not os.path.exists(outputdir):
os.makedirs(outputdir)
if genericsettings.verbose:
print 'Folder %s was created.' % (outputdir)
# prepend the algorithm name command to the tex-command file
lines = []
for i, alg in enumerate(args):
lines.append('\\providecommand{\\algorithm' + pptex.numtotext(i) +
'}{' + str_to_latex(strip_pathname1(alg)) + '}')
prepend_to_file(os.path.join(outputdir,
'bbob_pproc_commands.tex'),
lines, 5000,
                        'bbob_proc_commands.tex truncated, consider removing the file before the next run'
)
dsList, sortedAlgs, dictAlg = processInputArgs(args, verbose=genericsettings.verbose)
if not dsList:
sys.exit()
if (any(ds.isBiobjective() for ds in dsList) and any(not ds.isBiobjective() for ds in dsList)):
sys.exit()
for i in dictAlg:
if genericsettings.isNoisy and not genericsettings.isNoiseFree:
dictAlg[i] = dictAlg[i].dictByNoise().get('nzall', DataSetList())
if genericsettings.isNoiseFree and not genericsettings.isNoisy:
dictAlg[i] = dictAlg[i].dictByNoise().get('noiselessall', DataSetList())
# compute maxfuneval values
# TODO: we should rather take min_algorithm max_evals
dict_max_fun_evals = {}
for ds in dsList:
dict_max_fun_evals[ds.dim] = numpy.max((dict_max_fun_evals.setdefault(ds.dim, 0), float(numpy.max(ds.maxevals))))
# set target values
from . import config
config.target_values(genericsettings.isExpensive, dict_max_fun_evals)
config.config(dsList[0].isBiobjective())
for i in dsList:
if i.dim not in genericsettings.dimensions_to_display:
continue
if (dict((j, i.instancenumbers.count(j)) for j in set(i.instancenumbers)) <
inset.instancesOfInterest):
warnings.warn('The data of %s do not list ' %(i) +
'the correct instances ' +
'of function F%d.' %(i.funcId))
plt.rc("axes", **inset.rcaxes)
plt.rc("xtick", **inset.rctick)
plt.rc("ytick", **inset.rctick)
plt.rc("font", **inset.rcfont)
plt.rc("legend", **inset.rclegend)
plt.rc('pdf', fonttype = 42)
ppfig.save_single_functions_html(
os.path.join(outputdir, genericsettings.many_algorithm_file_name),
'', # algorithms names are clearly visible in the figure
algorithmCount=ppfig.AlgorithmCount.MANY
)
ppfig.copy_js_files(outputdir)
# convergence plots
if genericsettings.isConv:
ppconverrorbars.main(dictAlg, outputdir, genericsettings.verbose)
# empirical cumulative distribution functions (ECDFs) aka Data profiles
if genericsettings.isRLDistr:
config.config(dsList[0].isBiobjective())
# ECDFs per noise groups
dictNoi = pproc.dictAlgByNoi(dictAlg)
for ng, tmpdictAlg in dictNoi.iteritems():
dictDim = pproc.dictAlgByDim(tmpdictAlg)
for d, entries in dictDim.iteritems():
# pprldmany.main(entries, inset.summarized_target_function_values,
# from . import config
# config.config()
pprldmany.main(entries, # pass expensive flag here?
dsList[0].isBiobjective(),
order=sortedAlgs,
outputdir=outputdir,
info=('%02dD_%s' % (d, ng)),
verbose=genericsettings.verbose)
# ECDFs per function groups
dictFG = pproc.dictAlgByFuncGroup(dictAlg)
for fg, tmpdictAlg in dictFG.iteritems():
dictDim = pproc.dictAlgByDim(tmpdictAlg)
for d, entries in dictDim.iteritems():
pprldmany.main(entries,
dsList[0].isBiobjective(),
order=sortedAlgs,
outputdir=outputdir,
info=('%02dD_%s' % (d, fg)),
verbose=genericsettings.verbose)
if genericsettings.isRldOnSingleFcts: # copy-paste from above, here for each function instead of function groups
# ECDFs for each function
if 1 < 3:
pprldmany.all_single_functions(dictAlg, sortedAlgs,
outputdir, genericsettings.verbose)
else: # subject to removal
dictFG = pproc.dictAlgByFun(dictAlg)
for fg, tmpdictAlg in dictFG.iteritems():
dictDim = pproc.dictAlgByDim(tmpdictAlg)
for d, entries in dictDim.iteritems():
single_fct_output_dir = (outputdir.rstrip(os.sep) + os.sep +
'pprldmany-single-functions',
# + os.sep + ('f%03d' % fg),
dsList[0].isBiobjective()
)
if not os.path.exists(single_fct_output_dir):
os.makedirs(single_fct_output_dir)
pprldmany.main(entries,
order=sortedAlgs,
outputdir=single_fct_output_dir,
info=('f%03d_%02dD' % (fg, d)),
verbose=genericsettings.verbose)
print "ECDFs of run lengths figures done."
if genericsettings.isTab:
if genericsettings.isExpensive:
prepend_to_file(os.path.join(outputdir,
'bbob_pproc_commands.tex'),
['\providecommand{\\bbobpptablesmanylegend}[1]{' +
pptables.tables_many_expensive_legend + '}'])
else:
prepend_to_file(os.path.join(outputdir,
'bbob_pproc_commands.tex'),
['\providecommand{\\bbobpptablesmanylegend}[1]{' +
pptables.tables_many_legend + '}'])
dictNoi = pproc.dictAlgByNoi(dictAlg)
for ng, tmpdictng in dictNoi.iteritems():
dictDim = pproc.dictAlgByDim(tmpdictng)
for d, tmpdictdim in dictDim.iteritems():
pptables.main(
tmpdictdim,
sortedAlgs,
dsList[0].isBiobjective(),
outputdir,
genericsettings.verbose)
print "Comparison tables done."
global ftarget # not nice
if genericsettings.isFig:
plt.rc("axes", labelsize=20, titlesize=24)
plt.rc("xtick", labelsize=20)
plt.rc("ytick", labelsize=20)
plt.rc("font", size=20)
plt.rc("legend", fontsize=20)
plt.rc('pdf', fonttype = 42)
if genericsettings.runlength_based_targets:
reference_data = 'bestBiobj2016' if dsList[0].isBiobjective() else 'bestGECCO2009'
ftarget = pproc.RunlengthBasedTargetValues([target_runlength], # TODO: make this more variable but also consistent
reference_data = reference_data)
ppfigs.main(dictAlg,
genericsettings.many_algorithm_file_name,
dsList[0].isBiobjective(),
sortedAlgs,
ftarget,
outputdir,
genericsettings.verbose)
plt.rcdefaults()
print "Scaling figures done."
plt.rcdefaults()
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | 3,673,621,803,652,670,000 | 40.679724 | 131 | 0.566256 | false |
AISystena/web_crawler | lib/image_cnn_gpu/ImageTrainer.py | 1 | 6005 | # coding: utf-8
import six
import sys
import os.path
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
import chainer
import chainer.links as L
from chainer import optimizers, cuda
import matplotlib.pyplot as plt
import Util
from ImageCnn import ImageCnn
plt.style.use('ggplot')
"""
Text classification with a CNN (posi-nega)
- 5-layer deep neural network
- a WordEmbedding model is used for the word vectors
"""
class ImageTrainer:
def __init__(self, gpu=0, epoch=50, batchsize=5):
current_dir_path = os.path.dirname(__file__)
self.model_pkl = current_dir_path + '/model/image_cnn.pkl'
self.gpu = gpu
self.batchsize = batchsize # minibatch size
        self.n_epoch = epoch  # number of epochs (number of parameter updates)
self.weight_decay = 0.01
self.lr = 0.001
        # number of units in the hidden layer
self.mid_units = 2560
self.output_channel = 1280
self.filters = 32
self.n_label = 5
self.input_channel = 3
def dump_model(self, model):
'''
        Save the model to a pickle file.
'''
with open(self.model_pkl, 'wb') as pkl:
pickle.dump(model, pkl, -1)
def load_model(self):
'''
        Load the model from the pickle file if it exists.
'''
model = None
if os.path.exists(self.model_pkl):
with open(self.model_pkl, 'rb') as pkl:
model = pickle.load(pkl)
return model
def makeGpuAvailable(self, model):
        # whether or not to use the GPU
if self.gpu >= 0:
pass
cuda.check_cuda_available()
cuda.get_device(self.gpu).use()
model.to_gpu()
        xp = np if self.gpu < 0 else cuda.cupy  # self.gpu < 0: use cpu
return xp
def train(self):
# Prepare dataset
dataset = Util.load_data()
        dataset['source'] = dataset['source'].astype(np.float32)  # features
        dataset['target'] = dataset['target'].astype(np.int32)  # labels
x_train, x_test, y_train, y_test = train_test_split(dataset['source'],
dataset['target'], test_size=0.15)
N_test = y_test.size # test data size
N = len(x_train) # train data size
print('input_channel is {}'.format(self.input_channel))
print('output_channel is {}'.format(self.output_channel))
print('filter_height is {}'.format(self.filters))
print('n_label is {}'.format(self.n_label))
# モデルの定義
model = self.load_model()
if model is None:
model = L.Classifier(ImageCnn(self.input_channel,
self.output_channel, self.filters, self.mid_units, self.n_label))
xp = self.makeGpuAvailable(model)
# Setup optimizer
optimizer = optimizers.AdaGrad()
optimizer.setup(model)
optimizer.lr = self.lr
optimizer.add_hook(chainer.optimizer.WeightDecay(self.weight_decay))
train_loss = []
train_acc = []
test_loss = []
test_acc = []
# Learning loop
for epoch in six.moves.range(1, self.n_epoch + 1):
print('epoch', epoch, '/', self.n_epoch)
# training)
            perm = np.random.permutation(N)  # random permutation of the training indices
sum_train_loss = 0.0
sum_train_accuracy = 0.0
for i in six.moves.range(0, N, self.batchsize):
                # use perm to select the minibatch from x_train, y_train (different data each time)
x = chainer.Variable(xp.asarray(x_train[perm[i:i + self.batchsize]])) # source
t = chainer.Variable(xp.asarray(y_train[perm[i:i + self.batchsize]])) # target
optimizer.update(model, x, t)
                sum_train_loss += float(model.loss.data) * len(t.data)  # for computing the mean loss
                sum_train_accuracy += float(model.accuracy.data) * len(t.data)  # for computing the mean accuracy
train_loss.append(sum_train_loss / N)
train_acc.append(sum_train_accuracy / N)
print('train mean loss={}, accuracy={}'
.format(sum_train_loss / N, sum_train_accuracy / N))
# evaluation
sum_test_loss = 0.0
sum_test_accuracy = 0.0
for i in six.moves.range(0, N_test, self.batchsize):
# all test data
x = chainer.Variable(xp.asarray(x_test[i:i + self.batchsize]))
t = chainer.Variable(xp.asarray(y_test[i:i + self.batchsize]))
loss = model(x, t)
sum_test_loss += float(loss.data) * len(t.data)
sum_test_accuracy += float(model.accuracy.data) * len(t.data)
test_loss.append(sum_test_loss / N_test)
test_acc.append(sum_test_accuracy / N_test)
print(' test mean loss={}, accuracy={}'.format(
sum_test_loss / N_test, sum_test_accuracy / N_test))
#if epoch > 10:
# optimizer.lr *= 0.97
print('learning rate:{} weight decay:{}'.format(optimizer.lr, self.weight_decay))
sys.stdout.flush()
        # save the model
self.dump_model(model)
        # plot the accuracy and the loss
plt.figure(figsize=(16, 6))
acc_plt = plt.subplot2grid((1, 2), (0, 0))
acc_plt.plot(range(len(train_acc)), train_acc)
acc_plt.plot(range(len(test_acc)), test_acc)
acc_plt.legend(["train_acc", "test_acc"], loc=4)
acc_plt.set_title("Accuracy of digit recognition.")
loss_plt = plt.subplot2grid((1, 2), (0, 1))
loss_plt.plot(range(len(train_loss)), train_loss)
loss_plt.plot(range(len(test_loss)), test_loss)
loss_plt.legend(["train_loss", "test_loss"], loc=4)
loss_plt.set_title("Loss of digit recognition.")
plt.plot()
plt.show()
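# A minimal usage sketch (assumptions: the pickled training data expected by
# Util.load_data() is available and the model/ output directory can be created;
# gpu=-1 is meant to keep the run on the CPU):
if __name__ == '__main__':
    trainer = ImageTrainer(gpu=-1, epoch=10, batchsize=5)
    trainer.train()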
| mit | 3,898,779,118,524,411,000 | 31.508571 | 95 | 0.558446 | false |
IntelLabs/hpat | examples/series/series_sub.py | 1 | 1735 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_sub():
s1 = pd.Series([5, 4, 3, 2, 1])
s2 = pd.Series([0, 2, 3, 6, 8])
return s1.sub(s2) # Expect series of 5, 2, 0, -4, -7
print(series_sub())
| bsd-2-clause | -4,880,356,070,483,243,000 | 43.487179 | 79 | 0.682421 | false |
marcocaccin/LearningMetaDynamics | MD_unconstrained/mock_sampling.py | 1 | 5083 | #!/usr/bin/env python
from __future__ import print_function, division
import os
import scipy as sp
import scipy.linalg as LA
import scipy.spatial.distance as sp_dist
from ase import units
from sklearn.kernel_ridge import KernelRidge
# import pdb
from matplotlib import pyplot as plt
import PES_plotting as pl
from time import time as get_time
SAMPLE_WEIGHT = 1
def round_vector(vec, precision = 0.05):
"""
vec: array_like, type real
precision: real, > 0
"""
return ((vec + 0.5 * precision) / precision).astype('int') * precision
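# A quick illustration with an assumed input (only to make the binning concrete):
# round_vector(sp.array([0.12, 0.37]), precision=0.05) snaps each entry to the
# nearest multiple of 0.05, i.e. approximately [0.10, 0.35]; this is how the
# collective variables are binned onto the model grid below.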
def update_model(colvars, mlmodel, grid_spacing, temperature, do_update=False):
# get actual forces and potential energy of configuration
### ML IS HERE ###
if not (mlmodel is None and not do_update):
# Accumulate the new observation in the dataset
coarse_colvars = round_vector(colvars, precision=grid_spacing)
distance_from_data = sp_dist.cdist(
sp.atleast_2d(coarse_colvars), mlmodel.X_fit_).ravel()
# check if configuration has already occurred
if distance_from_data.min() == 0.0:
index = list(distance_from_data).index(0.0)
mlmodel.y[index] = - units.kB * temperature * sp.log(sp.exp(-mlmodel.y[index] / (units.kB * temperature)) + SAMPLE_WEIGHT)
else:
mlmodel.accumulate_data(coarse_colvars, - units.kB * temperature * sp.log(SAMPLE_WEIGHT))
cv = coarse_colvars.ravel()
xx = sp.linspace(cv[0] - 2*grid_spacing, cv[0] + 2*grid_spacing, 5)
yy = sp.linspace(cv[1] - 2*grid_spacing, cv[1] + 2*grid_spacing, 5)
XX, YY = sp.meshgrid(xx, yy)
near_bins = sp.vstack((XX.ravel(), YY.ravel())).T
distance_from_data = sp_dist.cdist(sp.atleast_2d(near_bins), mlmodel.X_fit_)
for distance, near_bin in zip(distance_from_data, near_bins):
if distance.min() > 0.:
mlmodel.accumulate_data(near_bin, 0.)
if do_update:
# update ML potential with all the data contained in it.
mlmodel.update_fit()
return
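# A minimal usage sketch (the collective-variable values and grid spacing are
# illustrative; mlmodel is the KernelRidge instance built in main() below):
# update_model(sp.array([1.2, 0.7]), mlmodel, grid_spacing=0.1,
#              temperature=300.0, do_update=True)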
def main():
T = 300.0 # Simulation temperature
dt = 1 * units.fs # MD timestep
nsteps = 1000000 # MD number of steps
lengthscale = 0.5 # KRR Gaussian width.
gamma = 1 / (2 * lengthscale**2)
grid_spacing = 0.1
mlmodel = KernelRidge(kernel='rbf',
gamma=gamma, gammaL = gamma/4, gammaU=2*gamma,
alpha=1.0e-2, variable_noise=False, max_lhood=False)
anglerange = sp.arange(0, 2*sp.pi + grid_spacing, grid_spacing)
X_grid = sp.array([[sp.array([x,y]) for x in anglerange]
for y in anglerange]).reshape((len(anglerange)**2, 2))
# Bootstrap from initial database? uncomment
data_MD = sp.loadtxt('phi_psi_pot_md300.csv')
colvars = data_MD[0,:2]
PotEng = data_MD[0,2]
KinEng = data_MD[0,3]
# Prepare diagnostic visual effects.
plt.close('all')
plt.ion()
fig, ax = plt.subplots(1, 2, figsize=(24, 13))
# Zero-timestep evaluation and data files setup.
print("START")
mlmodel.accumulate_data(round_vector(data_MD[0,:2], precision=grid_spacing), 0.)
print('Step %d | Energy per atom: Epot = %.3e eV Ekin = %.3e eV (T = %3.0f K) Etot = %.7e eV' % (
0, PotEng/22, KinEng/22, KinEng / (22 * 1.5 * units.kB), PotEng + KinEng))
# MD Loop
for istep, line in enumerate(data_MD[:nsteps]):
colvars = line[:2]
PotEng = line[2]
KinEng = line[3]
# Flush Cholesky decomposition of K
if istep % 1000 == 0:
mlmodel.Cho_L = None
mlmodel.max_lhood = False
print("Dihedral angles | phi = %.3f, psi = %.3f " % (colvars[0], colvars[1]))
do_update = (istep % 1000 == 59)
t = get_time()
update_model(colvars, mlmodel, grid_spacing, T, do_update=do_update)
if do_update and mlmodel.max_lhood:
mlmodel.max_lhood = False
print("TIMER 002 | %.3f" % (get_time() - t))
print('Step %d | Energy per atom: Epot = %.3e eV Ekin = %.3e eV (T = %3.0f K) Etot = %.7e eV' % (
istep, PotEng/22, KinEng/22, KinEng / (22 * 1.5 * units.kB), PotEng + KinEng))
if istep % 1000 == 59:
t = get_time()
if 'datasetplot' not in locals():
datasetplot = pl.Plot_datapts(ax[0], mlmodel)
else:
datasetplot.update()
if hasattr(mlmodel, 'dual_coef_'):
if 'my2dplot' not in locals():
my2dplot = pl.Plot_energy_n_point(ax[1], mlmodel, colvars.ravel())
else:
my2dplot.update_prediction()
my2dplot.update_current_point(colvars.ravel())
print("TIMER 003 | %.03f" % (get_time() - t))
t = get_time()
fig.canvas.draw()
print("TIMER 004 | %.03f" % (get_time() - t))
return mlmodel
if __name__ == '__main__':
ret = main()
| gpl-2.0 | 3,163,701,003,390,810,000 | 39.34127 | 134 | 0.570726 | false |
rbiswas4/cambio | camb_utils/cambio.py | 1 | 19088 | #!/usr/bin/env python
"""
Set of routines to
(a) load quantities from CAMB output files
(b) compute very simple implicit functions in creating the CAMB
outputs from one another.
(c) plot basic quantities from CAMB outputs
Notes:
        (a) Right now, these are an alternative to the pycamb routine. Will worry
about integration later.
(b) No cosmological calculations should be done in this module
(somewhat hard to distinguish from b above). For example, we
will not normalize to a sigma8 in this routine, but perform that
normalization elsewhere.
R. Biswas, Fri Dec 13 17:01:22 CST 2013
------------------------------------------------------------------------------
Useful Routines:
Power Spectrum Calculation:
- Compute power spectrum of a combination of components in a transfer function.
"""
import matplotlib.pyplot as plt
import numpy as np
def loadpowerspectrum( powerspectrumfile ):
"""Loads the power spectrum from CAMB into an array with columns
of k in units of h / Mpc and power spectrum
args:
powerspectrumfile : string, mandatory
filename of the power spectrum output from CAMB
"""
import numpy as np
pk = np.loadtxt(powerspectrumfile)
return pk
def loadtransfers(rootname = None,
filename = None,
dirname = None):
"""
returns an array containing the output of transfer function output
output file from CAMB
args:
rootname: string,optional , defaults to None
CAMB convention of rootname
filename: string, optional, defaults to None
Note: either rootname or filename need to be
provided. filename overrides rootname /dirname
dirname: string , optional, defaults to None
returns : numpy array of transfer function output from CAMB. The
cols of the array are given by
0 = koverh in units of h over Mpc
1 = CDM
2 = Baryon
3 = radiation
4 = neutrino
5 = massive neutrino
6 = Total
"""
if dirname is not None:
rootname = dirname + "/" + rootname
if filename is None :
filename = rootname + "_transfer_out.dat"
transfers = np.loadtxt(filename)
return transfers
def PrimordialPS( koverh , ns , As , h , k0 = 0.05 ,nrun = 0.0):
"""Returns the primordial power spectrum
args:
koverh :
ns : float , mandatory
scalar spectral index
As : float , mandatory
h : float, mandatory
k0 : float, optional, defaults to 0.05
pivot, in units of Mpc^{-1}
nrun : float, optional, defaults to 0.0
status:
Tested withoutrunning. Tests with nrun not done
notes:
Consider moving to cosmodefs
"""
#print "PRIMORDIAL ", ns , As , k0
k = koverh * h
running = (k/k0)**(0.5*np.log(k/k0)*nrun )
return As* (k/k0 )**(ns -1.) * running
def cbtransfer ( transferfile,
Omegacdm ,
Omegab ,
koverh = None ):
"""
Returns the baryon- CDM transfer function at requested koverh values
from the transfer function file and the values of Omegacdm and Omegab
provided. If no koverh values are requested, then the transfer function
values are returned at the koverh values of the provided transfer
function file.
args:
transferfile: string, mandatory
absolute path to transfer function file produced by CAMB
Omegacdm : float , mandatory
value of omegacdm used to combine transfer functions
Omegab : float mandatory
value of Omegab used to combine transfer functions
koverh : array like ,optional defaults to None
values of koverh at which the values are requested.
if None, the transfer functions are returned at the
values of koverh in the input transfer function file
returns:
tuple of koverh , Tk of CDM and baryon combined (as in CAMB )
"""
transfers = loadtransfers(rootname = None,
filename = transferfile)
f = [Omegacdm , Omegab ]
tcb =__preparetransfersforcombination(transfers, collist=[1,2])
tcbcomb = __combinetransfers(tcb , f= f, koverh= koverh)
if koverh is None:
koverh = transfers[:,0]
#print "******************"
#print tcbcomb
#print transfers[:,-1]
return koverh, tcbcomb
def matterpowerfromtransfersforsinglespecies(
koverh ,
transfer ,
h ,
As ,
ns ):
"""
returns the power spectrum values corresponding to a set of
transfer function values at the values of koverh
(comoving k/h in units of h/Mpc)
args:
koverh : array like, mandatory (but can be None)
values of comoving k/h in units of h/Mpc as in CAMB
cols at which power spectrum values are requested
transfer : a tuple of (k/h , transfer functions)
h :
As :
ns :
returns : array of shape (numkoverh, 2) , with arr[:,0] = koverh ,
arr[:,1] = powerspectra
usage:
>>> transferout = loadtransfers(filename = "transfer.dat")
status:
"""
#print "khajksd ", As
#print "As in matterpowerfromtransfersforsinglespecies" , As
if koverh is None:
koverh = transfer[0]
PPS = PrimordialPS (koverh, ns , As , h )
k = koverh*h
#print type(transfer)
transferinterp = np.interp(koverh, transfer[0],transfer[1],left = np.nan, right = np.nan)
#print "shapes" , np.shape(k) , np.shape(koverh), np.shape(transfer), np.shape(transferinterp)
matterpower = 2.0*np.pi*np.pi* h *h * h * transferinterp * transferinterp * k * PPS
res = np.zeros(shape= (len(koverh),2))
res[:,0] = koverh
res[:,1] = matterpower
return res
def cbpowerspectrum( transferfile,
Omegacdm ,
Omegab ,
h ,
Omeganu = 0.0,
As = None,
ns = None ,
koverh = None ):
"""
Returns the baryon- CDM matter power spectrum using the transfer function
output, usng the cosmological parameters As, ns, h, Omegab, Omegacdm
args:
As : If As is None, a default value of 2.1e-9 is applied
If As >1e-5, then As is assumed to be a ratio to be
multiplied to Asdefault to get the correct As
If As < 1e-5, then As is assumed to be the real As value
returns:
array res, where res[:,0] is koverh and res[:,1] is the power
spectrum
"""
#print "AS in cbpowerspectrum " , As
#Asdefault = 2.1e-9
if As is None :
As = 1.0#Asdefault
#elif As > 1e-5:
# As = Asdefault *As
#else:
# As = As
if ns is None :
ns = 0.963
transfers = loadtransfers(rootname = None,
filename = transferfile)
f = [Omegacdm , Omegab ]
#Do as in HACC
#print "Omeganu ", Omeganu
###The lines below are wrong
###for the calculation follow comments with three ###
### R. Biswas , Mon Mar 31 19:42:09 CDT 2014
# want rhob/(rhob + rhonu + rhoc )*Tb + rhoc /(rhoc + rhonu +rhob)*Tc
# = (rhob+ rhoc)/(rhob + rhoc + rhonu) * (rhob/(rhob + rhoc) + rhoc/(rhoc + rhob))
# = (1 - fnu) * (f_b*Tb + f_c*c )
###I want (rhob * Tb + rhoc * Tc )/(rhob + rhoc)
fnu = Omeganu / (Omeganu + Omegacdm + Omegab )
fcb = 1.0 - fnu
#print f
tcb =__preparetransfersforcombination(transfers, collist=[1,2])
#f_b Tb + fc Tc
tcbcomb = __combinetransfers(tcb , f= f, koverh= koverh)
#print "*****************"
#print transfers[:,0]
#print tcbcomb
#print "*****************"
#print "tcbcomb ", type(tcbcomb)
#print np.shape(tcbcomb) , len(transfers[:,0])
if koverh ==None:
koverh = transfers[:,0]
res = matterpowerfromtransfersforsinglespecies(
koverh = koverh,
transfer = (koverh, tcbcomb),
h = h,
As = As,
ns = ns)
#print "fcb*f", fcb ,f
#res [:,1] = fcb * fcb*res[:,1]
return res
###########################################################################
######################### #################################
######################### Helper Routines #################################
######################### #################################
###########################################################################
def __preparetransfersforcombination ( transfers,
collist ):
"""
Returns tuples of transfers used in __combinetransfers
given a numpy array transfers (as loaded from CAMB for example)
and the list of columns collist for transfer functions.
args:
transfers:
array of transfer functions formed by loading the
CAMB transfer function into a numpy array
collist : list of column numbers
returns:
tuples of numpy arrays. Each numpy array has the 0th column
koverh while column 1 is the transfer function output from
CAMB.
"""
koverh = transfers[:,0]
slist = []
for i in range(len(collist)) :
s = np.zeros(shape = (len(koverh) ,2 ))
s[:,0] = koverh
s[:,1] = transfers[:,collist[i]]
slist.append(s)
return tuple(slist)
def __combinetransfers ( transfertuples , f , koverh = None) :
"""
returns the combined tranfer function of the tuple of transfer
outputs and their koverh values at the points in the
numpy array koverh. koverh must not extend beyond the common
koverh of the transfer outputs.
args:
transfertuples: tuple of transfer functions, mandatory
Each element of transfertuples
should have a col for koverh, and the transfer output
from CAMB (obviously of the same length). Different
transfers can have different lengths.
f: array like
array of fractions of each of component
koverh: arraylike
numpy array of k/h values at which the transfer function
will be returned. The range of this koverh should be
common to all the koverh ranges of the transfers
returns:
array of combined transfer function of len equal to that of the
koverh array
"""
koverh_native = transfertuples[0][:,0]
#Where to interpolate
if koverh ==None:
koverh = koverh_native
else:
#If koverh supplied, clip to range of koverh_native
# ie. we will not extrapolate
mint = min(koverh_native)
maxt = max(koverh_native)
kbools = (koverh > mint) & (koverh < maxt )
koverh = np.array(koverh[kbools])
#Put (interpolated if k values provided) transfer functions
#into an array
transferlist = []
for transfers in transfertuples:
transfervals = transfers[:,1]
interpolatedtransfers = np.interp (koverh, koverh_native ,
transfervals ,left = np.nan, right= np.nan)
transferlist.append(interpolatedtransfers)
reqdtransfers = np.array(transferlist).transpose()
#Normalize
f = np.asarray(f)
totfrac = f.sum()
fracs = f/totfrac
v = reqdtransfers*fracs
#print "all three", np.shape(reqdtransfers), np.shape(fracs), np.shape(v)
ret = v.sum(axis=1)
#print "return shape", np.shape(ret)
#print "jkhhajkdhask transfertuples"
#print transfertuples[0][:,1]
#print transfertuples[1][:,1]
#print fracs
ret = transfertuples[0][:,1] *fracs[0]+ transfertuples[1][:,1]*fracs[1]
#print test
#ret = test
return ret
def __matterpowerfromtransfers ( transfers ,
col ,
h ,
As ,
ns ):
"""
return the matter power spectrum from the transfer output of CAMB input
as an array . As opposed to plotting from the matter power output of
CAMB, this allows one to plot the power spectrum of individual
components.
usage:
>>> transfers = cambplots.__loadtransfers(rootname = "m000n0", dirname = "../data/CAMB_outputs/")
>>> pkfromtransfers = cambplots.__matterpowerfromtransfers(transfers, h = 0.71, As = 2.14e-9, ns = 0.963)
status:
Seems to match the power spectrum computed directly, but have
not done a ratio plot requiring interpolation.
>>> pk = np.loadtxt ("../data/CAMB_outputs/m000n0_matterpower.dat")
>>> plt.loglog (pk[:,0], pk[:,1])
>>> plt.loglog (pkfromtransfers[:,0], pkfromtransfers[:,1])
"""
koverh = transfers[:,0]
TKtot = transfers[:,col]
res = __matterpowerfromtransfersforsinglespecies ( koverh ,
TKtot ,
h ,
As ,
ns )
return res
def __densitycontrastfrommatterpower(
koverh ,
matterpower,
h ,
As = None ,
ns = None ,
transfers = False ) :
"""
return the transfer output of a species from the matter power output
from the same species. If the parameters ns, As for the primordial
power spectrum are not supplied, this can calculate the transfer
output multiplied by the square root of the primordial power spectrum.
args:
koverh:
matterpower:
h:
As :
ns :
returns:
usage:
status:
"""
if transfers :
if As is None or ns is None :
if transfers:
raise ValueError()
else:
PPS = PrimordialPS (koverh, ns , As , h )
else :
PPS = np.ones(len(koverh))
transsq = matterpower / (2.0*np.pi*np.pi* h *h * h * k * PPS )
trans = np.sqrt(transsq)
return trans
def __getdelta ( transfers ,
z,
omegabh2 ,
omegach2 ,
omeganuh2 ,
H0 ):
"""
"""
h = H0/100.
Omegab = omegabh2 / h / h
Omegac = omegach2 / h / h
Omegan = omeganuh2 / h / h
rhob = Omegab* (1. + z )**3.0
rhocdm = Omegac* (1. + z)**3.0
rhonm = Omegan * (1. + z) **3.0
return 0
def plotpk(rootname,
filename = None,
dirname = '',
color = 'Black',
linestyle ='-',
labels = True,
legs = "",
title = ""):
"""Plots matter power spectrum from CAMB output "root_matterpower.dat"
assuming single redshift output.
args:
rootname: mandatory
string, output_root of CAMB params.ini file
eg. "test"
filename: optional, string
overrides the rootname to get the filename of the
matter power spectrum rather than use the rootname.
dirname : optional, defaults to curren directory
string, directory where the output of CAMB is stored
eg. "/home/rbiswas/doc/camboutput/"
color : optional, defaults to "Black"
string, color of plot
eg. 'Black'
linestyle: optional, defaults to '-'
string, linestyle of plot
eg. '-'
labels : optional, defaults to True
Bool, Show x, y labels
eg. False
legs : optional, defualts to "", no label for legends
string, value for label to be used in legend
eg. "version X"
title : optional, defaults to "", no title
string, value for title of plot
returns :
0, if successful
exceptions not defined
example usage :
status :seems to work as advertised,
R.Biswas, July 14, 2012
"""
if dirname != '':
rootname = dirname + "/" + rootname
if filename is None :
filename = rootname + "_matterpower.dat"
data = np.loadtxt(filename)
if legs !='':
plt.plot (data[:,0],data[:,1],linestyle,color =color,label =legs)
else:
plt.plot (data[:,0],data[:,1],linestyle,color =color)
plt.xscale("log")
plt.yscale("log")
if labels==True:
plt.xlabel("k/h Mpc^{-1}")
plt.ylabel("P(k)")
if title!="":
plt.title(title)
return 0
def plottk(rootname,
dirname = '',
col = 1,
color = 'Black',
linestyle ='-',
labels = True,
legs = "",
title = ""):
"""Plots transfer functions from CAMB output "root_transfer.dat"
assuming single redshift output.
args:
rootname: mandatory
string, output_root of CAMB params.ini file
eg. "test"
dirname : optional, defaults to curren directory
string, directory where the output of CAMB is stored
eg. "/home/rbiswas/doc/camboutput/"
col : optional, defaults to 1 (CDM)
int, col corresponding to quantity of interest
1 = CDM
2 = Baryon
3 = radiation
4 = neutrino
5 = massive neutrino
6 = Total
color : optional, defaults to "Black"
string, color of plot
eg. 'Black'
linestyle: optional, defaults to '-'
string, linestyle of plot
eg. '-'
labels : optional, defaults to True
Bool, Show x, y labels
eg. False
legs : optional, defualts to "", no label for legends
string, value for label to be used in legend
eg. "version X"
title : optional, defaults to "", no title
string, value for title of plot
returns :
0, if successful
exceptions not defined
example usage :
status :seems to work as advertised,
R.Biswas, July 14, 2012
"""
if dirname != '':
rootname = dirname + "/" + rootname
data = np.loadtxt(rootname + "_transfer_out.dat")
if legs !='':
plt.plot (data[:,0],data[:,col],linestyle,color =color,label =legs)
else:
plt.plot (data[:,0],data[:,col],linestyle,color =color)
plt.xscale("log")
#plt.yscale("log")
if labels==True:
plt.xlabel("k/h Mpc^{-1}")
plt.ylabel("T(k)")
if title!="":
plt.title(title)
return 0
def plotpkresids(root_fid,
root_test,
dirname = '' ,
color = 'Black',
linestyle ='-',
labels=True,
epsilon = 0.001,
title = '',legends=''):
"""Plots fractional residuals of matter power spectrum from two
CAMB outputs (root_test - root_fid)/root_fid assuming single
redshift output.
args:
root_fid: mandatory
string, output_root of fiducial CAMB params.ini file
eg. "test1"
root_test:mandatory
string, output_root of test CAMB params.ini file
eg. "test2"
dirname : optional, defaults to curren directory
string, directory where the output of CAMB is stored
eg. "/home/rbiswas/doc/camboutput/"
color : optional, defaults to "Black"
string, color of plot
eg. 'Black'
linestyle: optional, defaults to '-'
string, linestyle of plot
eg. '-'
labels : optional, defaults to True
Bool, Show x, y labels
eg. False
legends : optional, defualts to "", no label for legends
string, value for label to be used in legend
eg. "version X"
epsilon : optional, defaults to 0.001
checks that the test and fiducial files containing
the matter power spectrum have the same values of k/h
to within epsilon. If this is untrue, the plot routines
fails
title : optional, defaults to "", no title
string, value for title of plot
returns :
0, if successful
1, if the values of k/h in the test and fiducial matter
power spectrum files do not match up to within
epsilon = 0.001
example usage :
status :seems to work as advertised,
R.Biswas, July 14, 2012
"""
if dirname != '':
root_test = dirname + "/" + root_test
root_fid = dirname + "/" + root_fid
datafid = np.loadtxt(root_fid + "_matterpower.dat")
datatest = np.loadtxt(root_test + "_matterpower.dat")
if len(datafid[:,0]) !=len(datatest[:,0]):
return 1
if any(abs(datatest[:,0] -datafid[:,0])) >epsilon:
return 1
if legends !='':
plt.plot(datafid[:,0],
(datatest[:,1]-datafid[:,1])/datafid[:,1],
ls = linestyle,
color = color,
label = legends)
else:
plt.plot(datafid[:,0],
(datatest[:,1]-datafid[:,1])/datafid[:,1],
color = color ,
ls = linestyle)
plt.xscale("log")
plt.axhline( color = 'Black')
if labels==True:
plt.xlabel("k/h Mpc^{-1}")
plt.ylabel("$\Delta P(k)/P(k)$")
if title != '':
plt.title(title)
return 0
def crossingz( w0 , wa ):
"""Returns the redshift at which w crosses -1
given the w0 and wa for the CPL parametrization
w = w0 + (1 - a)wa
args:
w0: mandatory
float, w0 value
wa: mandatory
float, wa value
returns:
z:
float, redshift at which crossing happens
Negative z implies no crossing
status:
"""
a = (w0 + wa +1.)/wa
z = 1.0/a - 1.0
return z
if __name__=="__main__":
import matplotlib.pyplot as plt
ps = cbpowerspectrum(
transferfile = "example_data/oneh_transfer_out.dat",
Omegacdm = 0.3,
Omegab = 0.05,
h = 0.71,
Omeganu = 0.0,
As = None,
ns = None ,
koverh = None )
#print type(ps)
plt.loglog ( ps[:,0], ps[:,1])
plt.show()
#plotpk("test",showtitle=True)
#plotpk("CM_0.04_0.1175_70.0_-0.725_1.0")
#plotpkresids("CM_0.04_0.1175_70.0_-0.725_1.0","test")
| mit | -1,916,960,154,660,356,000 | 23.597938 | 107 | 0.650933 | false |
StanczakDominik/PythonPIC | pythonpic/visualization/time_snapshots.py | 1 | 14988 | # coding=utf-8
import itertools
import numpy as np
import matplotlib.pyplot as plt
from ..helper_functions.helpers import is_this_saved_iteration, convert_global_to_particle_iter, colors, directions
class Plot:
"""
A plot for visualization. Mainly an abstract class for overloading with interesting kinds of diagnostics.
Parameters
----------
S : Simulation
A `Simulation` object to pull data from.
ax : matplotlib axis
An axis to draw on
"""
def __init__(self, S, ax):
self.S = S
if isinstance(ax, str):
fig, self.ax = plt.subplots()
else:
self.ax = ax
self.plots = []
L = S.grid.L
self.ax.set_xlim(0, S.grid.L)
self.ax.set_xlabel(rf"Position $x$ (L={L:.3e} m)")
self.ax.grid()
xticks = np.linspace(0, L, 7)
self.ax.set_xticks(xticks)
self.ax.xaxis.set_ticklabels([f"{x/L:.1f}L" for x in xticks])
self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0), useMathText=True,
useOffset=False) # TODO axis=both?
# self.ax.yaxis.set_label_position("right")
def animation_init(self):
"""
Zeroes out all data in all lines of the plot. Useful for Animation.
"""
for plot in self.plots:
plot.set_data([], [])
def update(self, i):
"""
Updates the plot with information from a particular iteration of the simulation.
Parameters
----------
i : int
Iteration of the simulation
"""
pass
def return_animated(self):
"""
Returns an iterable of all items that have changed. Useful for Animation
"""
return self.plots
class FrequencyPlot(Plot):
"""
Plots the spatial Fourier transform of field energy versus wave number.
""" # REFACTOR move the fourier analysis to PostProcessedGrid; describe the math here as well as there
def __init__(self, S, ax):
super().__init__(S, ax)
self.plots.append(self.ax.plot([], [], "o-", label="energy per mode")[0])
self.ax.set_xlabel(r"Wavevector mode $k$")
self.ax.set_ylabel(r"Energy $E$")
# max_interesting = S.grid.k_plot[...].max() * 0.3
# self.indices = S.grid.k_plot < max_interesting
self.indices = np.ones_like(S.grid.k_plot, dtype=bool)
interesting_x = S.grid.k_plot[self.indices]
self.ax.set_xticks(interesting_x)
self.ax.xaxis.set_ticklabels(np.arange(len(interesting_x)))
self.ax.set_xlim(interesting_x.min(), interesting_x.max())
self.ax.set_ylim(0, S.grid.longitudinal_energy_per_mode_history[...].max())
def update(self, i):
# import ipdb; ipdb.set_trace()
self.plots[0].set_data(self.S.grid.k_plot[self.indices],
self.S.grid.longitudinal_energy_per_mode_history[i][self.indices])
def phaseplot_values(species):
"""
A convenience function to get a dictionary of values, to allow generalization of the PhasePlot class.
The keys you can pull for phase plots are `x`, `v_x`, `v_y` and `v_z`.
Parameters
----------
species : Species
A species to draw data from
Returns
-------
A dictionary of phase plot values.
"""
return {"x": species.position_history,
"v_x": species.velocity_history[:, :, 0],
"v_y": species.velocity_history[:, :, 1],
"v_z": species.velocity_history[:, :, 2],
}
class PhasePlot(Plot):
"""
Draws a phase plot.
The keys you can pull for phase plots are `x`, `v_x`, `v_y` and `v_z`.
Parameters
----------
v1, v2 : str
keys for the phase plot.
alpha : float
An opacity value between 0 and 1. Useful for neat phase plots displaying density.
"""
def __init__(self, S, ax, v1, v2, alpha):
super().__init__(S, ax)
self.x = [phaseplot_values(species)[v1] for species in S.list_species]
self.y = [phaseplot_values(species)[v2] for species in S.list_species]
if len(self.y):
maxys = max([np.max(np.abs(y)) for y in self.y])
self.ax.set_ylim(-maxys, maxys)
for i, species in enumerate(S.list_species):
self.plots.append(self.ax.plot([], [], colors[i] + ".", alpha=alpha)[0])
# self.ax.yaxis.set_label_position("right")
self.ax.set_xlabel(rf"${v1}$")
self.ax.set_ylabel(rf"${v2}$")
def update(self, i):
for plot, species, x, y in zip(self.plots, self.S.list_species, self.x, self.y):
if is_this_saved_iteration(i, species.save_every_n_iterations):
index = convert_global_to_particle_iter(i, species.save_every_n_iterations)
alive = species.N_alive_history[index] +1
# print(y[index, species.alive_history[index]]) #TODO: get alive history to work here!
plot.set_data(x[index, :alive], # , species.alive_history[index]],
y[index, :alive]) # , species.alive_history[index]])
class SpatialDistributionPlot(Plot):
"""
Draws particle density on the grid.
"""
def __init__(self, S, ax):
super().__init__(S, ax)
ax.set_ylabel(f"Particle density $n$")
for species in S.list_species:
self.plots.append(self.ax.plot([], [], "-", label=species.name)[0])
if len(S.list_species):
self.ax.set_ylim(0, 1.2*max([species.density_history[...].max() for species in S.list_species]))
self.ax.legend(loc='best')
def update(self, i):
for species, plot in zip(self.S.list_species, self.plots):
plot.set_data(self.S.grid.x, species.density_history[i])
class SpatialPerturbationDistributionPlot(SpatialDistributionPlot):
def __init__(self, S, ax):
super().__init__(S, ax)
self.ax.set_ylabel(r"$\Delta n = n - n(t=0)$")
self.y = [species.density_history - species.density_history[0] for species in S.list_species]
if len(S.list_species):
self.ax.set_ylim(min([1.2 * y.min() for y in self.y]),max([1.2 * y.max() for y in self.y]))
self.ax.legend(loc='best')
def update(self, i):
for species, plot, y in zip(self.S.list_species, self.plots, self.y):
plot.set_data(self.S.grid.x, y[i])
class ChargeDistributionPlot(Plot):
"""
Draws charge density from the grid.
"""
def __init__(self, S, ax, check_poisson=False):
super().__init__(S, ax)
self.plots.append(self.ax.plot([], [], "-", alpha=0.8, label="charge")[0])
self.ax.set_ylabel(f"Charge density $\\rho$")
mincharge = np.min(S.grid.charge_density_history)
maxcharge = np.max(S.grid.charge_density_history)
self.ax.set_ylim(mincharge, maxcharge)
self.check_poisson = check_poisson
if check_poisson:
self.plots.append(self.ax.plot([], [], "-", alpha=0.8, label=r"$\varepsilon_0 \partial E/ \partial x$")[0])
self.ax.legend(loc='lower left')
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.charge_density_history[i, :])
if self.check_poisson:
self.plots[1].set_data(self.S.grid.x, self.S.grid.check_on_charge[i])
class Histogram(Plot):
"""
Draws a histogram of a given value from the phase plot dataset.
The keys you can pull for phase plots are `x`, `v_x`, `v_y` and `v_z`.
Parameters
----------
v1 : str
A key to phase plot values.
n_bins: int
Number of bins to draw.
"""
def __init__(self, S, ax, v1: str, n_bins: int = 50):
super().__init__(S, ax)
self.bin_arrays = []
self.values = [phaseplot_values(species)[v1] for species in S.list_species]
if len(self.values):
maxxs = max([np.max(np.abs(v)) for v in self.values])
self.ax.set_xlim(-maxxs, maxxs)
for i, s, v in zip(range(len(S.list_species)), S.list_species, self.values):
bin_array = np.linspace(v.min(), v.max(), n_bins)
self.bin_arrays.append(bin_array)
self.plots.append(
self.ax.plot(*calculate_histogram_data(v[0], bin_array), colors[i])[0])
self.ax.set_xlabel(rf"${v1}$")
self.ax.set_ylabel(r"Number of particles")
if len(self.bin_arrays):
self.ax.set_xlim(min([bin_array.min() for bin_array in self.bin_arrays]),
max([bin_array.max() for bin_array in self.bin_arrays]))
def update(self, i):
for species, histogram, bin_array, v in zip(self.S.list_species, self.plots, self.bin_arrays, self.values):
index = convert_global_to_particle_iter(i, species.save_every_n_iterations)
alive = species.N_alive_history[index] +1
histogram.set_data(*calculate_histogram_data(v[index, :alive], bin_array))
def calculate_histogram_data(arr, bins):
"""
Calculates histogram values, normalized to the number of particles.
Parameters
----------
arr : ndarray
Values of a particle property, for example, velocity
bins : ndarray
Bin edges for the histogram.
Returns
-------
bin_center : ndarray
Centers of histogram bars (the x array for plotting)
bin_height : ndarray
Heights of histogram bars (the y array for plotting)
"""
bin_height, bin_edge = np.histogram(arr, bins=bins) # OPTIMIZE
bin_center = (bin_edge[:-1] + bin_edge[1:]) * 0.5
return bin_center, bin_height
class IterationCounter:
"""
A little widget inserted on an axis, displaying the iteration number and current simulation time.
"""
def __init__(self, S, ax):
self.S = S
self.ax = ax
self.counter = ax.text(0.1, 0.9, 'i=x', horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
def animation_init(self):
self.counter.set_text("Iteration: \nTime: ")
def update(self, i):
self.counter.set_text(f"Iteration: {i}/{self.S.NT}\nTime: {i*self.S.dt:.3g}/{self.S.NT*self.S.dt:.3g}")
def return_animated(self):
return [self.counter]
class FieldPlot(Plot):
"""
Draws electric and magnetic fields from the grid in a given direction
Parameters
----------
j : int
Direction as Cartesian index number. 0: x, 1: y, 2: z
"""
def __init__(self, S, ax, j):
super().__init__(S, ax)
self.j = j
self.plots.append(self.ax.plot([], [], "-", label=f"$E_{directions[j]}$")[0])
self.ax.set_ylabel(r"Fields $E$, $B$")
max_e = np.max(np.abs(S.grid.electric_field_history[:, :, j]))
if j != 0:
self.plots.append(self.ax.plot([], [], "-", label=f"$B_{directions[j]}$")[0])
max_b = np.max(np.abs(S.grid.magnetic_field_history[:, :, j]))
maxfield = max([max_e, max_b])
else:
maxfield = max_e
print(f"For direction {directions[j]}, maxfield is {maxfield:.2e}")
self.ax.set_ylim(-maxfield, maxfield)
self.ax.legend(loc='upper right')
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.electric_field_history[i, :, self.j])
if self.j != 0:
self.plots[1].set_data(self.S.grid.x, self.S.grid.magnetic_field_history[i, :, self.j])
class PoyntingFieldPlot(Plot):
"""
Draws electric and magnetic field energy flux (Poynting flux) from the grid
"""
def __init__(self, S, ax):
super().__init__(S, ax)
self.plots.append(self.ax.plot([], [], "-", label=f"Poynting flux")[0])
self.ax.set_ylabel(r"Poynting flux")
max_P = np.max(np.abs(S.grid.poynting_history[...]))
self.ax.set_ylim(-max_P, max_P)
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.poynting_history[i, :])
class CurrentPlot(Plot):
"""
Draws currents from the grid in a given direction.
Parameters
----------
j : int
Direction as Cartesian index number. 0: x, 1: y, 2: z
"""
def __init__(self, S, ax, j):
super().__init__(S, ax)
self.j = j
x = S.grid.x_current if j == 0 else S.grid.x
self.plots.append(self.ax.plot(x, S.grid.current_density_history[0, :, j], "-",
alpha=0.9,
label=fr"$j_{directions[j]}$")[0])
self.ax.set_ylabel(f"Current density $j_{directions[j]}$")
self.ax.tick_params('y')
self.ax.legend(loc='lower left')
current = S.grid.current_density_history[:, :, j]
# mean = current.mean()
# std = 3*current.std()
#
# mincurrent = mean - std
# maxcurrent = mean + std
mincurrent = current.min()
maxcurrent = current.max()
try:
ax.set_ylim(mincurrent, maxcurrent)
except ValueError as E:
print(f"Error on setting current limits in {j}: {E}")
def update(self, i):
self.plots[0].set_data(self.S.grid.x, self.S.grid.current_density_history[i, :, self.j])
class PlotSet:
"""
A single object representing a few different plots on different axes.
Useful for plotting sets of directional values (fields, currents).
Parameters
----------
axes : list
List of axes to use.
list_plots :
List of `Plot`s to update and return.
"""
def __init__(self, axes, list_plots):
self.axes = axes
self.list_plots = list_plots
def update(self, i):
for plot in self.list_plots:
plot.update(i)
def animation_init(self):
for plot in self.list_plots:
plot.animation_init()
def return_animated(self):
return list(itertools.chain.from_iterable(plot.return_animated() for plot in self.list_plots))
class TripleFieldPlot(PlotSet):
"""
Draws electric and magnetic field plots on the grid on a given list of axes.
Parameters
----------
S : Simulation
Simulation to pull data from.
axes : list
List of matplotlib axes.
"""
def __init__(self, S, axes: list):
assert len(axes) <= 3, "Too many axes, we ran out of directions!"
plots = [FieldPlot(S, ax, j) for j, ax in enumerate(axes)]
super().__init__(axes, plots)
class TripleCurrentPlot(PlotSet):
"""
Draws currents on the grid on a given list of axes.
Parameters
----------
S : Simulation
Simulation to pull data from.
axes : list
List of matplotlib axes.
"""
def __init__(self, S, axes: list):
assert len(axes) <= 3, "Too many axes, we ran out of directions!"
plots = [CurrentPlot(S, ax, j) for j, ax in enumerate(axes)]
super().__init__(axes, plots)
| bsd-3-clause | 370,621,914,730,282,940 | 33.855814 | 119 | 0.573993 | false |
ricardog/raster-project | adrid-test.py | 1 | 2116 | #!/usr/bin/env python
import rasterio
import fiona
import numpy as np
import os
import time
from rasterio.plot import show
import matplotlib.pyplot as plt
from projections.rasterset import RasterSet, Raster
import projections.predicts as predicts
import projections.r2py.modelr as modelr
# Open the mask shape file
shp_file = os.path.join(os.environ['DATA_ROOT'],
'from-adriana/tropicalforests.shp')
shapes = fiona.open(shp_file)
# Read Adriana's abundance model (mainland)
mod = modelr.load(os.path.join(os.environ['MODEL_DIR'],
'ab-model.rds'))
predicts.predictify(mod)
# Import standard PREDICTS rasters
rasters = predicts.rasterset('luh5', 'historical', 1990, True)
rs = RasterSet(rasters, shapes = shapes, all_touched = True)
what = mod.output
rs[mod.output] = mod
stime = time.time()
data1, meta_data1 = rs.eval(what)
etime = time.time()
print("executed in %6.2fs" % (etime - stime))
show(data1)
##
## Compare with good raster
##
out = rasterio.open('adrid-good.tif')
good = out.read(1, masked=True)
diff = np.fabs(data1 - good)
print("max diff: %f" % diff.max())
assert np.allclose(data1, good, atol=1e-05, equal_nan=True)
del out
##
## Redo the projection using iterative API
##
mod = modelr.load('../models/ab-corrected.rds')
predicts.predictify(mod)
# Import standard PREDICTS rasters
rasters2 = predicts.rasterset('rcp', 'aim', 2020, 'medium')
rs2 = RasterSet(rasters2, shapes = shapes, all_touched = True)
rs2[mod.output] = mod
stime = time.time()
rs2.write(what, 'adrid.tif')
etime = time.time()
print("executed in %6.2fs" % (etime - stime))
out = rasterio.open('adrid.tif')
data2 = out.read(1, masked=True)
diff = np.fabs(data1 - data2)
print("max diff: %f" % diff.max())
plot = None
if plot:
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 5))
show(data1, ax=ax1, cmap='Greens', title='Non-incremental')
show(data2, ax=ax2, cmap='Greens', title='Incremental')
show(diff, ax=ax3, cmap='viridis', title='Difference')
plt.show()
# Verify the data matches
assert np.allclose(data1, data2, atol=1e-05, equal_nan=True)
| apache-2.0 | -8,879,507,970,909,112,000 | 26.128205 | 62 | 0.694234 | false |
changebio/mamotif | MotifScan/core.py | 1 | 21144 | #
# Copyright @ 2014, 2015 Jiawei Wang <jerryeah@gmail.com>, Zhen Shao <shao@enders.tch.harvard.edu>
#
# Licensed under the GPL License, Version 3.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/copyleft/gpl.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The core functions of MotifScan.
motif_scan(): the interface of MotifScan core
target_enrichment(): perform the enrichment analysis between sample and random
target_enrichment_peak2peak(): perform enrichment analysis between two samples
fc_tarnum_distribution(): fold change plot graph of each motif ranked by peak mvalue
target_site_distribution(): motif target distribution graph around peak summit
"""
__authors__ = ['"Jiawei Wang" <jerryeah@gmail.com>']
# -----------------------------------------
# modules
# -----------------------------------------
import numpy as np
import pandas as pd
from scipy import stats
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import math
import os
import os.path
import ctypes
import MotifScan
class MAT(ctypes.Structure):
_fields_ = [("n", ctypes.c_int),
("a_arr", ctypes.POINTER(ctypes.c_double)),
("c_arr", ctypes.POINTER(ctypes.c_double)),
("g_arr", ctypes.POINTER(ctypes.c_double)),
("t_arr", ctypes.POINTER(ctypes.c_double))]
class MOTIF_RES(ctypes.Structure):
_fields_ = [("tarnum", ctypes.c_int),
("ratio", ctypes.c_double),
("tarsite", ctypes.POINTER(ctypes.c_int)),
("tarratio", ctypes.POINTER(ctypes.c_double))]
def arr2MAT(arr):
n = np.shape(arr)[1]
a = np.ctypeslib.as_ctypes(arr[0])
c = np.ctypeslib.as_ctypes(arr[1])
g = np.ctypeslib.as_ctypes(arr[2])
t = np.ctypeslib.as_ctypes(arr[3])
return MAT(n, a, c, g, t)
def motif_scan(peak_table, motif_table, background, tmp_output):
""" The motif scan interface
Perform motif scanning by motifs
Args:
peak_table: pandas dataframe containing sequence matrix
motif_table: pandas dataframe containing motif basic information
background: a numpy array containing ratio of A C G T on the genome
tmp_output: temporary motifscan output file for a certain motif
"""
score_c = ctypes.CDLL('%s/score_c.so' % os.path.dirname(os.path.realpath(MotifScan.__file__)))
score_c.motif_scan_core.restype = ctypes.POINTER(MOTIF_RES)
score_c.motif_scan_core.argtypes = [ctypes.POINTER(MAT),
ctypes.POINTER(MAT),
ctypes.POINTER(ctypes.c_double*4),
ctypes.c_double,
ctypes.c_double]
score_c.freeMOTIF_RES.argtypes = [ctypes.POINTER(MOTIF_RES)]
n_motif = len(motif_table)
background = np.ctypeslib.as_ctypes(background)
cnt = 0
for idx, motif_record in motif_table.iterrows():
motif_name = motif_record['name']
motif_len = np.shape(motif_record['matrix'])[1]
score_cutoff = ctypes.c_double(motif_record['score_cutoff'])
max_score = ctypes.c_double(motif_record['max_score'])
mmatrix = arr2MAT(motif_record['matrix'])
sys.stdout.write("Scan ning for %s...%s%s\
\r" % (motif_record['name'], cnt*100/n_motif, "%"))
sys.stdout.flush()
ratio, tarnum, tarsite, tarratio = [], [], [], []
for p_idx, peak_record in peak_table.iterrows():
smatrix = arr2MAT(peak_record['seq_matrix'])
r = score_c.motif_scan_core(ctypes.byref(smatrix),
ctypes.byref(mmatrix),
ctypes.byref(background),
max_score,
score_cutoff)
ratio.append(r.contents.ratio)
ctypes_tarsite = (ctypes.c_int * r.contents.tarnum).from_address(ctypes.addressof(r.contents.tarsite.contents))
ts = []
for i in np.arange(r.contents.tarnum):
ts.append(ctypes_tarsite[i])
ctypes_tarratio = (ctypes.c_double * r.contents.tarnum).from_address(ctypes.addressof(r.contents.tarratio.contents))
tr = []
for i in np.arange(r.contents.tarnum):
tr.append(ctypes_tarratio[i])
# deduplicate overlap motif target sites
ts, tr = deduplicate_target_site(ts, tr, motif_len)
tarnum.append(len(tr))
tarsite.append(ts)
tarratio.append(tr)
score_c.freeMOTIF_RES(r)
peak_result = pd.DataFrame({'%s.ratio' % motif_name: ratio,
'%s.tarnum' % motif_name: tarnum,
'%s.tarsite' % motif_name: tarsite,
'%s.tarratio' % motif_name: tarratio})
peak_result.to_pickle("%s/%s" % (tmp_output, motif_record['id']))
cnt += 1
print 'Scanning...Done! \r'
def deduplicate_target_site(ts, tr, motif_len):
"""
if the distance between two neignbor target site is smaller than
the motif length, we viewed them as duplicate target sites and thus filter out
the one with the lower ratio.
"""
if len(ts) >= 2:
i_pre = float("-Inf")
j_pre = float("-Inf")
for (i, j) in zip(ts,tr):
if i - i_pre < motif_len:
if j < j_pre:
ts.remove(i)
tr.remove(j)
else:
ts.remove(i_pre)
tr.remove(j_pre)
i_pre = i
j_pre = j
else:
i_pre = i
j_pre = j
return ts, tr
def target_enrichment(peak_table, rnd_table, motif_table, gene_based=False):
"""Perform the enrichment analysis between sample and random.
Args:
peak_table: pandas dataframe, motifscan result table on sample
rnd_table: pandas dataframe, motifscan result table on random
motif_table: pandas dataframe, motif information table
Returns:
motif_table: pandas dataframe, table containing both motif information and
fisher exact test statistics
"""
n_motif = len(motif_table)
n_peak = len(peak_table)
n_rand = len(rnd_table)
n_samp = int(n_rand/n_peak)
fold_change = np.zeros(n_motif)
enrich_pvalue = np.zeros(n_motif)
deplete_pvalue = np.zeros(n_motif)
oddsratio = np.ones(n_motif)
pvalue_corrected = np.ones(n_motif)
peak_tarnum = np.zeros(n_motif)
rand_tarnum = np.zeros(n_motif)
peak_tarnum_table = peak_table[
pd.Index([i for i in peak_table.columns if re.search(r'\.tarnum', i)])]
rnd_tarnum_table = rnd_table[
pd.Index([i for i in rnd_table.columns if re.search(r'\.tarnum', i)])]
for mti, motif_name in zip(range(n_motif), motif_table['name']):
if gene_based:
targeted_peak_idx = peak_table.groupby(['target_gene'])['%s.tarnum' % motif_name].transform(max) == peak_table['%s.tarnum' % motif_name]
targeted_peak_idx = peak_table.loc[targeted_peak_idx,['target_gene','%s.tarnum' % motif_name]].drop_duplicates().index
targeted_rand_idx = []
for i in targeted_peak_idx:
for j in np.arange(n_samp):
targeted_rand_idx.append(i*n_samp+j)
targeted_rand_idx = pd.Index(targeted_rand_idx)
peak_tarnum[mti] = len(
[i for i in peak_tarnum_table.loc[targeted_peak_idx, '%s.tarnum' % motif_name] if i > 0])
rand_tarnum[mti] = len(
[i for i in rnd_tarnum_table.loc[targeted_rand_idx, '%s.tarnum' % motif_name] if i > 0])
else:
peak_tarnum[mti] = len(
[i for i in peak_tarnum_table['%s.tarnum' % motif_name] if i > 0])
rand_tarnum[mti] = len(
[i for i in rnd_tarnum_table['%s.tarnum' % motif_name] if i > 0])
if peak_tarnum[mti] != 0 and rand_tarnum[mti] != 0:
fold_change[mti] = float(peak_tarnum[mti] * n_rand) / (
rand_tarnum[mti] * n_peak)
else:
fold_change[mti] = 'NaN'
table = [[peak_tarnum[mti], n_peak - peak_tarnum[mti]],
[rand_tarnum[mti], n_rand - rand_tarnum[mti]]]
oddsratio[mti], enrich_pvalue[mti] = stats.fisher_exact(table, 'greater')
oddsratio[mti], deplete_pvalue[mti] = stats.fisher_exact(table, 'less')
pvalue_corrected[mti] = min(min(deplete_pvalue[mti],
enrich_pvalue[mti]) * n_motif, 1)
motif_table['target_number'] = peak_tarnum
motif_table['rnd_target_number'] = rand_tarnum
motif_table['fold_change'] = fold_change
motif_table['enrich_pvalue'] = enrich_pvalue
motif_table['deplete_pvalue'] = deplete_pvalue
motif_table['pvalue_corrected'] = pvalue_corrected
return motif_table
def target_enrichment_peak2peak(peak1_table, peak2_table, motif_table):
"""Perform the enrichment analysis on two samples
Args:
peak_table: pandas dataframe, motifscan result table on sample1
rnd_table: pandas dataframe, motifscan result table on sample2
motif_table: pandas dataframe, motif information table
Returns:
motif_table: pandas dataframe, table containing both motif information and
fisher exact test statistics
"""
n_motif = len(motif_table)
n_peak1 = len(peak1_table)
n_peak2 = len(peak2_table)
fold_change = np.zeros(n_motif)
enrich_pvalue = np.zeros(n_motif)
deplete_pvalue = np.zeros(n_motif)
oddsratio = np.ones(n_motif)
pvalue_corrected = np.ones(n_motif)
peak1_tarnum = np.zeros(n_motif)
peak2_tarnum = np.zeros(n_motif)
# print pd.Index([i for i in peak1_table.columns if re.search(r'\.tarnum',i)])
peak1_tarnum_table = peak1_table[
pd.Index([i for i in peak1_table.columns if re.search(r'\.tarnum', i)])]
peak2_tarnum_table = peak2_table[
pd.Index([i for i in peak2_table.columns if re.search(r'\.tarnum', i)])]
for mti, motif_name in zip(range(n_motif), motif_table['name']):
peak1_tarnum[mti] = len(
[i for i in peak1_tarnum_table['%s.tarnum' % motif_name] if i > 0])
peak2_tarnum[mti] = len(
[i for i in peak2_tarnum_table['%s.tarnum' % motif_name] if i > 0])
if peak1_tarnum[mti] != 0 and peak2_tarnum[mti] != 0:
fold_change[mti] = float(peak1_tarnum[mti] * n_peak2) / (
peak2_tarnum[mti] * n_peak1)
else:
fold_change[mti] = 'NaN'
table = [[peak1_tarnum[mti], n_peak1 - peak1_tarnum[mti]],
[peak2_tarnum[mti], n_peak2 - peak2_tarnum[mti]]]
oddsratio[mti], enrich_pvalue[mti] = stats.fisher_exact(table, 'greater')
oddsratio[mti], deplete_pvalue[mti] = stats.fisher_exact(table, 'less')
pvalue_corrected[mti] = min(min(deplete_pvalue[mti],
enrich_pvalue[mti]) * n_motif, 1)
motif_table['peak1_target_number'] = peak1_tarnum
motif_table['peak2_target_number'] = peak2_tarnum
motif_table['fold_change'] = fold_change
motif_table['enrich_pvalue'] = enrich_pvalue
motif_table['deplete_pvalue'] = deplete_pvalue
motif_table['oddsratio'] = oddsratio
motif_table['pvalue_corrected'] = pvalue_corrected
motif_table.sort('enrich_pvalue', inplace=True)
return motif_table
#################################################################################
# plot functions
#################################################################################
def target_site_distribution(peak_table, motif_table, plot_out_dir, bin_size=5, region_radius=500):
win_size = region_radius * 2
half_win_size = win_size / 2
bin_center = np.arange(-half_win_size, win_size - half_win_size + 1, bin_size)
bin_edge = bin_center - round(bin_size / 2)
bin_edge = np.append(bin_edge, bin_center[-1] + round(bin_size / 2))
bin_edge = map(int, bin_edge)
n_motif = len(motif_table)
cnt = 0
for midx, motif_record in motif_table.iterrows():
motif_len = np.shape(motif_record['matrix'])[1]
motif_name = motif_record['name']
motif_tarsite = []
for idx, i in enumerate(peak_table['%s.tarsite' % motif_name]):
if len(i) > 0:
for j in i:
motif_tarsite.append(j + motif_len/2 - half_win_size)
motif_tarsite_freq = np.histogram(motif_tarsite, bin_edge)
motif_marker = motif_tarsite_freq[0]
motif_marker[0] = motif_marker[0] * 2
motif_marker[-1] = motif_marker[-1] * 2
motif_marker = motif_marker / float(sum(motif_marker))
motif_marker = smooth(motif_marker, 20)
plt.cla()
#plt.plot(bin_edge[:-1], motif_marker, lw = 2, color='#4169E1', label=motif_name)
plt.bar(bin_edge[:-1], motif_marker, width=bin_size-1.5,
color='#4169E1', linewidth=0, label=motif_name)
plt.legend()
ax = plt.gca()
ax.set_xlabel('Distance to Peak Summit', weight='bold')
ax.set_ylabel('Fraction', weight='bold')
ax.set_xlim([min(bin_edge), max(bin_edge)])
ax.set_ylim([0, 1.5 * max(motif_marker)])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontweight('bold')
ax.tick_params(axis='x', which='both', top='off')
ax.tick_params(axis='y', which='both', right='off')
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(2)
plt.savefig('%s/%s_target_site.png' % (plot_out_dir, re.sub(r'[\:\-\.]{1,2}', '_', motif_name)), dpi=600)
plt.close()
# sys.stdout.write("\r")
sys.stdout.write("Target sites distribution plotting for %s...%s%s \r" % (motif_name, (cnt)*100/n_motif, "%"))
sys.stdout.flush()
cnt += 1
print "Target sites distribution plotting...Done! \r"
def tarnum_and_tarsite_distribution(peak_table, rand_table, motif_table, plot_out_dir, bin_size=10, region_radius=500):
# fold change plot parameter
motif_name_list = extract_motif_name_from_peak_result_table(peak_table)
npeak = len(peak_table)
nrand = len(rand_table)
nsamp = round(nrand / npeak)
bin = 1000
half_bin = bin/2
peak_table.sort('value', ascending=False, inplace=True)
n_motif = len(motif_name_list)
# target site paramater
win_size = region_radius * 2
half_win_size = win_size / 2
bin_center = np.arange(-half_win_size, win_size - half_win_size + 1, bin_size)
bin_edge = bin_center - round(bin_size / 2)
bin_edge = np.append(bin_edge, bin_center[-1] + round(bin_size / 2))
bin_edge = map(int, bin_edge)
n_motif = len(motif_table)
cnt = 0
for midx, motif_record in motif_table.iterrows():
cnt += 1
motif_len = np.shape(motif_record['matrix'])[1]
motif_name = motif_record['name']
# fold change data preparation
tarnum_fc_smooth = np.zeros(npeak)
tarnum_smooth = np.zeros(npeak)
for pi in np.arange(npeak):
peak_start_idx = max(0, pi - half_bin)
peak_end_idx = min(npeak, pi + half_bin)
peak_tarnum = peak_table['%s.tarnum' %
motif_name].iloc[peak_start_idx:peak_end_idx]
peak_tarnum_smooth = float(
len(peak_tarnum[peak_tarnum > 0])) / len(peak_tarnum)
rnd_idx = peak_table.iloc[peak_start_idx:peak_end_idx].index*int(nsamp)
rand_tarnum = rand_table['%s.tarnum' %
motif_name].ix[rnd_idx]
rand_tarnum_smooth = float(
len(rand_tarnum[rand_tarnum > 0])) / len(rand_tarnum)
tarnum_smooth[pi] = peak_tarnum_smooth
if rand_tarnum_smooth == 0:
tarnum_fc_smooth[pi] = peak_tarnum_smooth
else:
tarnum_fc_smooth[pi] = peak_tarnum_smooth / rand_tarnum_smooth
tarnum_fc_smooth = pd.Series(tarnum_fc_smooth)
tarnum_smooth = pd.Series(tarnum_smooth)
# target site data preparation
motif_tarsite = []
for idx, i in enumerate(peak_table['%s.tarsite' % motif_name]):
if len(i) > 0:
for j in i:
motif_tarsite.append(j + motif_len/2 - half_win_size)
motif_tarsite_freq = np.histogram(motif_tarsite, bin_edge)
motif_marker = motif_tarsite_freq[0]
motif_marker[0] = motif_marker[0] * 2
motif_marker[-1] = motif_marker[-1] * 2
motif_marker = motif_marker / float(sum(motif_marker))
motif_marker = smooth(motif_marker, 20)
# plot
plt.cla()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
# fc
fig.suptitle(motif_name, weight='black')
ax1.bar(np.arange(npeak), tarnum_fc_smooth, 1,
color='#4169E1', lw=0, label='fc of %s' % motif_name)
xtick_step = _get_highest_digit(npeak/6)*10**int(math.log10(npeak/6))
ax1.set_xticks(np.arange(0, npeak, xtick_step))
ax1.set_xlim(xmin=1, xmax=npeak)
ax1.set_ylim(ymin=0, ymax=1.3*max(tarnum_fc_smooth))
for axis in ['top', 'bottom', 'left', 'right']:
ax1.spines[axis].set_linewidth(2)
ax1.tick_params(axis='x', which='both', top='off')
ax1.tick_params(axis='y', which='both', right='off')
ax1.set_ylabel('Fold Change', weight='bold')
ax1.set_xlabel('Peak Rank (Sorted by Value in Descending Order)', weight='bold')
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontweight('bold')
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontweight('bold')
# targetsite
ax2.bar(bin_edge[:-1], motif_marker, width=bin_size-1.5,
color='#4169E1', linewidth=0, label=motif_name)
ax2.set_xlabel('Distance to Peak Summit', weight='bold')
ax2.set_ylabel('Fraction', weight='bold')
ax2.set_xlim([min(bin_edge), max(bin_edge)])
ax2.set_ylim([0, 1.5 * max(motif_marker)])
for tick in ax2.xaxis.get_major_ticks():
tick.label.set_fontweight('bold')
for tick in ax2.yaxis.get_major_ticks():
tick.label.set_fontweight('bold')
ax2.tick_params(axis='x', which='both', top='off')
ax2.tick_params(axis='y', which='both', right='off')
for axis in ['top', 'bottom', 'left', 'right']:
ax2.spines[axis].set_linewidth(2)
fig.savefig('%s/%s_%s_tarsite_fc_dist.png' % (plot_out_dir, cnt, re.sub(r'[\:\-\.]{1,2}', '_', motif_name)), dpi=600)
plt.close()
sys.stdout.write("Fold change distribution plotting for %s...%s%s \r" % (motif_name, (cnt)*100/n_motif, "%"))
sys.stdout.flush()
print 'Fold change distribution plotting...Done! \r'
def extract_motif_name_from_peak_result_table(peak_result_table):
'''
extract the motif name from the peak_result_table
'''
motif_name = []
for col_name in peak_result_table.columns:
if re.search(r'.*\.tarnum', col_name):
motif_name.append(col_name[:-7])
return motif_name
def smooth(x, window_len=5, window='hanning'):
if type(x) == type([]):
x = np.array(x)
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len < 3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', blackman'"
s = np.r_[2*x[0] - x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]
if window == 'flat': # moving average
w = ones(window_len, 'd')
else:
w = eval('np.'+window+'(window_len)')
y = np.convolve(w/w.sum(), s, mode='same')
# return the smoothed signal, chopping off the ends so that it has the previous size.
return y[window_len-1:-window_len+1]
def _get_highest_digit(n):
if n / 100000000:
n /= 100000000
if n / 10000:
n /= 10000
if n / 100:
n /= 100
if n / 10:
n /= 10
return n
| gpl-3.0 | 4,803,558,463,425,500,000 | 40.622047 | 148 | 0.57378 | false |
Gregor-Mendel-Institute/SNPmatch | snpmatch/core/snp_genotype.py | 1 | 21124 | import numpy as np
import numpy.ma
import scipy as sp
import pandas as pd
import logging
import re
from glob import glob
from snpmatch.pygwas import genotype
from . import parsers
from . import genomes
import allel
import itertools
import os.path
import numbers
log = logging.getLogger(__name__)
chunk_size = 1000
def load_genotype_files(h5file, hdf5_acc_file=None):
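    """
    Convenience wrapper that returns a Genotype object for the given HDF5
    genotype file. hdf5_acc_file (the accession-major HDF5) is optional; when
    omitted, the Genotype class looks for a matching '.acc.hdf5' file next to
    h5file.
    """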
return(Genotype(h5file, hdf5_acc_file))
## Class object adapted from PyGWAS genotype object
class Genotype(object):
def __init__(self, hdf5_file, hdf5_acc_file):
        assert hdf5_file is not None or hdf5_acc_file is not None, "Provide at least one hdf5 genotype file"
if hdf5_file is None:
assert os.path.isfile(hdf5_acc_file), "Path to %s seems to be broken" % hdf5_acc_file
self.g_acc = genotype.load_hdf5_genotype_data(hdf5_acc_file)
return(None)
assert os.path.isfile(hdf5_file), "Path to %s seems to be broken" % hdf5_file
self.g = genotype.load_hdf5_genotype_data(hdf5_file)
if hdf5_acc_file is None:
            hdf5_acc_file = re.sub(r'\.hdf5$', '', hdf5_file) + '.acc.hdf5'
if len(glob(hdf5_acc_file)) > 0:
self.g_acc = genotype.load_hdf5_genotype_data(hdf5_acc_file)
else:
self.g_acc = genotype.load_hdf5_genotype_data(hdf5_acc_file)
self.accessions = self.g.accessions.astype('U')
self.chrs = self.g.chrs.astype('U')
def get_positions_idxs(self, commonSNPsCHR, commonSNPsPOS):
return(self.get_common_positions( np.array(self.g.chromosomes), self.g.positions, commonSNPsCHR, commonSNPsPOS ))
@staticmethod
def get_common_positions(input_1_chr, input_1_pos, input_2_chr, input_2_pos):
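        """
        Find positions shared between two sets of (chromosome, position) arrays.
        Both inputs are passed through ParseInputs.filter_chr_names before
        matching, so chromosome naming differences are normalised first.
        Returns a tuple (idx_1, idx_2) of integer indices into the first and
        second input, respectively, at the common positions.
        """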
assert len(input_1_chr) == len(input_1_pos), "Both chromosome and position array provided should be of same length"
assert len(input_2_chr) == len(input_2_pos), "Both chromosome and position array provided should be of same length"
common_ins_1 = parsers.ParseInputs("")
common_ins_1.load_snp_info( snpCHR=input_1_chr, snpPOS=input_1_pos, snpGT="", snpWEI=np.nan, DPmean=0 )
common_ins_1.filter_chr_names()
common_ins_2 = parsers.ParseInputs("")
common_ins_2.load_snp_info( snpCHR=input_2_chr, snpPOS=input_2_pos, snpGT="", snpWEI=np.nan, DPmean=0 )
common_ins_2.filter_chr_names()
common_chr_ids = np.intersect1d(common_ins_1.g_chrs_ids, common_ins_2.g_chrs_ids)
## Sort the chr list based on the db
common_chr_ids = common_ins_1.g_chrs_ids[np.where(np.isin(common_ins_1.g_chrs_ids, common_chr_ids))[0]]
common_idx_1 = np.zeros(0, dtype=int)
common_idx_2 = np.zeros(0, dtype=int)
for i in common_chr_ids:
perchrTar_ix_1 = np.where(common_ins_1.g_chrs == i)[0]
perchrTar_ix_2 = np.where(common_ins_2.g_chrs == i)[0]
perchrTar_pos_1 = np.array(common_ins_1.pos[perchrTar_ix_1], dtype=int)
perchrTar_pos_2 = np.array(common_ins_2.pos[perchrTar_ix_2], dtype=int)
common_idx_1 = np.append( common_idx_1, perchrTar_ix_1[np.where( np.in1d(perchrTar_pos_1, perchrTar_pos_2, assume_unique=True) )[0]] )
common_idx_2 = np.append( common_idx_2, perchrTar_ix_2[np.where( np.in1d(perchrTar_pos_2, perchrTar_pos_1, assume_unique=True) )[0]] )
return((common_idx_1, common_idx_2))
def get_matching_accs_ix(self, accs, return_np=False):
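        """
        Map accession names to their column indices in the genotype matrix.
        Accessions that are not found yield None; with return_np=True the None
        entries are dropped and an integer numpy array of indices is returned.
        """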
acc_ix = []
for ea in accs:
t_ix = np.where(self.accessions == ea)[0]
if len(t_ix) == 0:
acc_ix.append(None)
else:
acc_ix.append(t_ix[0])
if return_np:
acc_ix = np.array(acc_ix)[np.where(np.not_equal(acc_ix, None))[0]].astype("int")
return(acc_ix)
    def load_snps_given_accs_pos(self, out_file, accs_ix = None, pos_ix=None):
        ## Returns the SNP matrix for the given accession and/or position indices, cached in out_file.npz
if os.path.isfile(out_file + ".npz"):
return(np.load(out_file + ".npz"))
if accs_ix is not None and pos_ix is None:
req_snps = np.zeros((0, len(accs_ix)), dtype = type(self.g.snps[0,0]))
pos_ix = np.zeros(0, dtype =int)
for t_ix in range(0, self.g.snps.shape[0], chunk_size):
t_s = self.g.snps[t_ix:t_ix+chunk_size,][:,accs_ix]
t_filter = np.where(~np.all(t_s == -1, axis = 1))[0] ## Here -1 is no information
req_snps = np.vstack((req_snps, t_s[t_filter,]))
pos_ix = np.append(pos_ix, t_filter + t_ix)
elif pos_ix is not None and accs_ix is None:
req_snps = self.g.snps[pos_ix,:]
accs_ix = np.arange(self.accessions.shape[0])
elif accs_ix is not None and pos_ix is not None:
req_snps = self.g.snps[pos_ix,:][:,accs_ix]
else:
            log.warning("provide at least one of accs_ix or pos_ix!")
req_snps = np.zeros((0,0), dtype = type(self.g.snps[0,0]))
np.savez(out_file, req_snps = req_snps, accs_ix=accs_ix, pos_ix=pos_ix)
return(np.load(out_file + ".npz"))
def get_polarized_snps(self, polarize_geno = 1, return_float=True):
if hasattr(self, "req_snps"):
self.pol_snps = _polarize_snps( np.array(self.req_snps), polarize_geno)
if "pol_snps" not in self.__dict__.keys():
if self.g.snps.shape[0]/chunk_size > 2000: ## It is a huge array
self.pol_snps = np.zeros((0,self.g.snps.shape[1]), dtype="int8")
for t_ix in range(0, self.g.snps.shape[0], chunk_size):
t_s = _polarize_snps( self.g.snps[t_ix:t_ix+chunk_size,:], polarize_geno )
self.pol_snps = np.vstack((self.pol_snps, t_s))
else:
self.pol_snps = _polarize_snps( np.array(self.g.snps), polarize_geno)
if return_float:
self.pol_snps_fl = self.np_snp_to_pd_df(self.pol_snps, drop_na_all=True)
def get_af_snps(self, no_accs_missing_info, return_nind = False, filter_snps_ix = None, filter_acc_ix = None, polarize_geno = 1, return_maf = True):
"""
        Calculate allele frequencies for SNPs in the genotype matrix (self.g.snps)
        input:
            no_accs_missing_info : minimum number of accessions with informative (non-missing) calls required at a SNP (passed on as min_informative)
            filter_acc_ix : if given, only these accession indices are considered; a dict of index arrays returns per-subpopulation frequencies
            filter_snps_ix : if given, only these SNP indices are considered
            return_maf : boolean, return minor allele frequency instead of the plain allele frequency
            return_nind : boolean, also return the number of informative accessions per SNP
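        Example (illustrative accession IDs; assumes `g` is a loaded Genotype object):
            acc_ix = g.get_matching_accs_ix(["6909", "6911"], return_np=True)
            maf, nind = g.get_af_snps(no_accs_missing_info=1, return_nind=True, filter_acc_ix=acc_ix)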
"""
multi_subpop_to_check = False
if filter_acc_ix is None:
acc_ix_to_check = np.arange( self.g.snps.shape[1] )
elif type(filter_acc_ix) is dict:
multi_subpop_to_check = True
else:
acc_ix_to_check = np.array( filter_acc_ix )
if filter_snps_ix is None:
snps_ix_to_check = np.arange( self.g.snps.shape[0] )
else:
snps_ix_to_check = np.array( filter_snps_ix )
if multi_subpop_to_check:
maf_snps = {}
nind_snps = {}
for epop in filter_acc_ix.keys():
assert type(filter_acc_ix[epop]) is np.ndarray, "provide numpy arrays in a dictionary when giving subpopulations"
maf_snps[epop] = np.zeros(0, dtype="float")
nind_snps[epop] = np.zeros(0, dtype="float")
else:
maf_snps = np.zeros(0, dtype="float")
nind_snps = np.zeros(0, dtype="int")
for t_ix in range(0, snps_ix_to_check.shape[0], chunk_size):
max_ix = min(snps_ix_to_check.shape[0], t_ix+chunk_size)
if multi_subpop_to_check:
for epop in filter_acc_ix.keys():
t_s = self.g.snps[snps_ix_to_check[t_ix:max_ix],:][:,filter_acc_ix[epop]]
t_maf, t_t = calculate_af_snp_mat(
t_s,
min_informative = no_accs_missing_info,
polarize_geno = polarize_geno,
return_maf = return_maf
)
maf_snps[epop] = np.append(maf_snps[epop], t_maf )
nind_snps[epop] = np.append(nind_snps[epop], t_t)
else:
t_s = self.g.snps[snps_ix_to_check[t_ix:max_ix],:][:,acc_ix_to_check]
t_maf, t_t = calculate_af_snp_mat(
t_s,
min_informative = no_accs_missing_info,
polarize_geno = polarize_geno,
return_maf = return_maf
)
maf_snps = np.append(maf_snps, t_maf )
nind_snps = np.append(nind_snps, t_t)
if return_nind:
return( (maf_snps, nind_snps) )
return(maf_snps)
@staticmethod
def np_snp_to_pd_df(np_arr, drop_na_all = True):
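        """
        Convert an integer SNP matrix into a float pandas DataFrame:
        missing calls (-1) become NaN and heterozygous calls (2) become 0.5.
        With drop_na_all=True, rows that are entirely NaN are dropped;
        otherwise any row containing NaN is dropped.
        """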
np_arr = np.array(np_arr, dtype = float)
np_arr[np_arr == -1] = np.nan
np_arr[np_arr == 2] = 0.5
pd_df = pd.DataFrame(np_arr)
if drop_na_all:
return(pd_df.dropna(how = "all"))
else:
return(pd_df.dropna(how = "any"))
def identify_segregating_snps(self, accs_ix ):
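        """
        Identify SNP indices that segregate (are not identical) among the given
        accession indices. Returns None when more than half of all accessions
        are requested; SNPs with no informative calls are excluded.
        """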
assert type(accs_ix) is np.ndarray, "provide an np array for list of indices to be considered"
assert len(accs_ix) > 1, "polymorphism happens in more than 1 line"
if len(accs_ix) > (len(self.accessions) / 2):
return( None )
if len(accs_ix) < 10:
t_snps = np.zeros(( self.g_acc.snps.shape[0], len(accs_ix) ))
for ef in range(len(accs_ix)):
t_snps[:,ef] = self.g_acc.snps[:,accs_ix[ef]]
seg_counts = segregting_snps(t_snps)
div_counts = np.divide(seg_counts[0], seg_counts[1], where = seg_counts[1] != 0 )
seg_ix = np.setdiff1d(np.where(div_counts < 1 )[0], np.where(seg_counts[1] == 0)[0])
return( seg_ix )
NumSNPs = self.g.positions.shape[0]
seg_counts = np.zeros(0, dtype=int)
total_counts = np.zeros(0, dtype=int)
for j in range(0, NumSNPs, chunk_size):
t1001SNPs = np.array(self.g.snps[j:j+chunk_size,:][:,accs_ix], dtype=float)
t1001SNPs = segregting_snps( t1001SNPs )
seg_counts = np.append(seg_counts, t1001SNPs[0] )
total_counts = np.append(total_counts, t1001SNPs[1] )
div_counts = np.divide(seg_counts, total_counts, where = total_counts != 0 )
seg_ix = np.setdiff1d(np.where(div_counts < 1 )[0], np.where(total_counts == 0)[0])
return( seg_ix )
def get_chr_ind(self, echr):
real_chrs = np.array( [ ec.replace("Chr", "").replace("chr", "") for ec in self.chrs ] )
if type(echr) is str or type(echr) is np.string_:
echr_num = str(echr).replace("Chr", "").replace("chr", "")
if len(np.where(real_chrs == echr_num)[0]) == 1:
return(np.where(real_chrs == echr_num)[0][0])
else:
return(None)
echr_num = np.unique( np.array( echr ) )
ret_echr_ix = np.zeros( len(echr), dtype="int8" )
for ec in echr_num:
t_ix = np.where(real_chrs == str(ec).replace("Chr", "").replace("chr", "") )[0]
ret_echr_ix[ np.where(np.array( echr ) == ec)[0] ] = t_ix[0]
return(ret_echr_ix)
def determine_snp_ix_given_bed(self, req_bed):
"""
Determine the indices of hdf5 file from the bed positions
input:
            req_bed: either an array ["Chr1", 1, 100] or a string "Chr1,1,1000"
output:
Numpy array of indices
"""
if type(req_bed) is str:
req_bed = req_bed.split(",")
assert len(req_bed) == 3, "provide a bed region, ex. Chr1,1,1000"
req_bed[1] = int(req_bed[1])
req_bed[2] = int(req_bed[2])
g_chr_pos = self.g.chr_regions[self.get_chr_ind( req_bed[0] )]
g_pos = self.g.positions[g_chr_pos[0]:g_chr_pos[1]]
snp_start_ix = np.searchsorted(g_pos, req_bed[1]) + g_chr_pos[0]
snp_end_ix = np.searchsorted(g_pos, req_bed[2]) + g_chr_pos[0]
return(np.arange(snp_start_ix, snp_end_ix))
def genotypes_for_scikit(self, accs_ix, filter_pos_ix):
t_array = np.zeros( (len(filter_pos_ix), len(accs_ix), 2), dtype = 'int8' )
# if filter_pos_ix = None:
# filter_pos_ix = np.array()
t_snps = self.g.snps[filter_pos_ix,:][:,accs_ix]
t_array[t_snps == 1] = np.array([1, 1])
t_array[t_snps == 2] = np.array([0, 1])
return( t_array )
def kinship_given_snps(self, filter_acc_ix = None, filter_snp_ix = None):
"""
Function to calculate the kinship between all the pairs
input:
filter_acc_ix:: list of accessions for which kinship needs to be calculated
filter_snp_ix:: only consider these SNP positions
"""
        if filter_acc_ix is None:
            num_lines = self.accessions.shape[0]
            # use all accessions when no filter is given; None cannot be used
            # directly as a column index further below
            filter_acc_ix = np.arange(num_lines)
else:
num_lines = np.array(filter_acc_ix).shape[0]
if filter_snp_ix is None:
num_snps = self.g.snps.shape[0]
filter_snp_ix = np.arange( num_snps )
else:
            num_snps = np.array(filter_snp_ix).shape[0]
            # keep the provided SNP indices rather than replacing them with a
            # 0..num_snps range
            filter_snp_ix = np.array(filter_snp_ix)
# t_kin = calc_kinship_mat( self.g.snps[:] )
k_mat = sp.zeros((num_lines, num_lines), dtype="uint32")
total_info_snps = sp.zeros((num_lines, num_lines), dtype="uint32")
log.info('kinship calculation')
chunk_i = 0
for t_snp_ix in range(0, num_snps, chunk_size):
chunk_i += 1
t_snp_end = min(num_snps, t_snp_ix+chunk_size)
t_snp = self.g.snps[filter_snp_ix[t_snp_ix:t_snp_end],:][:,filter_acc_ix]
t_k_mat, t_num_snps = calc_kinship_mat(t_snp, return_counts=True)
k_mat = k_mat + t_k_mat
total_info_snps = total_info_snps + t_num_snps
if chunk_i % 100 == 0:
log.info("Progress: %s chunks", chunk_i)
#kin_mat = k_mat / (2 * num_snps) + 0.5
kin_mat = np.divide(k_mat, total_info_snps)
return(kin_mat)
def calculate_ld(self, snp_ix, accs_ix):
## adapated function of PyGWAS
t_snps = self.g.snps[snp_ix,:][accs_ix,:]
t_snps[t_snps == -1] = np.nan
return( calculate_ld( t_snps.T ) )
def mismatch_between_accs( self, acc_x_ix, acc_y_ix, bin_length = None, genome_class = None):
"""
Function to get the mismatch between a pair
input:
acc_x_ix -- index for accession x
acc_y_ix -- index for accession y
output:
numpy binary array with shape (num_snps,)
has NA if either x or y has NA
1 if there is match between x and y
            0 if there is a mismatch
"""
snps_x = self.g_acc.snps[:,acc_x_ix]
snps_y = self.g_acc.snps[:,acc_y_ix]
mismatch_xy = np.zeros( snps_x.shape[0] )
snps_x = numpy.ma.masked_less(numpy.ma.masked_greater(snps_x, 2), 0)
snps_y = numpy.ma.masked_less(numpy.ma.masked_greater(snps_y, 2), 0)
mismatch_xy[np.where( snps_x == snps_y )[0]] = 1
mismatch_xy[np.where(snps_x.mask | snps_y.mask)[0] ] = np.nan
if bin_length is None:
return(mismatch_xy)
assert type(bin_length) is int, "provide an interger for window length"
assert type(genome_class) is genomes.Genome, "provide genome class to determine windows in genome"
mismatch_xy_df = pd.DataFrame( columns=['chr', 'start', 'end', 'mismatch'] )
iter_windows = genome_class.get_bins_genome( self.g, bin_length )
ef_ix = 0
for ef in iter_windows:
mismatch_xy_df.loc[ef_ix, 'chr'] = genome_class.chrs[ef[0]]
mismatch_xy_df.loc[ef_ix, 'start'] = ef[1][0]
mismatch_xy_df.loc[ef_ix, 'end'] = ef[1][1]
mismatch_xy_df.loc[ef_ix, 'mismatch'] = 1 - np.nanmean(mismatch_xy[ef[2]])
ef_ix += 1
return( mismatch_xy_df )
def calculate_ld(snps):
"""
Function to calculate r2 for given numpy array of SNPs
"""
#filter non binary snps
snps_t = sp.transpose(snps)
snps_stand = sp.transpose((snps_t - sp.mean(snps, 1)) / sp.std(snps, 1))
r2_values = sp.dot(snps_stand, sp.transpose(snps_stand))
r2_values *= (1.0 / snps.shape[1])
r2_values **= 2
return(r2_values)
def calculate_af_snp_mat(snp_mat, min_informative = 0, polarize_geno = 1, return_maf = True):
"""
    Function to calculate allele frequency given a snp matrix
rows are SNP positions
columns are individuals
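    Illustrative example: for a row [0, 1, 1, -1] with polarize_geno = 1,
    three calls are informative, the alternative allele count is 2*2 = 4,
    the frequency is 4/6 and the returned minor allele frequency is 1/3.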
"""
## first calculate the number of informative individuals. remove ones with -1
num_alleles = snp_mat.shape[1] - np.sum(snp_mat == -1, axis = 1)
## calculate number of alternative alleles
num_alt = np.multiply(2, np.sum(snp_mat == polarize_geno, axis = 1)) + np.sum(snp_mat == 2, axis = 1)
## get frequencies
maf_snps = np.repeat(np.nan, snp_mat.shape[0] )
informative_ix = np.where(num_alleles > min_informative)[0]
maf_snps[ informative_ix ] = np.array(num_alt[informative_ix], dtype = float) / np.multiply(2, num_alleles[informative_ix])
if return_maf:
maf_snps = np.minimum( maf_snps, 1 - maf_snps )
return( (maf_snps, num_alleles) )
def segregting_snps(t):
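    # Per row (SNP): count the non-missing calls and, after sorting, the
    # number of adjacent identical calls (+1). When the two counts differ the
    # SNP carries more than one genotype, i.e. it segregates in these lines.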
t[t < 0] = np.nan
t = np.sort(t,axis=1)
t_r_sum = np.sum( ~np.isnan(t), axis = 1)
t_sum = np.nansum(t[:,1:] == t[:,:-1], axis=1) + 1
return((t_sum, t_r_sum))
def _polarize_snps(snps, polarize_geno=1, genotypes=[0, 1]):
assert len(genotypes) == 2, "assuming it is biallelic"
t_s = np.array(snps)
t_int_ix = np.where(np.sum(t_s == polarize_geno, axis = 1) > float(snps.shape[1])/2)[0]
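    # For rows where polarize_geno is the majority call, swap the two genotype
    # codes below, using 3 as a temporary placeholder so the reassignments do
    # not collide.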
t_replace = t_s[t_int_ix, :]
t_replace[t_replace == genotypes[1]] = 3
t_replace[t_replace == genotypes[0]] = genotypes[1]
t_replace[t_replace == 3] = genotypes[0]
t_s[t_int_ix, :] = t_replace
return(t_s)
def get_sq_diversity_np(snps, acc_ix=None):
    assert type(snps) is pd.core.frame.DataFrame, "please provide a pd.DataFrame as input"
if isinstance(acc_ix, numbers.Integral):
assert acc_ix < snps.shape[1], "index of a reference to get sq diversity for all the other"
kin_mat = np.zeros(snps.shape[1], dtype=float)
for i in range(snps.shape[1]):
if i == acc_ix:
kin_mat[acc_ix] = 0
else:
t_s = snps.iloc[:,[acc_ix,i]]
kin_mat[i] = allel.sequence_diversity(range(snps.shape[0]), allel.AlleleCountsArray(np.column_stack((np.sum(t_s == 0, axis =1) * 2, np.sum(t_s == 0.5, axis =1) * 2, np.sum(t_s == 1, axis =1) * 2))))
return(kin_mat)
if acc_ix is None:
acc_ix = np.arange(snps.shape[1])
assert type(acc_ix) is np.ndarray, "provide an index for samples to get pairwise scores"
kin_mat = pd.DataFrame(0, index = acc_ix, columns = acc_ix, dtype = float)
for i,j in itertools.combinations(acc_ix, 2):
t_k = allel.sequence_diversity(range(snps.shape[0]), allel.AlleleCountsArray(np.column_stack((np.sum(snps.iloc[:,[i,j]] == 0, axis =1) * 2, np.sum(snps.iloc[:,[i,j]] == 0.5, axis =1) * 2, np.sum(snps.iloc[:,[i,j]] == 1, axis =1) * 2))))
#t_k = np.sum(snps.iloc[:,i] == snps.iloc[:,j])/float(snps.shape[0])
kin_mat.loc[i,j] = t_k
kin_mat.loc[j,i] = t_k
return(kin_mat)
def snpmat_character_to_biallellic(snpmat, polarize = True):
assert type(snpmat) is pd.DataFrame, "please provide a pd dataframe, ideally a small array"
snpmat_num = pd.DataFrame(dtype="int", index = snpmat.index, columns = snpmat.columns )
genotypes=["A", "T", "G", "C"]
snpmat_num[snpmat == genotypes[0]] = 0
snpmat_num[snpmat == genotypes[1]] = 1
snpmat_num[snpmat == genotypes[2]] = 2
snpmat_num[snpmat == genotypes[3]] = 3
snpmat_num[~snpmat.isin(genotypes)] = -1
# snpmat_num[snpmat == "N"] = -1
for index, row in snpmat_num.iterrows():
t_r = row.factorize(sort=True)
t_r[0][t_r[0] == 0] = -1
t_r[0][t_r[0] == 1] = 0
t_r[0][t_r[0] == 2] = 1
# t_r[0][t_r[0] == 3] = 2
snpmat_num.loc[index] = t_r[0]
if polarize:
return(_polarize_snps(snpmat_num, polarize_geno=1, genotypes=[0, 1]))
return(np.array(snpmat_num))
def calc_kinship_mat(snp, return_counts = False, snp_dtype="int8"):
"""
Calculate kinship given a SNP matrix
only taking values 0, 1 and -1
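    Rows are assumed to be SNP positions and columns individuals (the same
    orientation used elsewhere in this module); the returned matrix is
    (num individuals x num individuals).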
"""
snps_array = sp.array(snp)
snps_array = snps_array.T
info_array = sp.mat(np.copy(snps_array).astype(float))
info_array[info_array >= 0] = 1
info_array[info_array < 0] = 0
num_snps = info_array * info_array.T
snps_array = snps_array.astype(float)
snps_array[snps_array > 1] = 0.5
snps_array[snps_array < 0] = 0.5
sm = sp.mat(snps_array * 2.0 - 1.0)
k_mat = sm * sm.T
if return_counts:
return k_mat, num_snps
else:
return np.divide(k_mat, num_snps)
| mit | 7,767,166,785,812,983,000 | 46.683973 | 244 | 0.571625 | false |
mherkazandjian/ismcpak | tests/run_dynamicGmechGrid.py | 1 | 6351 | """
<keywords>
example, pdr, amuse, dynamic, mechanical, heating
</keywords>
<description>
run a grid of PDR models using the amuse interface as a function of
n, G0 and gmech
run using the command:
$ cd $ISMCPAK/test
$ mpirun -np 1 run_surfaceGrid.py
     $ mpirun -np <nproc> run_dynamicGmechGrid.py
</description>
"""
#-------------------------------------------------------------------------------
import matplotlib
#matplotlib.use('Qt4Agg')
matplotlib.use('PS')
from numpy import *
from numpy.random import *
import pylab as pyl
from amuse.community.pdr import interface
from mesh import *
from chemicalNetwork import *
from enumSpecies import *
from ismUtils import *
from meshUtils import *
from meshUtils import meshArxv
import time
import os
import pickle
import numpy as np
nWorker = 1 # number of processes
pdr = interface.pdrInterface(channel_type = 'mpi', number_of_workers = nWorker, redirection='none')
dataDir = '/ism/ismcpak/data/'
outputDir = '../../data/oneSided/dynamicGrid/'
# create the directory structure of the output
if not os.path.isdir(outputDir):
os.makedirs(outputDir)
meshes_dir = os.path.join(outputDir, 'meshes')
if not os.path.isdir(meshes_dir):
os.makedirs(meshes_dir)
metallicity = 1.0 # in terms of solar metallicity
plotRangenG0 = [[0,6],[0,6]]
# path of the database from which the surface mech heating
# rates will be extracted
databasePath = os.path.join(outputDir, '../surfaceGrid-z-1.0-no-gmech/')
#----------------amuse modeling parameters----------------------------------------
pdr.set_outputDir (outputDir + 'meshes/');
pdr.set_species_fName (dataDir + 'pdr/species.inp');
pdr.set_underUbundant_fName (dataDir + 'pdr/underabundant.inp');
pdr.set_rate99_fName (dataDir + 'pdr/rate99Fixed.inp');
pdr.set_selfSheilding_CO_fName (dataDir + 'pdr/self_shielding_CO.inp');
pdr.set_rotationalCooling_baseName (dataDir + 'pdr/rotationalcooling/rotcool');
pdr.set_vibrationalCooling_baseName(dataDir + 'pdr/vibrationalcooling/vibcool');
pdr.set_database_fName (dataDir + 'pdr/z-1.0.dat');
pdr.set_zeta (5.0e-17);
pdr.set_S_depletion (200.0);
pdr.set_TTol (1e-3);
pdr.set_CTol (1e-3);
pdr.set_metalicity (1.0);
pdr.set_AvMax (30.0);
pdr.set_slabSizeCrit (0.5);
pdr.set_min_deltaAv (0.01);
pdr.set_max_deltaAv (0.5);
pdr.set_maxSlabs (200);
# ======== dump parameter to be used in the analysis to a pickle file =========
# .. todo:: due to some formatting issues/inconvenience the files used by ismcpak
# are slightly different from the ones used by the C PDR code, hence the slightly
# different paths in the dict below
parms = {
'dirPath' : outputDir,
'relativeGmech': True,
'runDirPath2' : databasePath,
'metallicity' : metallicity,
'chemistry' : {
'rxnFile' : dataDir + "rate99Fixed.inp",
'specNumFile' : dataDir + "species.inp",
'underAbunFile' : dataDir + "underabundant.inp", # .. todo:: fix this in chemicalNetwork.py / ignore first line
'removeManual' : ['13CH3'],
'baseSpecies' : 'baseSpeciesDefault', #name of the module holding the base species
'umistVer' : 'umist99',
}
}
pickle.dump(parms, open(os.path.join(outputDir, 'used_parms.pkl'), 'w'))
# ========================= done dumping parameter ============================
# getting the basic species defined in baseSpecies.py
import baseSpeciesDefault as baseSpecies
baseSpecs = baseSpecies.baseSpecies()
# reading the archive
print 'setting up the archive'
t0 = time.time()
arxv = meshArxv(dirPath = databasePath, readDb = True)
print 'time reading %f' % (time.time() - t0)
#--------------------------grid point to be modelled-------------------------------
dx = 3.0 # log10 density
xMin = 0.0
xMax = 6.01
dy = 3.0 # log10 G0
yMin = 0.0
yMax = 6.01
# factor of surface heating to be added as mechanical heating
#z = [1e-10, 0.001, 0.01, 0.05, 0.1, 0.25, 0.50, 0.75, 1.0,]
z = [1e-10, 0.01, 0.05, 0.1,]
#z = [0.0001, 0.001]
#-----------------------getting the mech heating rates at grid points--------------
x, y = mgrid[ xMin:(xMax+1e-10):dx, yMin:(yMax+1e-10):dy]
grid_shape = x.shape
x = x.flatten()
y = y.flatten()
# getting the interpolation function for the surface heating
nPts = x.size
gMechZero = x.copy()
gMechZero[:] = -50.0 # lowest mechanical energy used (in log)
f = arxv.construct3DInterpolationFunction(quantity = ['therm', 'heating'], slabIdx = 0, log10 = True)
dataNew = np.array( [x, y, gMechZero] ).T
gammaSurf = f(dataNew)
print np.array([x, y, gammaSurf]).T
inds = np.where( np.isnan(gammaSurf) == False )
x = x[ inds ]
y = y[ inds ]
gammaSurf = gammaSurf[ inds ]
xTmp, yTmp, zTmp = [], [], []
for v in z:
xTmp.append(x)
yTmp.append(y)
gMechGrid = np.log10(v * (10.0**gammaSurf) )
zTmp.append( gMechGrid )
x = np.array(xTmp).flatten()
y = np.array(yTmp).flatten()
z = np.array(zTmp).flatten()
n = x.size
pyl.ion()
figModels = pyl.figure()
axsModels = figModels.add_axes([0.1, 0.1, 0.8, 0.8])
fig1 = pyl.Figure()
axsModels.plot( x, y, 'o' )
axsModels.set_xlim( [-1, 7] )
axsModels.set_ylim( [-1, 7] )
pyl.draw()
print 'number of models to run = ', n
rho = x
G0 = y
Lmech = 10**z
# initializing variables and running the models
pdr.initialize()
ids,err = pdr.add_mesh(rho, 1000.0, G0, Lmech)
# writing the parameter into an ascii file
f = file(outputDir+'parameters.out', 'w')
for i in arange(n):
f.write( '%06d %.3f %.3f %.3f\n' % (ids[i], rho[i], G0[i], Lmech[i]))
f.close()
#running the models
pdr.calc_equilibrium()
mshErr,err = pdr.get_errorFlags(ids)
T,err = pdr.get_temperature(ids)
# writing the results to an ascii file
f = file(outputDir+'results.out', 'w')
for i in arange(n):
f.write( '%d %.3f %.3f %.3e %.d\n' % (ids[i], rho[i], G0[i], Lmech[i], mshErr[i]))
f.close()
# constructing the archive
print 'pack the meshes into a database'
t0 = time.time()
arxvW = meshArxv(dirPath=outputDir)
arxvW.construct(meshNamePrefix='mesh', writeDb=True)
print 'time constructing %f' % (time.time() - t0)
print 'done'
| gpl-3.0 | -4,717,182,305,895,810,000 | 30.440594 | 134 | 0.611085 | false |
samuelmanzer/interpolation | chebyshev_nodes.py | 1 | 3491 | #!/usr/bin/env python
###############################################################################
# Interpolation
# Copyright (C) Samuel F. Manzer. All rights reserved.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
#
# FILE: lagrange_interpolation.py
# AUTHOR: Samuel F. Manzer
# URL: http://www.samuelmanzer.com/
###############################################################################
from argparse import ArgumentParser
import numpy as np
import matplotlib.pyplot as plt
import itertools
import tempfile
import pdb
import math
from lagrange_poly import *
parser = ArgumentParser(description="Produces plots of Lagrange interpolation of cos(x) for various numbers of Chebyshev and equally spaced points")
args = parser.parse_args()
start = 0
end = (5*math.pi)/2
n_eval_pts = 1000
eval_step_size = float(end-start)/n_eval_pts
n_points_range = range(2,6,1)
x = np.linspace(start,end,n_eval_pts)
y_exact = np.cos(x)
f1,(ax1,ax2) = plt.subplots(1,2,sharey=True)
ax1.set_ylim(ymin=-1.1,ymax=1.5)
ax1.set_title("Equally-Spaced")
ax2.set_title("Chebyshev")
f2,ax3 = plt.subplots()
f3,ax4 = plt.subplots()
# Equally spaced points
evenly_spaced_sets = [np.linspace(start,end,n_points) for n_points in n_points_range]
evenly_spaced_polys = [get_lagrange_poly(interp_points,math.cos) for interp_points in evenly_spaced_sets]
lines,mae_list,rmsd_list,maxe_list = plot_lagrange_polys(x,n_points_range,evenly_spaced_polys,y_exact,ax1)
texts_1 = plot_stats(mae_list,rmsd_list,maxe_list,n_points_range,ax3)
f1.legend(lines, list(map(lambda x: str(x)+" Points", n_points_range)) + ["cos(x)"], loc="upper right")
# Chebyshev points - we must transform them to our interval
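# cos((2k-1)*pi/(2n)) gives the n Chebyshev nodes on [-1, 1]; the affine map
# x = ((end - start)*t + start + end)/2 then rescales them onto [start, end].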
cp_sets = [ [ math.cos((float(2*k - 1)/(2*n))*math.pi) for k in range(1,n+1)] for n in n_points_range ]
tcp_sets = [ [ 0.5*((end - start)*pt + start + end) for pt in point_set] for point_set in cp_sets]
chebyshev_point_polys = [get_lagrange_poly(interp_points,math.cos) for interp_points in tcp_sets]
lines,mae_list,rmsd_list,maxe_list = plot_lagrange_polys(x,n_points_range,chebyshev_point_polys,y_exact,ax2)
texts_2 = plot_stats(mae_list,rmsd_list,maxe_list,n_points_range,ax4)
ax3.set_title("Lagrange Interpolation with Equally-Spaced Points")
ax4.set_title("Lagrange Interpolation with Chebyshev Points")
# Awful haxx for text labels above bars to not get cut off by top of figure
tmp_file = tempfile.NamedTemporaryFile()
f2.savefig(tmp_file.name)
f3.savefig(tmp_file.name)
renderer_2 = f2.axes[0].get_renderer_cache()
renderer_3 = f3.axes[0].get_renderer_cache()
for (ax,renderer,texts) in [(ax3,renderer_2,texts_1),(ax4,renderer_3,texts_2)]:
window_bbox_list = [t.get_window_extent(renderer) for t in texts]
data_bbox_list = [b.transformed(ax.transData.inverted()) for b in window_bbox_list]
data_coords_list = [b.extents for b in data_bbox_list]
heights = [ coords[-1] for coords in data_coords_list]
ax.set_ylim(ymax=max(heights)*1.05)
plt.show()
| lgpl-3.0 | 2,629,411,220,651,227,600 | 41.573171 | 136 | 0.705529 | false |
tom-f-oconnell/multi_tracker | scripts/bag2vid.py | 1 | 28740 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import os
import glob
import sys
from enum import Enum
from distutils.version import LooseVersion, StrictVersion
import argparse
import warnings
import numpy as np
import cv2
# TODO add this as rosdep or import it behind '-c' flag specification
import pandas as pd
import rosbag
from multi_tracker.msg import DeltaVid
# TODO factor into some multi_tracker util thing if we are going to duplicate
# this portion (+ put all prints behind some (ros?) debug flag / ros logdebug
#print('Using open cv: ', cv2.__version__)
if StrictVersion(cv2.__version__.split('-')[0]) >= StrictVersion("3.0.0"):
OPENCV_VERSION = 3
#print('Open CV 3')
else:
OPENCV_VERSION = 2
#print('Open CV 2')
class InterpType(Enum):
NONE = 0
NEAREST = 1
LINEAR = 2
# TODO write some round trip tests with a lossless codec / approx with
# (potentially) lossy codec to be used
class RunningStats:
"""
Adapted from Marc Liyanage's answer here:
https://stackoverflow.com/questions/1174984
...which is itself based on:
https://www.johndcook.com/blog/standard_deviation/
See also "Welford's online algorithm" from this Wikipedia page:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
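    A minimal usage sketch (the frame shapes and values here are arbitrary,
    illustrative choices):
        stats = RunningStats(np.zeros((2, 2), dtype=np.uint8))
        stats.push(np.full((2, 2), 4, dtype=np.uint8))
        m = stats.mean()                # elementwise mean of pushed frames
        s = stats.standard_deviation()  # elementwise sample standard deviation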
"""
def __init__(self, initial_frame):
self.n = 1
# NOTE: it seems that without always immediately converting to
# np.float64 (from uint8 the frames are initially represented as), the
# minimum variance can sometimes be negative. Not 100% sure this could
# not happen despite this change, but some discussion of this algorithm
# I found online indicates this algorithm should not have this problem
# (a problem that some other algorithms *do* have).
self.old_m = self.new_m = initial_frame.astype(np.float64)
self.old_s = np.zeros_like(initial_frame, dtype=np.float64)
self.new_s = np.zeros_like(initial_frame, dtype=np.float64)
def push(self, frame):
frame = frame.astype(np.float64)
self.n += 1
self.new_m = self.old_m + (frame - self.old_m) / self.n
self.new_s = self.old_s + (frame - self.old_m) * (frame - self.new_m)
self.old_m = self.new_m
self.old_s = self.new_s
def mean(self):
return self.new_m
def variance(self):
var = self.new_s / (self.n - 1) if self.n > 1 else self.new_s
assert var.min() >= 0
return var
def standard_deviation(self):
return np.sqrt(self.variance())
class Converter:
def __init__(self, directory, mode='mono', **kwargs):
self.directory = os.path.abspath(os.path.expanduser(directory))
bag_files = glob.glob(os.path.join(self.directory, '*.bag'))
if len(bag_files) == 0:
raise IOError('no bagfiles in directory {}'.format(self.directory))
elif len(bag_files) > 1:
raise IOError('more than one bagfile in directory {}'.format(
self.directory
))
self.bag_file = bag_files[0]
if 'no_avi' in kwargs and kwargs['no_avi']:
self.video_filename = None
else:
self.video_filename = self.bag_file[:-4] + '.avi'
if 'timestamps' in kwargs and kwargs['timestamps']:
self.write_timestamps = True
else:
self.write_timestamps = False
if 'overwrite' in kwargs and kwargs['overwrite']:
overwrite = True
else:
overwrite = False
if 'overlay_csv' in kwargs and kwargs['overlay_csv']:
overlay_csv = kwargs['overlay_csv']
if not os.path.exists(overlay_csv):
raise IOError('{} (specified via -c) not found'.format(
overlay_csv
))
# `self.overlay_df` will be checked by a call to
# `self._validate_overlay_df()` AFTER `self.start_time` is set in
# first iteration of the loop in `process_bag`.
self.overlay_df = pd.read_csv(overlay_csv)
self.curr_overlay_row = 0
self._checked_text_overlay = False
else:
self.overlay_df = None
if 'verbose' in kwargs and kwargs['verbose']:
self.verbose = True
else:
self.verbose = False
self._n_zero_length_delta_vid_values = 0
if not overwrite and (
self.video_filename and os.path.exists(self.video_filename)):
print(self.video_filename, 'already exists.')
self.video_filename = None
if self.video_filename is None:
self.write_timestamps = False
self.min_projection_fname = None
if 'min_project' in kwargs and kwargs['min_project']:
self.min_projection_fname = os.path.join(self.directory,
'_'.join(os.path.basename(self.bag_file).split('_')[:4]) +
'_min_projection.png'
)
if not overwrite and os.path.exists(self.min_projection_fname):
print(self.min_projection_fname, 'already exists.')
self.min_projection_fname = None
self.std_dev_fname = None
if 'std_dev' in kwargs and kwargs['std_dev']:
self.std_dev_fname = os.path.join(self.directory,
'_'.join(os.path.basename(self.bag_file).split('_')[:4]) +
'_std_dev.png'
)
if not overwrite and os.path.exists(self.std_dev_fname):
print(self.std_dev_fname, 'already exists.')
self.std_dev_fname = None
if (self.video_filename is None and
self.min_projection_fname is None and self.std_dev_fname is None):
sys.exit()
self.delta_video_topic, freq, self.message_count = self.topic_info()
if self.verbose:
print('Topic:', self.delta_video_topic)
print('Average frame rate: {:.2f} Hz'.format(freq))
print('Number of messages:', self.message_count)
if 'interpolation' in kwargs:
if kwargs['interpolation'] is None:
self.interpolation = InterpType.NONE
        elif kwargs['interpolation'] == 'nearest':
self.interpolation = InterpType.NEAREST
raise NotImplementedError
        elif kwargs['interpolation'] == 'linear':
self.interpolation = InterpType.LINEAR
raise NotImplementedError
else:
self.interpolation = InterpType.NONE
if self.interpolation == InterpType.NONE:
self.desired_frame_rate = freq
else:
# TODO get from arg
self.desired_frame_rate = 30
self.desired_frame_interval = 1.0 / self.desired_frame_rate
# TODO queue of some sort?
self.frames = []
self.frame_times = []
self.background_image = None
self.background_img_filename = None
self.mode = mode
self.videowriter = None
self.min_frame = None
self.running_stats = None
try:
from tqdm import tqdm
self.tqdm = tqdm
self.use_tqdm = True
except ImportError:
self.use_tqdm = False
pass
def _validate_overlay_df(self):
"""
Checks some time information in `pd.DataFrame` derived from overlay CSV
makes sense, including:
- onsets all before offsets (for a given row, representing one interval)
- intervals occuring earlier in time are earlier in CSV
- intervals are non-overlapping
- no intervals occur before first delta video message in bag file
Does NOT check intervals against end of bag file video data, as this
case is handled via warnings in the loop in `process_bag`.
"""
for x in self.overlay_df.itertuples(index=False):
# (both of these should be of type `float`)
assert x.onset < x.offset, 'one onset >= offset'
# Checks intervals are sorted and non-overlapping.
for i in range(len(self.overlay_df)):
if i + 1 >= len(self.overlay_df):
break
curr_row = self.overlay_df.iloc[i]
next_row = self.overlay_df.iloc[i + 1]
# Not allowing equality so that it's always clear which message
# should be drawn.
assert next_row.onset > curr_row.offset
# Ensuring we are calling this after this is defined, since we need it
# to check intervals don't precede start of video data.
assert self.start_time is not None
# Since we already know the intervals are sorted, we can just check that
# the first interval (first row) doesn't precede start time.
start_to_first_onset_s = \
self.overlay_df.iloc[0].onset - self.start_time.to_sec()
if self.verbose:
print('First video frame time to first onset: {:.2f} sec'.format(
start_to_first_onset_s
))
if start_to_first_onset_s < 0:
raise ValueError('At LEAST the first onset preceded first video '
'frame time. This likely reflects a serious error in '
'coordinating stimulus presentation and video acquisition.'
)
def topic_info(self):
bag = rosbag.Bag(self.bag_file)
ti = bag.get_type_and_topic_info()
topics = []
for t, info in ti.topics.items():
if info.msg_type == 'multi_tracker/DeltaVid':
topics.append(t)
bag.close()
if len(topics) == 0:
raise ValueError('no topics of type multi_tracker/DeltaVid '
'in bag file.'
)
elif len(topics) > 1:
raise ValueError('bag has multiple topics of type '
'multi_tracker/DeltaVid.'
)
topic = topics[0]
freq = ti.topics[topic].frequency
count = ti.topics[topic].message_count
return topic, freq, count
def load_background_image(self, png_filename):
"""
Sets self.background_img_filename and attempts to set
self.background_image.
Also slightly reformats ("for ROS hydro"), but this may need fixing.
"""
self.background_img_filename = png_filename
basename = os.path.basename(self.background_img_filename)
full_png_filename = os.path.join(self.directory, basename)
if not os.path.exists(full_png_filename):
raise IOError('background image file ' + full_png_filename +
' did not exist'
)
if not os.path.getsize(full_png_filename) > 0:
raise IOError('background image file ' + full_png_filename +
' was empty'
)
self.background_image = cv2.imread(full_png_filename, cv2.CV_8UC1)
# TODO seems to imply not color, but other code doesnt assert that
# (fix in delta_video_player.py) (might also need to change read
# arguments to support color, perhaps conditional on some check as to
# whether it is color or not)
try:
# for hydro
self.background_image = self.background_image.reshape([
self.background_image.shape[0], self.background_image[1], 1
])
# TODO check this code actually isn't running before deleting
# TODO handle cases by version explicitly or at least specify expected
# error
except:
# for indigo
pass
def write_frame(self, image):
"""
Write a frame to the output file.
Potentially derived from =/= 1 input frames, as resampled in time.
"""
if self.mode == 'color':
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
if self.videowriter is None:
# TODO detect fourcc to use from input extension?
#fourcc = cv2.cv.CV_FOURCC(*'MP4V')
# TODO maybe warn that the data is being compressed
# (it is https://www.xvid.com/faq/) ?
format_code = 'XVID'
if OPENCV_VERSION == 2:
fourcc = cv2.cv.CV_FOURCC(*format_code)
self.videowriter = cv2.VideoWriter(self.video_filename, fourcc,
self.desired_frame_rate, (image.shape[1], image.shape[0]),
1 if self.mode == 'color' else 0
)
elif OPENCV_VERSION == 3:
fourcc = cv2.VideoWriter_fourcc(*format_code)
self.videowriter = cv2.VideoWriter(self.video_filename, fourcc,
self.desired_frame_rate, (image.shape[1], image.shape[0]),
1 if self.mode == 'color' else 0
)
self.videowriter.write(image)
def add_frame_from_deltavid_msg(self, delta_vid):
secs_from_start = (delta_vid.header.stamp - self.start_time).to_sec()
if (self.background_image is None or
self.background_img_filename != delta_vid.background_image):
self.load_background_image(delta_vid.background_image)
assert self.background_image is not None
new_image = self.background_image.copy()
# TODO delete if inspection of multi_tracker code that generates this
# seems to indicate this is not possible. left in cause it was a
# component of an old check, but seemed to be irrelevant
assert delta_vid.values is not None
# This seems to happen each time a new background frame is added (if not
# on the first)? Actually, much more. For example:
# `.../choice_20210129_022422$ rosrun nagel_laminar bag2vid -v`
# includes this in its output:
# ``
# and `.../choice_20210129_044604$ rosrun nagel_laminar bag2vid -v`
# includes: ``
# ...though there it seems neither test experiment had any additional
# background frames (beyond the first set) taken.
if len(delta_vid.values) > 0:
# NOTE: hydro is older than indigo. if i should try to delete one
# branch, try deleting the hydro one.
# TODO check not off by one here?
try:
# for hydro
new_image[delta_vid.xpixels, delta_vid.ypixels, 0] = \
delta_vid.values
# TODO look for specific error type or handle differently
except:
# for indigo
new_image[delta_vid.xpixels, delta_vid.ypixels] = \
delta_vid.values
else:
self._n_zero_length_delta_vid_values += 1
# TODO resample for constant framerate.
# maybe warn if deviate much from it?
if self.interpolation != InterpType.NONE:
self.frames.append(new_image)
self.frame_times.append(secs_from_start)
return new_image, secs_from_start
def closest_timestamp(self, t):
return (round(t.to_secs() / self.desired_frame_rate) *
self.desired_frame_rate
)
# TODO how to resample online? (i don't think i can load all of the videos
# into memory) source too fast -> wait until next goal time and average
# intervening (weighting maybe)
def process_bag(self):
# TODO is there a context manager for this?
bag = rosbag.Bag(self.bag_file)
if self.write_timestamps:
ts_fp = open(os.path.join(self.directory, 'frametimes.txt'), 'w')
# TODO could maybe use bag.get_start_time() (returns a float Unix epoch
# time)
self.start_time = None
self.current_goal_timestamp = 0
# TODO are these guaranteed to be sorted? how close to? (sort if not?)
# make one pass through getting times -> sort -> revisit in sorted order
# (how?) (or just assert they are in order by keeping track of last and
# checking...)
iterate_over = bag.read_messages(topics=[self.delta_video_topic])
if self.use_tqdm:
iterate_over = self.tqdm(iterate_over, total=self.message_count)
# The third value in each element of `iterate_over` is some sort of
# time.
# TODO could maybe check against msg.header.stamp? why even have
# msg.header.stamp in acquisition of these other times are available for
# free, and are always consistent (if true)?
for topic, msg, _ in iterate_over:
if self.start_time is None:
self.start_time = msg.header.stamp
if self.overlay_df is not None:
self._validate_overlay_df()
current_frame, current_frame_time = \
self.add_frame_from_deltavid_msg(msg)
if self.interpolation == InterpType.NONE:
if self.video_filename is not None:
# TODO if i do fix the interpolation implementation(s)
# later, might want to refactor write_frame into a get_frame
# type call so that the overlay can be added in a call
# between that and the write_frame call
if (self.overlay_df is not None and
self.curr_overlay_row < len(self.overlay_df)):
curr_time_s = msg.header.stamp.to_sec()
row = self.overlay_df.iloc[self.curr_overlay_row]
if curr_time_s < row.onset:
text = None
elif row.onset <= curr_time_s <= row.offset:
text = row.overlay
assert type(text) is str and len(text) > 0
elif row.offset < curr_time_s:
self.curr_overlay_row += 1
if self.curr_overlay_row >= len(self.overlay_df):
text = None
else:
# Just assuming that the frame rate is such that
# we won't completely skip past this interval
# (so not checking against the end of this new
# row this iteration). If that were not true,
# would probably want to implment this as
# recursion or something, though we'd also have
# bigger issues then... Mostly just checking
# here to support the case where the CSV
# specifies intervals where one ends the frame
# before the next starts.
row = self.overlay_df.iloc[
self.curr_overlay_row
]
if row.onset <= curr_time_s:
                                    text = row.overlay
else:
text = None
else:
assert False, \
'previous cases should have been complete'
if text is not None:
font = cv2.FONT_HERSHEY_PLAIN
# Only non-int parameter putText seems to take.
font_scale = 5.0
# White. Would need to convert all frames to some
# colorspace to add a colored overlay.
color = 255
font_thickness = 5
# So we can center the text.
(text_width, _), _ = cv2.getTextSize(text, font,
font_scale, font_thickness
)
# (0,0) is at the top left.
y_margin = 80
bottom_left = (
(current_frame.shape[1] - text_width) // 2,
current_frame.shape[0] - y_margin
)
if not self._checked_text_overlay:
before_overlay = current_frame.copy()
cv2.putText(current_frame, text, bottom_left, font,
font_scale, color, font_thickness, cv2.LINE_AA
)
# TODO maybe also overlay counter to next onset?
# or just when not text to overlay?
if not self._checked_text_overlay:
assert not np.array_equal(
before_overlay, current_frame
), 'did not actually draw anything'
self._checked_text_overlay = True
self.write_frame(current_frame)
if self.min_projection_fname is not None:
if self.min_frame is None:
self.min_frame = current_frame
else:
# This is a different fn than `np.min`
self.min_frame = np.minimum(
current_frame, self.min_frame
)
if self.std_dev_fname is not None:
if self.running_stats is None:
self.running_stats = RunningStats(current_frame)
else:
self.running_stats.push(current_frame)
if self.write_timestamps:
print(current_frame_time, file=ts_fp)
'''
if (current_frame_time >=
self.current_goal_timestamp - self.desired_frame_interval / 2):
# we skipped past a goal time (source too slow between these two
# frames) make each intervening time a weighted average of the
# two surrounding frames? or just take closest? (take multiple
# interpolation options?)
# TODO maybe i should just define the goals such that i dont
# need to add half interval to compare? offset initial?
if (current_frame_time > self.current_goal_timestamp +
self.desired_frame_interval / 2):
# TODO TODO handle case where we dont have a previous frame
previous_frame = self.frames[-2]
previous_frame_time = self.frame_times[-2]
current_frame_time = self.frame_times[-1]
# TODO TODO recursion for this part?
tmp_goal = self.current_goal_timestamp
self.current_goal_timestamp = \
self.closest_timestamp(current_frame_time)
# TODO TODO handle case where there were > 1 frames already
# in buffer (average those closer one?)
while (tmp_goal < self.current_goal_timestamp +
self.desired_frame_interval / 2):
# TODO or weighted average / linear or not
closest_frame = (current_frame if
abs(previous_frame_time - tmp_goal) >
abs(current_frame_time - tmp_goal)
else previous_frame)
self.write_frame(closest_frame)
tmp_goal += self.desired_frame_interval
self.frames = []
self.frame_times = []
# we didn't skip a goal time.
# TODO it is possible other frames fall in window that would be
# closest to this goal time
# wait until we go into region closest to next goal time, and
# then average all prior
self.current_goal_timestamp += self.desired_frame_interval
'''
if self.verbose:
print('# of frames with empty delta_vid.values:',
self._n_zero_length_delta_vid_values
)
if self.overlay_df is not None:
last_offset_to_last_frame = \
msg.header.stamp.to_sec() - self.overlay_df.iloc[-1].offset
if self.verbose:
print('Last offset to last frame: {:.2f} sec'.format(
last_offset_to_last_frame
))
if last_offset_to_last_frame < 0:
warnings.warn('At least one offset happened after time of last'
' frame. This is OK if experiment was intentionally stopped'
' early.'
)
# TODO probably (also/exclusively) call all these cleanup functions in
# an atexit call
bag.close()
if self.write_timestamps:
ts_fp.close()
if self.videowriter is not None:
self.videowriter.release()
print('Note: use this command to make a mac / quicktime ' +
'friendly video: avconv -i test.avi -c:v libx264 -c:a ' +
'copy outputfile.mp4'
)
if self.min_projection_fname:
assert self.min_frame is not None
cv2.imwrite(self.min_projection_fname, self.min_frame)
if self.std_dev_fname:
assert self.running_stats is not None
stddev_frame = self.running_stats.standard_deviation()
# Because I'm not sure how the OpenCV code would react in this case.
# It'd probably fail anyway...
assert stddev_frame.max() <= 255, 'stddev > 255 somewhere'
# NOTE: this works, but rounds from np.float64 to uint8, so if you
# try loading this PNG, you will only have the rounded values. If
# you need analysis on the standard deviation image, it would
# probably be best to export full precision somewhere.
cv2.imwrite(self.std_dev_fname, stddev_frame)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--min-project', default=False,
action='store_true', help='Saves the minimum projection of the video to'
' a PNG in current directory.'
)
parser.add_argument('-s', '--std-dev', default=False,
action='store_true', help='Saves the standard deviation image of the'
' video to a PNG in current directory.'
)
parser.add_argument('-a', '--no-avi', default=False,
action='store_true', help='Does NOT save a .avi movie in the current '
'directory.'
)
parser.add_argument('-o', '--overwrite', default=False, action='store_true',
help='Overwrite any file that would be generated, yet already exists.'
)
# TODO arg for x,y position (floats in [0,1] probably) of overlay?
# also maybe font size / color of overlay? or just optional extra columns
# for that information (+ then it could be changed row-by-row)?
# TODO TODO arg for specification of roi (via gui unless position passed by
# other args?) that is used to compute a binary signal (via clustering /
# other automatic thresholding methods) to compute a signal to be compared
# against these intervals? (e.g. for an LED hardware indicator of odor pulse
# to be used to check the times recorded in the stimulus metadata are
# accurate?) maybe rename overlay-csv arg then, and just have overlay as an
# optional column? should i just output the signal to a CSV or something to
# be checked by some other program, or check against intervals in here?
parser.add_argument('-c', '--overlay-csv', default=None, action='store',
help='CSV with columns onset, offset, and overlay. onset and offset '
    'must contain Unix timestamps in the ROS bag file, and each onset '
    'must precede the corresponding offset. overlay contains text '
    'to be overlaid on the image between each onset and offset.'
)
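    # An overlay CSV might look like this (the timestamps are hypothetical
    # Unix times; the overlay column holds the text drawn on the video):
    #   onset,offset,overlay
    #   1611900000.0,1611900005.0,odor A
    #   1611900030.0,1611900035.0,odor B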
parser.add_argument('-v', '--verbose', default=False, action='store_true')
kwargs = {
'interpolation': None,
'timestamps': True
}
kwargs.update(vars(parser.parse_args()))
input_dir = os.getcwd()
conv = Converter(input_dir, **kwargs)
conv.process_bag()
| mit | -8,510,185,894,812,896,000 | 39.365169 | 80 | 0.551183 | false |
GoogleCloudPlatform/ai-platform-samples | training/pytorch/structured/python_package/setup.py | 1 | 1057 | #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'google-cloud-storage>=1.14.0',
'pandas>=0.23.4'
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | PyTorch | Structured | Python Package'
)
| apache-2.0 | 8,624,365,759,072,714,000 | 32.03125 | 80 | 0.676443 | false |
mscross/pysplit | docs/examples/basic_plotting_example.py | 1 | 2137 | """
=============================================
Basic Trajectory Plotting and Using MapDesign
=============================================
How to quickly initialize a matplotlib Basemap with the ``MapDesign``
class and plot ``Trajectory`` paths.
For this example we'll initialize only the January trajectories created in
``bulk_trajgen_example.py``.
"""
import pysplit
trajgroup = pysplit.make_trajectorygroup(r'C:/trajectories/colgate/*jan*')
"""
Basemaps and MapDesign
----------------------
PySPLIT's ``MapDesign`` class uses the matplotlib Basemap toolkit to quickly
set up attractive maps. The user is not restricted to using maps
produced from ``MapDesign``, however- any Basemap will serve in the section
below entitled 'Plotting ``Trajectory`` Paths.
Creating a basic cylindrical map using ``MapDesign`` only requires
``mapcorners``, a list of the lower-left longitude, lower-left latitude,
upper-right longitude, and upper-right latitude values.
The ``standard_pm``, a list of standard parallels and meridians,
may be passed as ``None``.
"""
mapcorners = [-150, 15, -50, 65]
standard_pm = None
bmap_params = pysplit.MapDesign(mapcorners, standard_pm)
"""
Once the ``MapDesign`` is initialized it can be used to draw a map:
"""
bmap = bmap_params.make_basemap()
"""
Plotting ``Trajectory`` Paths
-----------------------------
For this example, we will color-code by initialization (t=0) altitude,
(500, 1000, or 1500 m), which can be accessed via ``Trajectory.data.geometry``,
a ``GeoSeries`` of Shapely ``Point`` objects.
We can store the trajectory color in ``Trajectory.trajcolor`` for convenience.
"""
color_dict = {500.0 : 'blue',
1000.0 : 'orange',
1500.0 : 'black'}
for traj in trajgroup:
altitude0 = traj.data.geometry.apply(lambda p: p.z)[0]
traj.trajcolor = color_dict[altitude0]
"""
For display purposes, let's plot only every fifth ``Trajectory``. The lats,
lons are obtained by unpacking the ``Trajectory.Path``
(Shapely ``LineString``) xy coordinates.
"""
for traj in trajgroup[::5]:
bmap.plot(*traj.path.xy, c=traj.trajcolor, latlon=True, zorder=20)
| bsd-3-clause | 912,440,568,227,639,900 | 30.426471 | 79 | 0.672438 | false |
aestrivex/mne-python | examples/plot_compute_mne_inverse.py | 21 | 1885 | """
================================================
Compute MNE-dSPM inverse solution on evoked data
================================================
Compute dSPM inverse solution on MNE evoked dataset
and stores the solution in stc files for visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=None)
# Save result in stc files
stc.save('mne_%s_inverse' % method)
###############################################################################
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# use peak getter to move vizualization to the time point of the peak
vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
brain.set_data_time_index(time_idx)
# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6)
brain.save_image('dSPM_map.png')
| bsd-3-clause | -3,465,899,873,645,605,000 | 30.416667 | 79 | 0.65252 | false |
mwickert/scikit-dsp-comm | sk_dsp_comm/iir_design_helper.py | 1 | 20593 | """
Basic IIR Bilinear Transform-Based Digital Filter Design Helper
Copyright (c) March 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from logging import getLogger
log = getLogger(__name__)
def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR lowpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
    Atten_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_pass : Passband critical frequency in Hz
f_stop : Stopband critical frequency in Hz
Ripple_pass : Filter gain in dB at f_pass
Atten_stop : Filter attenuation in dB at f_stop
fs : Sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Notes
-----
Additionally a text string telling the user the filter order is
written to the console, e.g., IIR cheby1 order = 8.
Examples
--------
>>> fs = 48000
>>> f_pass = 5000
>>> f_stop = 8000
>>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_hpf(f_stop, f_pass, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR highpass filter using scipy.signal.iirdesign.
The filter order is determined based on
    f_pass Hz, f_stop Hz, and the desired stopband attenuation
    Atten_stop in dB, all relative to a sampling rate of fs Hz.
    Parameters
    ----------
    f_stop : Stopband critical frequency in Hz
    f_pass : Passband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass
    Atten_stop : Filter attenuation in dB at f_stop
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
>>> fs = 48000
>>> f_pass = 8000
>>> f_stop = 5000
>>> b_but,a_but,sos_but = IIR_hpf(f_stop,f_pass,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_hpf(f_stop,f_pass,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bpf(f_stop1, f_pass1, f_pass2, f_stop2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR bandpass filter using scipy.signal.iirdesign.
The filter order is determined based on
    the passband critical frequencies f_pass1 and f_pass2 Hz, the stopband
    critical frequencies f_stop1 and f_stop2 Hz, and the desired stopband
    attenuation Atten_stop in dB, all relative to a sampling rate of fs Hz.
    Parameters
    ----------
    f_stop1 : Lower stopband critical frequency in Hz
    f_pass1 : Lower passband critical frequency in Hz
    f_pass2 : Upper passband critical frequency in Hz
    f_stop2 : Upper stopband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass1 and f_pass2
    Atten_stop : Filter attenuation in dB at f_stop1 and f_stop2
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
    >>> fs = 48000
    >>> f_stop1 = 4000
    >>> f_pass1 = 5000
    >>> f_pass2 = 8000
    >>> f_stop2 = 9000
    >>> b_but,a_but,sos_but = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'butter')
    >>> b_elli,a_elli,sos_elli = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
    Design an IIR bandstop filter using scipy.signal.iirdesign.
    The filter order is determined based on the passband critical
    frequencies f_pass1 and f_pass2 Hz, the stopband critical frequencies
    f_stop1 and f_stop2 Hz, and the desired stopband attenuation
    Atten_stop in dB, all relative to a sampling rate of fs Hz.
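    Examples
    --------
    The band edges below are illustrative values only (48 kHz sampling with
    a stop band roughly spanning 6.4-8.6 kHz); any consistent set of edges
    may be used.
    >>> fs = 48000
    >>> f_pass1 = 6000
    >>> f_stop1 = 6400
    >>> f_stop2 = 8600
    >>> f_pass2 = 9000
    >>> b_but,a_but,sos_but = IIR_bsf(f_pass1,f_stop1,f_stop2,f_pass2,0.5,60,fs,'butter')
    >>> b_elli,a_elli,sos_elli = IIR_bsf(f_pass1,f_stop1,f_stop2,f_pass2,0.5,60,fs,'ellip')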
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
"""
    A function for displaying digital filter frequency response magnitude,
    phase, and group delay. A plot is produced using matplotlib
    freqz_resp_list(b,a=[1],mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4))
    b = list of ndarrays of numerator coefficients (one per filter)
    a = list of ndarrays of denominator coefficients (one per filter)
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; default is 1024
    fsize = figure size; default is (6,4) inches
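    Examples
    --------
    Illustrative use with two designs from this module (the band edges are
    placeholder values):
    >>> fs = 48000
    >>> b1,a1,sos1 = IIR_lpf(5000,8000,0.5,60,fs,'butter')
    >>> b2,a2,sos2 = IIR_lpf(5000,8000,0.5,60,fs,'ellip')
    >>> freqz_resp_list([b1,b2],[a1,a2],mode='dB',fs=fs)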
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
f = np.arange(0,Npts)/(2.0*Npts)
for n in range(N_filt):
w,H = signal.freqz(b[n],a[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
            s1 = 'Error, mode must be "dB", "phase", '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
def freqz_cas(sos,w):
"""
Cascade frequency response
Mark Wickert October 2016
"""
Ns,Mcol = sos.shape
w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w)
for k in range(1,Ns):
w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w)
Hcas *= Htemp
return w, Hcas
def freqz_resp_cas_list(sos, mode = 'dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
"""
    A function for displaying cascade (second-order-section) digital filter
    frequency response magnitude, phase, and group delay. A plot is produced
    using matplotlib
    freqz_resp_cas_list(sos,mode = 'dB',fs=1.0,n_pts = 1024,fsize=(6,4))
    sos = list of 2D ndarrays of second-order section coefficients
          (one per filter)
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
    n_pts = number of points to plot; default is 1024
    fsize = figure size; default is (6,4) inches
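    Examples
    --------
    Illustrative use with second-order-section designs from this module (the
    band edges are placeholder values):
    >>> fs = 48000
    >>> b1,a1,sos1 = IIR_lpf(5000,8000,0.5,60,fs,'butter')
    >>> b2,a2,sos2 = IIR_lpf(5000,8000,0.5,60,fs,'cheby1')
    >>> freqz_resp_cas_list([sos1,sos2],mode='dB',fs=fs)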
Mark Wickert, January 2015
"""
if type(sos) == list:
# We have a list of filters
N_filt = len(sos)
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w,H = freqz_cas(sos[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
            s1 = 'Error, mode must be "dB", "phase", '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
def unique_cpx_roots(rlist,tol = 0.001):
"""
The average of the root values is used when multiplicity
is greater than one.
Mark Wickert October 2016
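    Examples
    --------
    A sketch of typical use on the roots of a polynomial with a repeated
    root (the polynomial is an arbitrary illustration):
    >>> uniq, mult = unique_cpx_roots(np.roots([1, -2, 1]))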
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
for m in range(N_uniq):
if abs(rlist[k]-uniq[m]) <= tol:
mult[m] += 1
uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
break
uniq = np.hstack((uniq,rlist[k]))
mult = np.hstack((mult,[1]))
return np.array(uniq), np.array(mult)
def sos_cascade(sos1,sos2):
"""
    Cascade (stack) two sets of second-order sections into a single
    sos ndarray.
    Mark Wickert October 2016
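    Examples
    --------
    A sketch (the two designs are arbitrary placeholders):
    >>> sos1 = signal.butter(2, 0.2, output='sos')
    >>> sos2 = signal.butter(2, 0.3, output='sos')
    >>> sos12 = sos_cascade(sos1, sos2)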
"""
return np.vstack((sos1,sos2))
def sos_zplane(sos,auto_scale=True,size=2,tol = 0.001):
"""
Create an z-plane pole-zero plot.
Create an z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
sos : ndarray of the sos coefficients
auto_scale : bool (default True)
    size : plot radius maximum when auto_scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
    The difficulty is setting the tolerance for this detection. Currently it
    defaults to 1e-3 via the tol keyword argument, which is passed on to
    unique_cpx_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> sos_zplane(sos)
>>> # Here the plot is generated using manual scaling
>>> sos_zplane(sos,False,1.5)
"""
Ns,Mcol = sos.shape
# Extract roots from sos num and den removing z = 0
# roots due to first-order sections
N_roots = []
for k in range(Ns):
N_roots_tmp = np.roots(sos[k,:3])
if N_roots_tmp[1] == 0.:
N_roots = np.hstack((N_roots,N_roots_tmp[0]))
else:
N_roots = np.hstack((N_roots,N_roots_tmp))
D_roots = []
for k in range(Ns):
D_roots_tmp = np.roots(sos[k,3:])
if D_roots_tmp[1] == 0.:
D_roots = np.hstack((D_roots,D_roots_tmp[0]))
else:
D_roots = np.hstack((D_roots,D_roots_tmp))
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
M = len(N_roots)
N = len(D_roots)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol)
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),
ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),
ha='center',va='bottom',fontsize=10)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),
ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N
| bsd-2-clause | 6,339,515,815,895,065,000 | 36.135185 | 82 | 0.549798 | false |
trachelr/mne-python | mne/viz/utils.py | 2 | 29921 | """Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import math
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose, set_config
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
'#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
def _setup_vmin_vmax(data, vmin, vmax):
"""Aux function to handle vmin and vamx parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
        elif vmax is None:
vmax = np.max(data)
return vmin, vmax
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
""" Adjust subplot parameters to give specified padding.
Note. For plotting please use this function instead of plt.tight_layout
Parameters
----------
pad : float
padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad : float
Padding height between edges of adjacent subplots.
Defaults to `pad_inches`.
w_pad : float
Padding width between edges of adjacent subplots.
Defaults to `pad_inches`.
fig : instance of Figure
Figure to apply changes to.
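    Examples
    --------
    A minimal sketch:
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> tight_layout(fig=fig)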
"""
import matplotlib.pyplot as plt
fig = plt.gcf() if fig is None else fig
fig.canvas.draw()
try: # see https://github.com/matplotlib/matplotlib/issues/2654
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
except Exception:
warn('Matplotlib function \'tight_layout\' is not supported.'
             ' Skipping subplot adjustment.')
else:
try:
fig.set_tight_layout(dict(pad=pad, h_pad=h_pad, w_pad=w_pad))
except Exception:
pass
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all(p['active'] for p in container.info['projs']):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze
Parameters
----------
limits : list (or array) of length 3 or 6
Bounds for the colormap, which will be mirrored across zero if length
3, or completely specified (and potentially asymmetric) if length 6.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of matplotlib.pyplot.colormap | array
A teal->blue->gray->red->yellow colormap.
Notes
-----
    This will return a colormap that displays correctly for data that are
    scaled by the plotting function to span [-fmax, fmax].
Examples
--------
The following code will plot a STC using standard MNE limits:
colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
"""
# Ensure limits is an array
limits = np.asarray(limits, dtype='float')
if len(limits) != 3 and len(limits) != 6:
raise ValueError('limits must have 3 or 6 elements')
if len(limits) == 3 and any(limits < 0.):
raise ValueError('if 3 elements, limits must all be non-negative')
if any(np.diff(limits) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
if len(limits) == 3:
limits = (np.concatenate((-np.flipud(limits), limits)) +
limits[-1]) / (2 * limits[-1])
else:
limits = (limits - np.min(limits)) / np.max(limits -
np.min(limits))
cdict = {'red': ((limits[0], 0.0, 0.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 1.0, 1.0),
(limits[5], 1.0, 1.0)),
'green': ((limits[0], 1.0, 1.0),
(limits[1], 0.0, 0.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 1.0, 1.0)),
'blue': ((limits[0], 1.0, 1.0),
(limits[1], 1.0, 1.0),
(limits[2], 0.5, 0.5),
(limits[3], 0.5, 0.5),
(limits[4], 0.0, 0.0),
(limits[5], 0.0, 0.0))}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
if len(limits) == 3:
limits = np.concatenate((-np.flipud(limits), [0], limits)) /\
limits[-1]
else:
limits = np.concatenate((limits[:3], [0], limits[3:]))
limits /= np.max(np.abs(limits))
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, limits, 255 * c)
for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
if len(params['projs']) > 0:
if params['fig_proj'] is None:
_draw_proj_checkbox(event, params, draw_current_state=False)
else:
# turn off options dialog
plt.close(params['fig_proj'])
del params['proj_checks']
params['fig_proj'] = None
def _toggle_proj(event, params):
"""Operation to perform when proj boxes clicked"""
# read options if possible
if 'proj_checks' in params:
bools = [x[0].get_visible() for x in params['proj_checks'].lines]
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
bools = [True] * len(params['projs'])
compute_proj = False
if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _get_help_text(params):
"""Aux function for customizing help dialogs text."""
text, text2 = list(), list()
text.append(u'\u2190 : \n')
text.append(u'\u2192 : \n')
text.append(u'\u2193 : \n')
text.append(u'\u2191 : \n')
text.append(u'- : \n')
text.append(u'+ or = : \n')
text.append(u'Home : \n')
text.append(u'End : \n')
text.append(u'Page down : \n')
text.append(u'Page up : \n')
text.append(u'F11 : \n')
text.append(u'? : \n')
text.append(u'Esc : \n\n')
text.append(u'Mouse controls\n')
text.append(u'click on data :\n')
text2.append('Navigate left\n')
text2.append('Navigate right\n')
text2.append('Scale down\n')
text2.append('Scale up\n')
text2.append('Toggle full screen mode\n')
text2.append('Open help box\n')
text2.append('Quit\n\n\n')
if 'raw' in params:
text2.insert(4, 'Reduce the time shown per view\n')
text2.insert(5, 'Increase the time shown per view\n')
text.append(u'click elsewhere in the plot :\n')
if 'ica' in params:
text.append(u'click component name :\n')
text2.insert(2, 'Navigate components down\n')
text2.insert(3, 'Navigate components up\n')
text2.insert(8, 'Reduce the number of components per view\n')
text2.insert(9, 'Increase the number of components per view\n')
text2.append('Mark bad channel\n')
text2.append('Vertical line at a time instant\n')
text2.append('Show topography for the component\n')
else:
text.append(u'click channel name :\n')
text2.insert(2, 'Navigate channels down\n')
text2.insert(3, 'Navigate channels up\n')
text2.insert(8, 'Reduce the number of channels per view\n')
text2.insert(9, 'Increase the number of channels per view\n')
text2.append('Mark bad channel\n')
text2.append('Vertical line at a time instant\n')
text2.append('Mark bad channel\n')
elif 'epochs' in params:
text.append(u'right click :\n')
text2.insert(4, 'Reduce the number of epochs per view\n')
text2.insert(5, 'Increase the number of epochs per view\n')
if 'ica' in params:
text.append(u'click component name :\n')
text2.insert(2, 'Navigate components down\n')
text2.insert(3, 'Navigate components up\n')
text2.insert(8, 'Reduce the number of components per view\n')
text2.insert(9, 'Increase the number of components per view\n')
text2.append('Mark component for exclusion\n')
text2.append('Vertical line at a time instant\n')
text2.append('Show topography for the component\n')
else:
text.append(u'click channel name :\n')
text2.insert(2, 'Navigate channels down\n')
text2.insert(3, 'Navigate channels up\n')
text2.insert(8, 'Reduce the number of channels per view\n')
text2.insert(9, 'Increase the number of channels per view\n')
text.insert(10, u'b : \n')
text2.insert(10, 'Toggle butterfly plot on/off\n')
text.insert(11, u'h : \n')
text2.insert(11, 'Show histogram of peak-to-peak values\n')
text2.append('Mark bad epoch\n')
text2.append('Vertical line at a time instant\n')
text2.append('Mark bad channel\n')
text.append(u'middle click :\n')
text2.append('Show channel name (butterfly plot)\n')
text.insert(11, u'o : \n')
text2.insert(11, 'View settings (orig. view only)\n')
return ''.join(text), ''.join(text2)
def _prepare_trellis(n_cells, max_col):
"""Aux function
"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
ax.set_visible(False)
return fig, axes
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog"""
from matplotlib import widgets
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
[True] * len(params['projs']))
width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
height = len(projs) / 6.0 + 0.5
fig_proj = figure_nobar(figsize=(width, height))
fig_proj.canvas.set_window_title('SSP projection vectors')
params['fig_proj'] = fig_proj # necessary for proper toggling
ax_temp = fig_proj.add_axes((0, 0, 1, 1), frameon=False)
proj_checks = widgets.CheckButtons(ax_temp, labels=labels, actives=actives)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active'] is True:
for x in proj_checks.lines[ii]:
x.set_color('r')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
# this should work for non-test cases
try:
fig_proj.canvas.draw()
fig_proj.show()
except Exception:
pass
def _layout_figure(params):
"""Function for setting figure layout. Shared with raw and epoch plots"""
size = params['fig'].get_size_inches() * params['fig'].dpi
scroll_width = 25
hscroll_dist = 25
vscroll_dist = 10
l_border = 100
r_border = 10
t_border = 35
b_border = 40
# only bother trying to reset layout if it's reasonable to do so
if size[0] < 2 * scroll_width or size[1] < 2 * scroll_width + hscroll_dist:
return
# convert to relative units
scroll_width_x = scroll_width / size[0]
scroll_width_y = scroll_width / size[1]
vscroll_dist /= size[0]
hscroll_dist /= size[1]
l_border /= size[0]
r_border /= size[0]
t_border /= size[1]
b_border /= size[1]
# main axis (traces)
ax_width = 1.0 - scroll_width_x - l_border - r_border - vscroll_dist
ax_y = hscroll_dist + scroll_width_y + b_border
ax_height = 1.0 - ax_y - t_border
pos = [l_border, ax_y, ax_width, ax_height]
params['ax'].set_position(pos)
if 'ax2' in params:
params['ax2'].set_position(pos)
params['ax'].set_position(pos)
# vscroll (channels)
pos = [ax_width + l_border + vscroll_dist, ax_y,
scroll_width_x, ax_height]
params['ax_vscroll'].set_position(pos)
# hscroll (time)
pos = [l_border, b_border, ax_width, scroll_width_y]
params['ax_hscroll'].set_position(pos)
if 'ax_button' in params:
# options button
pos = [l_border + ax_width + vscroll_dist, b_border,
scroll_width_x, scroll_width_y]
params['ax_button'].set_position(pos)
if 'ax_help_button' in params:
pos = [l_border - vscroll_dist - scroll_width_x * 2, b_border,
scroll_width_x * 2, scroll_width_y]
params['ax_help_button'].set_position(pos)
params['fig'].canvas.draw()
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
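    Examples
    --------
    A sketch with placeholder file names (both files must already exist):
    >>> fname_out = compare_fiff('a_raw.fif', 'b_raw.fif', show=False)  # doctest: +SKIP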
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'w')
else:
f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
fname_out = f.name
with f as fid:
fid.write(diff)
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar"""
from matplotlib import rcParams, pyplot as plt
old_val = rcParams['toolbar']
try:
rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
except Exception as ex:
raise ex
finally:
rcParams['toolbar'] = old_val
return fig
def _helper_raw_resize(event, params):
"""Helper for resizing"""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size)
_layout_figure(params)
def _plot_raw_onscroll(event, params, len_channels=None):
"""Interpret scroll events"""
if len_channels is None:
len_channels = len(params['info']['ch_names'])
orig_start = params['ch_start']
if event.step < 0:
params['ch_start'] = min(params['ch_start'] + params['n_channels'],
len_channels - params['n_channels'])
    else:  # scrolled up
params['ch_start'] = max(params['ch_start'] - params['n_channels'], 0)
if orig_start != params['ch_start']:
_channels_changed(params, len_channels)
def _channels_changed(params, len_channels):
"""Helper function for dealing with the vertical shift of the viewport."""
if params['ch_start'] + params['n_channels'] > len_channels:
params['ch_start'] = len_channels - params['n_channels']
if params['ch_start'] < 0:
params['ch_start'] = 0
params['plot_fun']()
def _plot_raw_time(value, params):
"""Deal with changed time value"""
info = params['info']
max_times = params['n_times'] / float(info['sfreq']) - params['duration']
if value > max_times:
value = params['n_times'] / info['sfreq'] - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
def _plot_raw_onkey(event, params):
"""Interpret key presses"""
import matplotlib.pyplot as plt
if event.key == 'escape':
plt.close(params['fig'])
elif event.key == 'down':
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'up':
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'right':
value = params['t_start'] + params['duration']
_plot_raw_time(value, params)
params['update_fun']()
params['plot_fun']()
elif event.key == 'left':
value = params['t_start'] - params['duration']
_plot_raw_time(value, params)
params['update_fun']()
params['plot_fun']()
elif event.key in ['+', '=']:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == '-':
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key == 'pageup':
n_channels = params['n_channels'] + 1
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'pagedown':
n_channels = params['n_channels'] - 1
if n_channels == 0:
return
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
if len(params['lines']) > n_channels: # remove line from view
params['lines'][n_channels].set_xdata([])
params['lines'][n_channels].set_ydata([])
_channels_changed(params, len(params['info']['ch_names']))
elif event.key == 'home':
duration = params['duration'] - 1.0
if duration <= 0:
return
params['duration'] = duration
params['hsel_patch'].set_width(params['duration'])
params['update_fun']()
params['plot_fun']()
elif event.key == 'end':
duration = params['duration'] + 1.0
if duration > params['raw'].times[-1]:
duration = params['raw'].times[-1]
params['duration'] = duration
params['hsel_patch'].set_width(params['duration'])
params['update_fun']()
params['plot_fun']()
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
def _mouse_click(event, params):
"""Vertical select callback"""
if event.button != 1:
return
if event.inaxes is None:
if params['n_channels'] > 100:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > params['t_start'] or pos[1] < 0 or pos[1] > ylim[0]:
return
params['label_click_fun'](pos)
# vertical scrollbar changed
if event.inaxes == params['ax_vscroll']:
ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scrollbar changed
elif event.inaxes == params['ax_hscroll']:
_plot_raw_time(event.xdata - params['duration'] / 2, params)
params['update_fun']()
params['plot_fun']()
elif event.inaxes == params['ax']:
params['pick_bads_fun'](event)
def _select_bads(event, params, bads):
"""Helper for selecting bad channels onpick. Returns updated bads list."""
    # Trade-off: avoid selecting more than one channel when drifts are
    # present; for clean data, click on flat segments rather than on peaks.
def f(x, y):
return y(np.mean(x), x.std() * 2)
lines = event.inaxes.lines
for line in lines:
ydata = line.get_ydata()
if not isinstance(ydata, list) and not np.isnan(ydata).any():
ymin, ymax = f(ydata, np.subtract), f(ydata, np.add)
if ymin <= event.ydata <= ymax:
this_chan = vars(line)['ch_name']
if this_chan in params['info']['ch_names']:
ch_idx = params['ch_start'] + lines.index(line)
if this_chan not in bads:
bads.append(this_chan)
color = params['bad_color']
line.set_zorder(-1)
else:
while this_chan in bads:
bads.remove(this_chan)
color = vars(line)['def_color']
line.set_zorder(0)
line.set_color(color)
params['ax_vscroll'].patches[ch_idx].set_color(color)
break
else:
x = np.array([event.xdata] * 2)
params['ax_vertline'].set_data(x, np.array(params['ax'].get_ylim()))
params['ax_hscroll_vertline'].set_data(x, np.array([0., 1.]))
params['vertline_t'].set_text('%0.3f' % x[0])
return bads
def _onclick_help(event, params):
"""Function for drawing help window"""
import matplotlib.pyplot as plt
text, text2 = _get_help_text(params)
width = 6
height = 5
fig_help = figure_nobar(figsize=(width, height), dpi=80)
fig_help.canvas.set_window_title('Help')
ax = plt.subplot2grid((8, 5), (0, 0), colspan=5)
ax.set_title('Keyboard shortcuts')
plt.axis('off')
ax1 = plt.subplot2grid((8, 5), (1, 0), rowspan=7, colspan=2)
ax1.set_yticklabels(list())
plt.text(0.99, 1, text, fontname='STIXGeneral', va='top', weight='bold',
ha='right')
plt.axis('off')
ax2 = plt.subplot2grid((8, 5), (1, 2), rowspan=7, colspan=3)
ax2.set_yticklabels(list())
plt.text(0, 1, text2, fontname='STIXGeneral', va='top')
plt.axis('off')
tight_layout(fig=fig_help)
# this should work for non-test cases
try:
fig_help.canvas.draw()
fig_help.show()
except Exception:
pass
class ClickableImage(object):
"""
Display an image so you can click on it and store x/y positions.
    Takes as input an image array (can be any array that works with imshow,
    but it will work best with images). Displays the image and lets you
    click on it. Stores the xy coordinates of each click, so you can
    superimpose something on top of it.
Upon clicking, the x/y coordinate of the cursor will be stored in
self.coords, which is a list of (x, y) tuples.
Parameters
----------
imdata: ndarray
The image that you wish to click on for 2-d points.
**kwargs : dict
Keyword arguments. Passed to ax.imshow.
Notes
-----
.. versionadded:: 0.9.0
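    Examples
    --------
    A sketch of typical use; this opens an interactive figure and collects
    coordinates from mouse clicks (the image array is a placeholder):
    >>> im = np.random.RandomState(0).rand(100, 100)  # doctest: +SKIP
    >>> click = ClickableImage(im)  # doctest: +SKIP
    >>> click.plot_clicks()  # doctest: +SKIP
    >>> layout = click.to_layout()  # doctest: +SKIP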
"""
def __init__(self, imdata, **kwargs):
"""Display the image for clicking."""
from matplotlib.pyplot import figure, show
self.coords = []
self.imdata = imdata
self.fig = figure()
self.ax = self.fig.add_subplot(111)
self.ymax = self.imdata.shape[0]
self.xmax = self.imdata.shape[1]
self.im = self.ax.imshow(imdata, aspect='auto',
extent=(0, self.xmax, 0, self.ymax),
picker=True, **kwargs)
self.ax.axis('off')
self.fig.canvas.mpl_connect('pick_event', self.onclick)
show()
def onclick(self, event):
"""Mouse click handler.
Parameters
----------
event: matplotlib event object
The matplotlib object that we use to get x/y position.
"""
mouseevent = event.mouseevent
self.coords.append((mouseevent.xdata, mouseevent.ydata))
def plot_clicks(self, **kwargs):
"""Plot the x/y positions stored in self.coords.
Parameters
----------
**kwargs : dict
Arguments are passed to imshow in displaying the bg image.
"""
from matplotlib.pyplot import subplots, show
f, ax = subplots()
ax.imshow(self.imdata, extent=(0, self.xmax, 0, self.ymax), **kwargs)
xlim, ylim = [ax.get_xlim(), ax.get_ylim()]
xcoords, ycoords = zip(*self.coords)
ax.scatter(xcoords, ycoords, c='r')
ann_text = np.arange(len(self.coords)).astype(str)
for txt, coord in zip(ann_text, self.coords):
ax.annotate(txt, coord, fontsize=20, color='r')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
show()
def to_layout(self, **kwargs):
"""Turn coordinates into an MNE Layout object.
Normalizes by the image you used to generate clicks
Parameters
----------
**kwargs : dict
Arguments are passed to generate_2d_layout
"""
from mne.channels.layout import generate_2d_layout
coords = np.array(self.coords)
lt = generate_2d_layout(coords, bg_image=self.imdata, **kwargs)
return lt
def _fake_click(fig, ax, point, xform='ax'):
"""Helper to fake a click at a relative point within axes."""
if xform == 'ax':
x, y = ax.transAxes.transform_point(point)
elif xform == 'data':
x, y = ax.transData.transform_point(point)
else:
raise ValueError('unknown transform')
try:
fig.canvas.button_press_event(x, y, 1, False, None)
except Exception: # for old MPL
fig.canvas.button_press_event(x, y, 1, False)
def add_background_image(fig, im, set_ratios=None):
"""Add a background image to a plot.
Adds the image specified in `im` to the
figure `fig`. This is generally meant to
be done with topo plots, though it could work
for any plot.
Note: This modifies the figure and/or axes
in place.
Parameters
----------
fig: plt.figure
The figure you wish to add a bg image to.
im: ndarray
A numpy array that works with a call to
plt.imshow(im). This will be plotted
as the background of the figure.
set_ratios: None | str
Set the aspect ratio of any axes in fig
to the value in set_ratios. Defaults to None,
which does nothing to axes.
Returns
-------
    ax_im : instance of the created matplotlib axis object
        corresponding to the image you added.
Notes
-----
.. versionadded:: 0.9.0
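    Examples
    --------
    A minimal sketch (the image array is a placeholder):
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax_im = add_background_image(fig, np.random.rand(20, 20))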
"""
if set_ratios is not None:
for ax in fig.axes:
ax.set_aspect(set_ratios)
ax_im = fig.add_axes([0, 0, 1, 1])
ax_im.imshow(im, aspect='auto')
ax_im.set_zorder(-1)
return ax_im
| bsd-3-clause | 7,840,341,043,604,901,000 | 34.79067 | 79 | 0.574179 | false |
barbagroup/cuIBM | examples/cylinder/Re550/scripts/plotDragCoefficient.py | 1 | 2482 | """
Plots the instantaneous drag coefficient between 0 and 3 time-units of flow
simulation and compares with numerical results from
Koumoutsakos and Leonard (1995).
_References:_
* Koumoutsakos, P., & Leonard, A. (1995).
High-resolution simulations of the flow around an impulsively started
cylinder using vortex methods.
Journal of Fluid Mechanics, 296, 1-38.
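Usage (a sketch; --directory defaults to the current working directory
when omitted):
    python plotDragCoefficient.py --directory <simulation-directory>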
"""
import os
import argparse
from matplotlib import pyplot
from snake.cuibm.simulation import CuIBMSimulation
from snake.solutions.koumoutsakosLeonard1995 import KoumoutsakosLeonard1995
# Parse from the command-line the directory of the runs.
parser = argparse.ArgumentParser()
parser.add_argument('--directory',
dest='directory',
default=os.getcwd(),
type=str,
help='directory of the runs')
args = parser.parse_args()
directory = args.directory
simulation = CuIBMSimulation(directory=directory,
description='cuIBM')
simulation.read_forces()
# Reads drag coefficient of Koumoutsakos and Leonard (1995) for Re=550.
file_name = 'koumoutsakos_leonard_1995_cylinder_dragCoefficientRe550.dat'
file_path = os.path.join(os.environ['CUIBM_DIR'], 'data', file_name)
kl1995 = KoumoutsakosLeonard1995(file_path=file_path, Re=550)
# Plots the instantaneous drag coefficients.
images_directory = os.path.join(directory, 'images')
if not os.path.isdir(images_directory):
os.makedirs(images_directory)
pyplot.style.use('seaborn-dark')
kwargs_data = {'label': simulation.description,
'color': '#336699',
'linestyle': '-',
'linewidth': 3,
'zorder': 10}
kwargs_kl1995 = {'label': kl1995.description,
'color': '#993333',
'linewidth': 0,
'markeredgewidth': 2,
'markeredgecolor': '#993333',
'markerfacecolor': 'none',
'marker': 'o',
'markersize': 4,
'zorder': 10}
fig, ax = pyplot.subplots(figsize=(6, 6))
ax.grid(True, zorder=0)
ax.set_xlabel('non-dimensional time', fontsize=16)
ax.set_ylabel('drag coefficient', fontsize=16)
ax.plot(simulation.forces[0].times, 2.0 * simulation.forces[0].values,
**kwargs_data)
ax.plot(kl1995.cd.times, kl1995.cd.values,
**kwargs_kl1995)
ax.axis([0.0, 3.0, 0.0, 2.0])
ax.legend(prop={'size': 16})
pyplot.savefig(os.path.join(images_directory, 'dragCoefficient.png'))
| mit | 9,193,859,521,668,979,000 | 34.457143 | 75 | 0.653505 | false |
tommy-u/chaco | chaco/tests/test_image_plot.py | 1 | 5566 | import os
import tempfile
from contextlib import contextmanager
import numpy as np
from traits.etsconfig.api import ETSConfig
from chaco.api import (PlotGraphicsContext, GridDataSource, GridMapper,
DataRange2D, ImageData, ImagePlot)
# The Quartz backend rescales pixel values, so use a higher threshold.
MAX_RMS_ERROR = 16 if ETSConfig.kiva_backend == 'quartz' else 1
IMAGE = np.random.random_integers(0, 255, size=(100, 200)).astype(np.uint8)
RGB = np.dstack([IMAGE] * 3)
# Rendering adds rows and columns for some reason.
TRIM_RENDERED = (slice(1, -1), slice(1, -1), 0)
@contextmanager
def temp_image_file(suffix='.tif', prefix='test', dir=None):
fd, filename = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield filename
finally:
os.close(fd)
os.remove(filename)
def get_image_index_and_mapper(image):
h, w = image.shape[:2]
index = GridDataSource(np.arange(h+1), np.arange(w+1))
index_mapper = GridMapper(range=DataRange2D(low=(0, 0), high=(h, w)))
return index, index_mapper
def save_renderer_result(renderer, filename):
renderer.padding = 0
gc = PlotGraphicsContext(renderer.outer_bounds)
with gc:
gc.render_component(renderer)
gc.save(filename)
def image_from_renderer(renderer, orientation):
data = renderer.value
# Set bounding box size and origin to align rendered image with array
renderer.bounds = (data.get_width() + 1, data.get_height() + 1)
if orientation == 'v':
renderer.bounds = renderer.bounds[::-1]
renderer.position = 0, 0
with temp_image_file() as filename:
save_renderer_result(renderer, filename)
rendered_image = ImageData.fromfile(filename).data[TRIM_RENDERED]
return rendered_image
def rendered_image_result(image, filename=None, **plot_kwargs):
data_source = ImageData(data=image)
index, index_mapper = get_image_index_and_mapper(image)
renderer = ImagePlot(value=data_source, index=index,
index_mapper=index_mapper,
**plot_kwargs)
orientation = plot_kwargs.get('orientation', 'h')
return image_from_renderer(renderer, orientation)
def calculate_rms(image_result, expected_image):
"""Return root-mean-square error.
Implementation taken from matplotlib.
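    For example (a sketch; any two equal-shaped uint8 arrays work):
    >>> a = np.zeros((2, 2), dtype=np.uint8)
    >>> b = np.full((2, 2), 2, dtype=np.uint8)
    >>> calculate_rms(b, a)
    2.0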
"""
# calculate the per-pixel errors, then compute the root mean square error
num_values = np.prod(expected_image.shape)
# Cast to int64 to reduce likelihood of over-/under-flow.
abs_diff_image = abs(np.int64(expected_image) - np.int64(image_result))
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
sum_of_squares = np.sum(histogram * np.arange(len(histogram))**2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
def verify_result_image(input_image, expected_image, **plot_kwargs):
# These tests were written assuming uint8 inputs.
assert input_image.dtype == np.uint8
assert expected_image.dtype == np.uint8
image_result = rendered_image_result(input_image, **plot_kwargs)
rms = calculate_rms(image_result, expected_image)
print "RMS =", rms
assert rms < MAX_RMS_ERROR
def plot_comparison(input_image, expected_image, **plot_kwargs):
import matplotlib.pyplot as plt
image_result = rendered_image_result(input_image, **plot_kwargs)
diff = np.int64(expected_image) - np.int64(image_result)
max_diff = max(abs(diff.min()), abs(diff.max()), 1)
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, sharex='all', sharey='all')
ax0.imshow(expected_image)
ax1.imshow(image_result)
im_plot = ax2.imshow(diff, vmin=-max_diff, vmax=max_diff, cmap=plt.cm.bwr)
fig.colorbar(im_plot)
plt.show()
def test_horizontal_top_left():
# Horizontal orientation with top left origin renders original image.
verify_result_image(RGB, IMAGE, origin='top left')
def test_horizontal_bottom_left():
# Horizontal orientation with bottom left origin renders a vertically
# flipped image.
verify_result_image(RGB, IMAGE[::-1], origin='bottom left')
def test_horizontal_top_right():
# Horizontal orientation with top right origin renders a horizontally
# flipped image.
verify_result_image(RGB, IMAGE[:, ::-1], origin='top right')
def test_horizontal_bottom_right():
# Horizontal orientation with top right origin renders an image flipped
# horizontally and vertically.
verify_result_image(RGB, IMAGE[::-1, ::-1], origin='bottom right')
def test_vertical_top_left():
# Vertical orientation with top left origin renders transposed image.
verify_result_image(RGB, IMAGE.T, origin='top left', orientation='v')
def test_vertical_bottom_left():
# Vertical orientation with bottom left origin renders transposed image
# that is vertically flipped.
verify_result_image(RGB, (IMAGE.T)[::-1],
origin='bottom left', orientation='v')
def test_vertical_top_right():
# Vertical orientation with top right origin renders transposed image
# that is horizontally flipped.
verify_result_image(RGB, (IMAGE.T)[:, ::-1],
origin='top right', orientation='v')
def test_vertical_bottom_right():
# Vertical orientation with bottom right origin renders transposed image
# that is flipped vertically and horizontally.
verify_result_image(RGB, (IMAGE.T)[::-1, ::-1],
origin='bottom right', orientation='v')
if __name__ == "__main__":
np.testing.run_module_suite()
| bsd-3-clause | 1,514,425,791,645,359,900 | 33.571429 | 78 | 0.681639 | false |
airbnb/superset | tests/viz_tests.py | 1 | 52994 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import date, datetime, timezone
import logging
from math import nan
from unittest.mock import Mock, patch
from typing import Any, Dict, List, Set
import numpy as np
import pandas as pd
import pytest
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import QueryObjectValidationError, SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.getLogger(__name__)
class TestBaseViz(SupersetTestCase):
def test_constructor_exception_no_datasource(self):
form_data = {}
datasource = None
with self.assertRaises(Exception):
viz.BaseViz(datasource, form_data)
def test_process_metrics(self):
# test TableViz metrics in correct order
form_data = {
"url_params": {},
"row_limit": 500,
"metric": "sum__SP_POP_TOTL",
"entity": "country_code",
"secondary_metric": "sum__SP_POP_TOTL",
"granularity_sqla": "year",
"page_length": 0,
"all_columns": [],
"viz_type": "table",
"since": "2014-01-01",
"until": "2014-01-02",
"metrics": ["sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
"country_fieldtype": "cca3",
"percent_metrics": ["count"],
"slice_id": 74,
"time_grain_sqla": None,
"order_by_cols": [],
"groupby": ["country_name"],
"compare_lag": "10",
"limit": "25",
"datasource": "2__table",
"table_timestamp_format": "%Y-%m-%d %H:%M:%S",
"markup_type": "markdown",
"where": "",
"compare_suffix": "o10Y",
}
datasource = Mock()
datasource.type = "table"
test_viz = viz.BaseViz(datasource, form_data)
expect_metric_labels = [
u"sum__SP_POP_TOTL",
u"SUM(SE_PRM_NENR_MA)",
u"SUM(SP_URB_TOTL)",
u"count",
]
self.assertEqual(test_viz.metric_labels, expect_metric_labels)
self.assertEqual(test_viz.all_metrics, expect_metric_labels)
def test_get_df_returns_empty_df(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
result = test_viz.get_df(query_obj)
self.assertEqual(type(result), pd.DataFrame)
self.assertTrue(result.empty)
def test_get_df_handles_dttm_col(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = Mock()
datasource = Mock()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
test_viz = viz.BaseViz(datasource, form_data)
test_viz.df_metrics_to_num = Mock()
test_viz.get_fillna_for_columns = Mock(return_value=0)
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
datasource.offset = 0
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
mock_dttm_col.python_date_format = "epoch_ms"
result = test_viz.get_df(query_obj)
import logging
logger.info(result)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
mock_dttm_col.python_date_format = None
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
datasource.offset = 1
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
)
datasource.offset = 0
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01"]})
mock_dttm_col.python_date_format = "%Y-%m-%d"
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
)
def test_cache_timeout(self):
datasource = self.get_datasource_mock()
datasource.cache_timeout = 0
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(0, test_viz.cache_timeout)
datasource.cache_timeout = 156
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(156, test_viz.cache_timeout)
datasource.cache_timeout = None
datasource.database.cache_timeout = 0
self.assertEqual(0, test_viz.cache_timeout)
datasource.database.cache_timeout = 1666
self.assertEqual(1666, test_viz.cache_timeout)
datasource.database.cache_timeout = None
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)
class TestTableViz(SupersetTestCase):
def test_get_data_applies_percentage(self):
form_data = {
"groupby": ["groupA", "groupB"],
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"count",
"avg__C",
],
"percent_metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"avg__B",
],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"SUM(value1)": [15, 20, 25, 40],
"avg__B": [10, 20, 5, 15],
"avg__C": [11, 22, 33, 44],
"count": [6, 7, 8, 9],
"groupA": ["A", "B", "C", "C"],
"groupB": ["x", "x", "y", "z"],
}
)
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data and computes percents
self.assertEqual(
[
"groupA",
"groupB",
"SUM(value1)",
"count",
"avg__C",
"%SUM(value1)",
"%avg__B",
],
list(data["columns"]),
)
expected = [
{
"groupA": "A",
"groupB": "x",
"SUM(value1)": 15,
"count": 6,
"avg__C": 11,
"%SUM(value1)": 0.15,
"%avg__B": 0.2,
},
{
"groupA": "B",
"groupB": "x",
"SUM(value1)": 20,
"count": 7,
"avg__C": 22,
"%SUM(value1)": 0.2,
"%avg__B": 0.4,
},
{
"groupA": "C",
"groupB": "y",
"SUM(value1)": 25,
"count": 8,
"avg__C": 33,
"%SUM(value1)": 0.25,
"%avg__B": 0.1,
},
{
"groupA": "C",
"groupB": "z",
"SUM(value1)": 40,
"count": 9,
"avg__C": 44,
"%SUM(value1)": 0.4,
"%avg__B": 0.3,
},
]
self.assertEqual(expected, data["records"])
def test_parse_adhoc_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SIMPLE",
"clause": "HAVING",
"subject": "SUM(value1)",
"operator": "<",
"comparator": "10",
},
{
"expressionType": "SQL",
"clause": "HAVING",
"sqlExpression": "SUM(value1) > 5",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual(
[{"op": "<", "val": "10", "col": "SUM(value1)"}],
query_obj["extras"]["having_druid"],
)
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])
def test_adhoc_filters_overwrite_legacy_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
"having": "SUM(value1) > 5",
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual([], query_obj["extras"]["having_druid"])
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("", query_obj["extras"]["having"])
def test_query_obj_merges_percent_metrics(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["sum__A", "count", "avg__C"],
"percent_metrics": ["sum__A", "avg__B", "max__Y"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
["sum__A", "count", "avg__C", "avg__B", "max__Y"], query_obj["metrics"]
)
def test_query_obj_throws_columns_and_metrics(self):
datasource = self.get_datasource_mock()
form_data = {"all_columns": ["A", "B"], "metrics": ["x", "y"]}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
del form_data["metrics"]
form_data["groupby"] = ["B", "C"]
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_merges_all_columns(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
"all_columns": ["colA", "colB", "colC"],
"order_by_cols": ['["colA", "colB"]', '["colC"]'],
}
super_query_obj.return_value = {
"columns": ["colD", "colC"],
"groupby": ["colA", "colB"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(form_data["all_columns"], query_obj["columns"])
self.assertEqual([], query_obj["groupby"])
self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])
def test_query_obj_uses_sortby(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["colA", "colB"],
"order_desc": False,
}
def run_test(metric):
form_data["timeseries_limit_metric"] = metric
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(["colA", "colB", metric], query_obj["metrics"])
self.assertEqual([(metric, True)], query_obj["orderby"])
run_test("simple_metric")
run_test(
{
"label": "adhoc_metric",
"expressionType": "SIMPLE",
"aggregate": "SUM",
"column": {"column_name": "sort_column",},
}
)
def test_should_be_timeseries_raises_when_no_granularity(self):
datasource = self.get_datasource_mock()
form_data = {"include_time": True}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.should_be_timeseries()
def test_adhoc_metric_with_sortby(self):
metrics = [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "sum_value",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
]
form_data = {
"metrics": metrics,
"timeseries_limit_metric": {
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"order_desc": False,
}
df = pd.DataFrame({"SUM(value1)": [15], "sum_value": [15]})
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
self.assertEqual(["sum_value"], data["columns"])
class TestDistBarViz(SupersetTestCase):
def test_groupby_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "anchovies", None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("votes", data["key"])
expected_values = [
{"x": "pepperoni", "y": 5},
{"x": "cheese", "y": 3},
{"x": NULL_STRING, "y": 2},
{"x": "anchovies", "y": 1},
]
self.assertEqual(expected_values, data["values"])
def test_groupby_nans(self):
form_data = {
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["beds"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("count", data["key"])
expected_values = [
{"x": "1.0", "y": 42},
{"x": "0.0", "y": 30},
{"x": "2.0", "y": 29},
{"x": NULL_STRING, "y": 3},
]
self.assertEqual(expected_values, data["values"])
def test_column_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": ["role"],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
"role": ["engineer", "engineer", None, None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)
expected = [
{
"key": NULL_STRING,
"values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
},
{
"key": "engineer",
"values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
},
]
self.assertEqual(expected, data)
class TestPairedTTest(SupersetTestCase):
def test_get_data_transforms_dataframe(self):
form_data = {
"groupby": ["groupA", "groupB", "groupC"],
"metrics": ["metric1", "metric2", "metric3"],
}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"metric1": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 4},
{"x": 200, "y": 5},
{"x": 300, "y": 6},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 7},
{"x": 200, "y": 8},
{"x": 300, "y": 9},
],
"group": ("c1", "c2", "c3"),
},
],
"metric2": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 40},
{"x": 200, "y": 50},
{"x": 300, "y": 60},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 70},
{"x": 200, "y": 80},
{"x": 300, "y": 90},
],
"group": ("c1", "c2", "c3"),
},
],
"metric3": [
{
"values": [
{"x": 100, "y": 100},
{"x": 200, "y": 200},
{"x": 300, "y": 300},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 400},
{"x": 200, "y": 500},
{"x": 300, "y": 600},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 700},
{"x": 200, "y": 800},
{"x": 300, "y": 900},
],
"group": ("c1", "c2", "c3"),
},
],
}
self.assertEqual(data, expected)
def test_get_data_empty_null_keys(self):
form_data = {"groupby": [], "metrics": ["", None]}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300]
raw[""] = [1, 2, 3]
raw[None] = [10, 20, 30]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"N/A": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": "All",
}
],
"NULL": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": "All",
}
],
}
self.assertEqual(data, expected)
class TestPartitionViz(SupersetTestCase):
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_time_series_option(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {}
test_viz = viz.PartitionViz(datasource, form_data)
super_query_obj.return_value = {}
query_obj = test_viz.query_obj()
self.assertFalse(query_obj["is_timeseries"])
test_viz.form_data["time_series_option"] = "agg_sum"
query_obj = test_viz.query_obj()
self.assertTrue(query_obj["is_timeseries"])
def test_levels_for_computes_levels(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
time_op = "agg_sum"
test_viz = viz.PartitionViz(Mock(), {})
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
"metric1": {"a1": 6, "b1": 15, "c1": 24},
"metric2": {"a1": 60, "b1": 150, "c1": 240},
"metric3": {"a1": 600, "b1": 1500, "c1": 2400},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
time_op = "agg_mean"
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {
DTTM_ALIAS: 200.0,
"metric1": 5.0,
"metric2": 50.0,
"metric3": 500.0,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
"metric1": {"a1": 2, "b1": 5, "c1": 8},
"metric2": {"a1": 20, "b1": 50, "c1": 80},
"metric3": {"a1": 200, "b1": 500, "c1": 800},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_diff_computes_difference(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {})
time_op = "point_diff"
levels = test_viz.levels_for_diff(time_op, groups, df)
expected = {"metric1": 6, "metric2": 60, "metric3": 600}
self.assertEqual(expected, levels[0].to_dict())
expected = {
"metric1": {"a1": 2, "b1": 2, "c1": 2},
"metric2": {"a1": 20, "b1": 20, "c1": 20},
"metric3": {"a1": 200, "b1": 200, "c1": 200},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(4, len(levels))
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_time_calls_process_data_and_drops_cols(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {"groupby": groups})
def return_args(df_drop, aggregate):
return df_drop
test_viz.process_data = Mock(side_effect=return_args)
levels = test_viz.levels_for_time(groups, df)
self.assertEqual(4, len(levels))
cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist()))
cols += ["groupA"]
self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist()))
cols += ["groupB"]
self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist()))
cols += ["groupC"]
self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist()))
self.assertEqual(4, len(test_viz.process_data.mock_calls))
def test_nest_values_returns_hierarchy(self):
raw = {}
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
levels = test_viz.levels_for("agg_sum", groups, df)
nest = test_viz.nest_values(levels)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
def test_nest_procs_returns_hierarchy(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
metrics = ["metric1", "metric2", "metric3"]
procs = {}
for i in range(0, 4):
df_drop = df.drop(groups[i:], 1)
pivot = df_drop.pivot_table(
index=DTTM_ALIAS, columns=groups[:i], values=metrics
)
procs[i] = pivot
nest = test_viz.nest_procs(procs)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(None, nest[i].get("val"))
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(3, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
self.assertEqual(
1, len(nest[0]["children"][0]["children"][0]["children"][0]["children"])
)
def test_get_data_calls_correct_method(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
with self.assertRaises(ValueError):
test_viz.get_data(df)
test_viz.levels_for = Mock(return_value=1)
test_viz.nest_values = Mock(return_value=1)
test_viz.form_data["groupby"] = ["groups"]
test_viz.form_data["time_series_option"] = "not_time"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "agg_sum"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "agg_mean"
test_viz.get_data(df)
self.assertEqual("agg_mean", test_viz.levels_for.mock_calls[2][1][0])
test_viz.form_data["time_series_option"] = "point_diff"
test_viz.levels_for_diff = Mock(return_value=1)
test_viz.get_data(df)
self.assertEqual("point_diff", test_viz.levels_for_diff.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "point_percent"
test_viz.get_data(df)
self.assertEqual("point_percent", test_viz.levels_for_diff.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "point_factor"
test_viz.get_data(df)
self.assertEqual("point_factor", test_viz.levels_for_diff.mock_calls[2][1][0])
test_viz.levels_for_time = Mock(return_value=1)
test_viz.nest_procs = Mock(return_value=1)
test_viz.form_data["time_series_option"] = "adv_anal"
test_viz.get_data(df)
self.assertEqual(1, len(test_viz.levels_for_time.mock_calls))
self.assertEqual(1, len(test_viz.nest_procs.mock_calls))
test_viz.form_data["time_series_option"] = "time_series"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[3][1][0])
self.assertEqual(7, len(test_viz.nest_values.mock_calls))
class TestRoseVis(SupersetTestCase):
def test_rose_vis_get_data(self):
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
t3 = pd.Timestamp("2004")
raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df = pd.DataFrame(raw)
fd = {"metrics": ["metric1"], "groupby": ["groupA"]}
test_viz = viz.RoseViz(Mock(), fd)
test_viz.metrics = fd["metrics"]
res = test_viz.get_data(df)
expected = {
946684800000000000: [
{"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
{"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
{"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
],
1009843200000000000: [
{"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
{"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
{"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
],
1072915200000000000: [
{"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
{"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
{"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
],
}
self.assertEqual(expected, res)
class TestTimeSeriesTableViz(SupersetTestCase):
def test_get_data_metrics(self):
form_data = {"metrics": ["sum__A", "count"], "groupby": []}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t2]
raw["sum__A"] = [15, 20]
raw["count"] = [6, 7]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["count", "sum__A"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"sum__A": 15, "count": 6},
t2.strftime(time_format): {"sum__A": 20, "count": 7},
}
self.assertEqual(expected, data["records"])
def test_get_data_group_by(self):
form_data = {"metrics": ["sum__A"], "groupby": ["groupby1"]}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
raw["sum__A"] = [15, 20, 25, 30, 35, 40]
raw["groupby1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["a1", "a2", "a3"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"a1": 15, "a2": 20, "a3": 25},
t2.strftime(time_format): {"a1": 30, "a2": 35, "a3": 40},
}
self.assertEqual(expected, data["records"])
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["a"]}
super_query_obj.return_value = {}
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
form_data["metrics"] = ["x", "y"]
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
class TestBaseDeckGLViz(SupersetTestCase):
def test_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == [form_data.get("size")]
form_data = {}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == []
def test_scatterviz_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {"type": "metric", "value": "int"}
result = test_viz_deckgl.get_metrics()
assert result == ["int"]
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {}
result = test_viz_deckgl.get_metrics()
assert result == []
def test_get_js_columns(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_d = {"a": "dummy1", "b": "dummy2", "c": "dummy3"}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_js_columns(mock_d)
assert result == {"color": None}
def test_get_properties(self):
mock_d = {}
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(NotImplementedError) as context:
test_viz_deckgl.get_properties(mock_d)
self.assertTrue("" in str(context.exception))
def test_process_spatial_query_obj(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_key = "spatial_key"
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(ValueError) as context:
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
self.assertTrue("Bad spatial key" in str(context.exception))
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": ["lon", "lat"],
"delimited_key": ["lonlat"],
"geohash_key": ["geo"],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
assert expected_results.get(mock_key) == mock_gb
def test_geojson_query_obj(self):
form_data = load_fixture("deck_geojson_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
results = test_viz_deckgl.query_obj()
assert results["metrics"] == []
assert results["groupby"] == []
assert results["columns"] == ["test_col"]
def test_parse_coordinates(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
viz_instance = viz.BaseDeckGLViz(datasource, form_data)
coord = viz_instance.parse_coordinates("1.23, 3.21")
self.assertEqual(coord, (1.23, 3.21))
coord = viz_instance.parse_coordinates("1.23 3.21")
self.assertEqual(coord, (1.23, 3.21))
self.assertEqual(viz_instance.parse_coordinates(None), None)
self.assertEqual(viz_instance.parse_coordinates(""), None)
def test_parse_coordinates_raises(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("NULL")
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj")
@patch("superset.utils.core.uuid.uuid4")
def test_filter_nulls(self, mock_uuid4):
mock_uuid4.return_value = uuid.UUID("12345678123456781234567812345678")
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lat",
"isExtra": False,
},
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lon",
"isExtra": False,
},
],
"delimited_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lonlat",
"isExtra": False,
}
],
"geohash_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "geo",
"isExtra": False,
}
],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data.copy())
test_viz_deckgl.spatial_control_keys = [mock_key]
test_viz_deckgl.add_null_filters()
adhoc_filters = test_viz_deckgl.form_data["adhoc_filters"]
assert expected_results.get(mock_key) == adhoc_filters
class TestTimeSeriesViz(SupersetTestCase):
def test_timeseries_unicode_data(self):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["name"], "metrics": ["sum__payout"]}
raw = {}
raw["name"] = [
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid Basket",
"Real Madrid Basket",
]
raw["__timestamp"] = [
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
]
raw["sum__payout"] = [2, 2, 4, 4]
df = pd.DataFrame(raw)
test_viz = viz.NVD3TimeSeriesViz(datasource, form_data)
viz_data = {}
viz_data = test_viz.get_data(df)
expected = [
{
u"values": [
{u"y": 4, u"x": u"2018-02-20T00:00:00"},
{u"y": 4, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid Basket",),
},
{
u"values": [
{u"y": 2, u"x": u"2018-02-20T00:00:00"},
{u"y": 2, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7",),
},
]
self.assertEqual(expected, viz_data)
def test_process_data_resample(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"__timestamp": pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 5.0, 7.0],
}
)
self.assertEqual(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "sum", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0],
)
np.testing.assert_equal(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "asfreq", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, np.nan, np.nan, 5.0, np.nan, 7.0],
)
def test_apply_rolling(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
index=pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
data={"y": [1.0, 2.0, 3.0, 4.0]},
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "cumsum",
"rolling_periods": 0,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 6.0, 10.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "sum",
"rolling_periods": 2,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 5.0, 7.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "mean",
"rolling_periods": 10,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 1.5, 2.0, 2.5],
)
def test_apply_rolling_without_data(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
index=pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
data={"y": [1.0, 2.0, 3.0, 4.0]},
)
test_viz = viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "cumsum",
"rolling_periods": 4,
"min_periods": 4,
},
)
with pytest.raises(QueryObjectValidationError):
test_viz.apply_rolling(df)
class TestBigNumberViz(SupersetTestCase):
def test_get_data(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 3.0, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
self.assertEqual(data[2], {DTTM_ALIAS: pd.Timestamp("2019-01-05"), "y": 3})
def test_get_data_with_none(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, None, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
assert np.isnan(data[2]["y"])
class TestPivotTableViz(SupersetTestCase):
df = pd.DataFrame(
data={
"intcol": [1, 2, 3, None],
"floatcol": [0.1, 0.2, 0.3, None],
"strcol": ["a", "b", "c", None],
}
)
def test_get_aggfunc_numeric(self):
# is a sum function
func = viz.PivotTableViz.get_aggfunc("intcol", self.df, {})
assert hasattr(func, "__call__")
assert func(self.df["intcol"]) == 6
assert (
viz.PivotTableViz.get_aggfunc("intcol", self.df, {"pandas_aggfunc": "min"})
== "min"
)
assert (
viz.PivotTableViz.get_aggfunc(
"floatcol", self.df, {"pandas_aggfunc": "max"}
)
== "max"
)
def test_get_aggfunc_non_numeric(self):
assert viz.PivotTableViz.get_aggfunc("strcol", self.df, {}) == "max"
assert (
viz.PivotTableViz.get_aggfunc("strcol", self.df, {"pandas_aggfunc": "sum"})
== "max"
)
assert (
viz.PivotTableViz.get_aggfunc("strcol", self.df, {"pandas_aggfunc": "min"})
== "min"
)
def test_format_datetime_from_pd_timestamp(self):
tstamp = pd.Timestamp(datetime(2020, 9, 3, tzinfo=timezone.utc))
assert (
viz.PivotTableViz._format_datetime(tstamp) == "__timestamp:1599091200000.0"
)
def test_format_datetime_from_datetime(self):
tstamp = datetime(2020, 9, 3, tzinfo=timezone.utc)
assert (
viz.PivotTableViz._format_datetime(tstamp) == "__timestamp:1599091200000.0"
)
def test_format_datetime_from_date(self):
tstamp = date(2020, 9, 3)
assert (
viz.PivotTableViz._format_datetime(tstamp) == "__timestamp:1599091200000.0"
)
def test_format_datetime_from_string(self):
tstamp = "2020-09-03T00:00:00"
assert (
viz.PivotTableViz._format_datetime(tstamp) == "__timestamp:1599091200000.0"
)
def test_format_datetime_from_invalid_string(self):
tstamp = "abracadabra"
assert viz.PivotTableViz._format_datetime(tstamp) == tstamp
def test_format_datetime_from_int(self):
assert viz.PivotTableViz._format_datetime(123) == 123
assert viz.PivotTableViz._format_datetime(123.0) == 123.0
| apache-2.0 | -8,580,062,733,230,792,000 | 37.217893 | 88 | 0.47491 | false |
mpienkosz/cifar10-classification | utils/visualization.py | 1 | 1389 | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# A helper function for plotting a dataset of 2-D features (different colors for different classes)
def plot_features(X_proj, Y_train):
colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'brown', 'gray', 'orange']
Y_colors = [colors[val] for val in Y_train]
plt.scatter(X_proj[:, 0], X_proj[:, 1], c=Y_colors, s=5)
plt.savefig('resources/features.png')
# A helper function for plotting sample images from dataset
def plot_images(images, labels, num_samples):
cifar_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
fig, axes = plt.subplots(num_samples,num_samples)
for row in range(num_samples):
images_for_label = [img for img, label in zip(images, labels) if label == row]
random_indices = np.random.choice(range(len(images_for_label)), num_samples, replace=False)
for col, random_idx in enumerate(random_indices):
axes[row][col].imshow(images_for_label[random_idx])
axes[row][col].get_xaxis().set_ticks([])
axes[row][col].get_yaxis().set_ticks([])
axes[row][0].set_ylabel(cifar_classes[row], labelpad=50, rotation=0)
#plt.subplots_adjust(left=0.1, right=0.2)
plt.savefig('resources/cifar_sample', bbox_inches='tight') | mit | -317,488,456,261,648,960 | 52.461538 | 110 | 0.659467 | false |
crowd-ai/post-processing-experiments | inference2D.py | 1 | 5596 | """
Adapted from the inference.py to demonstate the usage of the util functions.
"""
import sys
import numpy as np
import pydensecrf.densecrf as dcrf
import ipdb
# Get im{read,write} from somewhere.
try:
from cv2 import imread, imwrite
except ImportError:
# Note that, sadly, skimage unconditionally import scipy and matplotlib,
# so you'll need them if you don't have OpenCV. But you probably have them.
from skimage.io import imread, imsave
imwrite = imsave
# TODO: Use scipy instead.
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian
if len(sys.argv) != 4:
print("Usage: python {} IMAGE ANNO OUTPUT".format(sys.argv[0]))
print("")
print("IMAGE and ANNO are inputs and OUTPUT is where the result should be written.")
print("If there's at least one single full-black pixel in ANNO, black is assumed to mean unknown.")
sys.exit(1)
fn_im = sys.argv[1]
fn_anno = sys.argv[2]
fn_output = sys.argv[3]
##################################
### Read images and annotation ###
##################################
img = imread(fn_im)
# Convert the annotation's RGB color to a single 32-bit integer color 0xBBGGRR
anno_rgb = imread(fn_anno,0).astype(np.int32)
ipdb.set_trace()
anno_rgb[anno_rgb>=5] = 1   # map any annotation value >= 5 down to label 1
anno_rgb += 1               # shift all labels up by one so that 0 is free to mean "unknown"
anno_rgb[0,0] = 0           # mark one pixel as unknown so the HAS_UNK branch below is triggered
#anno_lbl = anno_rgb[:,:,0] + (anno_rgb[:,:,1] << 8) + (anno_rgb[:,:,2] << 16)
anno_lbl = anno_rgb
# Convert the 32bit integer color to 1, 2, ... labels.
# Note that all-black, i.e. the value 0 for background will stay 0.
#colors, labels = np.unique(anno_lbl, return_inverse=True)
colors = np.unique(anno_lbl)
labels = anno_lbl
# But remove the all-0 black, that won't exist in the MAP!
HAS_UNK = 0 in colors
if HAS_UNK:
print("Found a full-black pixel in annotation image, assuming it means 'unknown' label, and will thus not be present in the output!")
print("If 0 is an actual label for you, consider writing your own code, or simply giving your labels only non-zero values.")
colors = colors[1:]
#else:
# print("No single full-black pixel found in annotation image. Assuming there's no 'unknown' label!")
# Compute the number of classes in the label image.
# We subtract one because the number shouldn't include the value 0 which stands
# for "unknown" or "unsure".
n_labels = len(set(labels.flat)) - int(HAS_UNK)
print(n_labels, " labels", (" plus \"unknown\" 0: " if HAS_UNK else ""), set(labels.flat))
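# Illustrative sketch (not part of the original script) of how the label count is
# derived; the toy array below is an assumption for the example only:
#   toy = np.array([[0, 1], [2, 2]], dtype=np.int32)  # 0 plays the "unknown" role
#   toy_colors = np.unique(toy)                       # -> array([0, 1, 2])
#   toy_has_unk = 0 in toy_colors                     # -> True
#   len(set(toy.flat)) - int(toy_has_unk)             # -> 2 labels passed to the CRF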
###########################
### Setup the CRF model ###
###########################
use_2d = False
#use_2d = True
if use_2d:
print("Using 2D specialized functions")
# Example using the DenseCRF2D code
d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=HAS_UNK)
d.setUnaryEnergy(U)
## This adds the color-independent term, features are the locations only.
#d.addPairwiseGaussian(sxy=(3, 3), compat=3, kernel=dcrf.DIAG_KERNEL,
#normalization=dcrf.NORMALIZE_SYMMETRIC)
## This adds the color-dependent term, i.e. features are (x,y,r,g,b).
#d.addPairwiseBilateral(sxy=(80, 80), srgb=(13, 13, 13), rgbim=img,
#compat=10,
#kernel=dcrf.DIAG_KERNEL,
#normalization=dcrf.NORMALIZE_SYMMETRIC)
# This adds the color-independent term, features are the locations only.
d.addPairwiseGaussian(sxy=(10, 10), compat=3, kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# This adds the color-dependent term, i.e. features are (x,y,r,g,b).
d.addPairwiseBilateral(sxy=(50, 50), srgb=(20, 20, 20), rgbim=img,
compat=10,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
else:
print("Using generic 2D functions")
# Example using the DenseCRF class and the util functions
d = dcrf.DenseCRF(img.shape[1] * img.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=HAS_UNK)
d.setUnaryEnergy(U)
# This creates the color-independent features and then add them to the CRF
feats = create_pairwise_gaussian(sdims=(3, 3), shape=img.shape[:2])
d.addPairwiseEnergy(feats, compat=3,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# This creates the color-dependent features and then add them to the CRF
feats = create_pairwise_bilateral(sdims=(80, 80), schan=(13, 13, 13),
img=img, chdim=2)
d.addPairwiseEnergy(feats, compat=10,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
####################################
### Do inference and compute MAP ###
####################################
# Run five inference steps.
Q = d.inference(5)
# Find out the most probable class for each pixel.
MAP = np.argmax(Q, axis=0)
# Convert the MAP (labels) back to the corresponding colors and save the image.
# Note that there is no "unknown" here anymore, no matter what we had at first.
ipdb.set_trace()
imwrite(fn_output, MAP.reshape(img.shape[0], img.shape[1]))
# Just randomly manually run inference iterations
Q, tmp1, tmp2 = d.startInference()
for i in range(5):
print("KL-divergence at {}: {}".format(i, d.klDivergence(Q)))
d.stepInference(Q, tmp1, tmp2)
| mit | -4,922,680,398,478,113,000 | 38.132867 | 137 | 0.63867 | false |
jwass/geopandas | setup.py | 1 | 2285 | #!/usr/bin/env python
"""Installation script
Version handling borrowed from pandas project.
"""
import os
import warnings
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
LONG_DESCRIPTION = """GeoPandas is a project to add support for geographic data to
`pandas`_ objects.
The goal of GeoPandas is to make working with geospatial data in
python easier. It combines the capabilities of `pandas`_ and `shapely`_,
providing geospatial operations in pandas and a high-level interface
to multiple geometries to shapely. GeoPandas enables you to easily do
operations in python that would otherwise require a spatial database
such as PostGIS.
.. _pandas: http://pandas.pydata.org
.. _shapely: http://toblerity.github.io/shapely
"""
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
if not ISRELEASED:
FULLVERSION += '.dev'
try:
import subprocess
try:
pipe = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"],
stdout=subprocess.PIPE).stdout
except OSError:
# msysgit compatibility
pipe = subprocess.Popen(
["git.cmd", "describe", "HEAD"],
stdout=subprocess.PIPE).stdout
rev = pipe.read().strip()
FULLVERSION = '%d.%d.%d.dev-%s' % (MAJOR, MINOR, MICRO, rev)
except:
warnings.warn("WARNING: Couldn't get git revision")
else:
FULLVERSION += QUALIFIER
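# Illustrative note (values are hypothetical): a development checkout ends up with
# something like FULLVERSION == '0.1.0.dev-1a2b3c4', where '1a2b3c4' is the short git
# revision, while a release build (ISRELEASED = True) gives '0.1.0' plus QUALIFIER.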
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'geopandas', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (FULLVERSION, VERSION))
finally:
a.close()
write_version_py()
setup(name='geopandas',
version=FULLVERSION,
description='Geographic pandas extensions',
license='BSD',
author='Kelsey Jordahl',
author_email='kjordahl@enthought.com',
url='http://geopandas.org',
long_description=LONG_DESCRIPTION,
packages=['geopandas'],
install_requires=['pandas', 'shapely', 'fiona', 'descartes', 'pyproj'],
)
| bsd-3-clause | 4,994,985,725,104,256,000 | 25.569767 | 82 | 0.638512 | false |
RAPD/RAPD | src/plugins/merge/rapd_agent_mergemany.py | 1 | 62002 | '''
Created August 09, 2012
By Kay Perry, Frank Murphy
Distributed under the terms of the GNU General Public License.
This file is part of RAPD
RAPD is a free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3 of the license,
or (at your option) any later version.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
rapd_agent_mergemany.py is a pipeline to be used by the rapd_cluster.py
process for combining multiple data wedges created by
the integration pipeline of rapd
This pipeline expects a command with:
datasets = list of all filenames to be merged
'''
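# Illustrative sketch of the input this plugin unpacks below (the concrete values are
# assumptions for documentation, not an official RAPD command definition):
#   input = ('MERGE',                                              # command
#            {'work': '/gpfs/users/test/merge'},                   # dirs
#            ['/data/w1/XDS_ASCII.HKL', '/data/w2/XDS_ASCII.HKL'], # datasets
#            {'cmdline': True, 'process_id': 0,
#             'cleanup_files': True, 'cutoff': 0.95,
#             'prefix': 'merged'},                                 # settings
#            ('127.0.0.1', 50001))                                 # controller address
#   MergeMany(input, logger)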
from multiprocessing import Process, cpu_count
import matplotlib
# Force matplotlib to not use any Xwindows backend. Must be called before any other matplotlib/pylab import.
matplotlib.use('Agg')
import os, subprocess, sys
#import stat
import logging, logging.handlers
#import threading
import hashlib
import shutil
from itertools import combinations
from collections import OrderedDict
from iotbx import reflection_file_reader
from cctbx import miller
from cctbx.array_family import flex
from cctbx.sgtbx import space_group_symbols
from hcluster import linkage,dendrogram
import cPickle as pickle # For storing dicts as pickle files for later use
class MergeMany(Process):
"""
To merge multiple datasets from RAPD's integration pipeline
"""
def __init__(self, input, logger):
"""
Initialize the merging process, which uses agglomerative hierarchical clustering
"""
logger.info('HCMerge::Initiating variables.')
# Setting up data input
self.input = input[0:4]
self.controller_address = input[-1]
self.logger = logger
self.command = self.input[0]
self.dirs = self.input[1]
# self.datasets = self.command[2]['MergeMany'] # RAPD 1 - This is a dict of all results. Each result is a dict.
# List of original data files to be merged. Currently expected to be ASCII.HKL
self.datasets = self.input[2]
self.settings = self.input[3]
# Variables
self.cmdline = self.settings['cmdline']
self.process_id = self.settings['process_id']
if 'work_dir_override' in self.settings:
if (self.settings['work_dir_override'] == True or
self.settings['work_dir_override'] == 'True'):
self.dirs['work'] = self.settings['work_directory']
# Variables for holding filenames and results
self.data_files = [] # List of data file names
self.graphs = {}
self.results = {}
self.merged_files = [] # List of prefixes for final files.
# dict for keeping track of file identities
self.id_list = OrderedDict() # Dict will hold prefix as key and pair of file names as value
self.dirs['data'] = 'DATA'
# Establish setting defaults
# Check for agglomerative clustering linkage method
# Options for linkage are:
# single: the single/min/nearest algorithm. (alias)
# complete: the complete/max/farthest algorithm. (alias)
# average: the average/UPGMA algorithm. (alias)
# weighted: the weighted/WPGMA algorithm. (alias)
# centroid: the centroid/UPGMC algorithm. (alias)
# median: the median/WPGMC algorithm. (alias)
# ward: the Ward/incremental algorithm. (alias)
if self.settings.has_key('method'):
self.method = self.settings['method']
else:
self.method = 'complete'
# Check for cutoff value
if self.settings.has_key('cutoff'):
self.cutoff = self.settings['cutoff'] # CC 1/2 value passed in by user
else:
self.cutoff = 0.95
# Check for filename for merged dataset
if self.settings.has_key('prefix'):
self.prefix = self.settings['prefix']
else:
self.prefix = 'merged'
# Check for user-defined spacegroup
if self.settings.has_key('user_spacegroup'):
self.user_spacegroup = self.settings['user_spacegroup']
else:
self.user_spacegroup = 0 # Default to None
# Check for user-defined high resolution cutoff
if self.settings.has_key('resolution'):
self.resolution = self.settings['resolution']
else:
self.resolution = 0 # Default high resolution limit to 0
# Check for file cleanup
if self.settings.has_key('cleanup_files'):
self.cleanup_files = self.settings['cleanup_files']
else:
self.cleanup_files = True
# Check whether to make all clusters or the first one that exceeds the cutoff
if self.settings.has_key('all_clusters'):
self.all_clusters=self.settings['all_clusters']
else:
self.all_clusters = False
# Check whether to add labels to the dendrogram
if self.settings.has_key('labels'):
self.labels=self.settings['labels']
else:
self.labels = False
# Check whether to start at the beginning or skip to a later step
if self.settings.has_key('start_point'):
self.start_point=self.settings['start_point']
else:
self.start_point= 'start'
# Check whether to skip prechecking files during preprocess
if self.settings.has_key('precheck'):
self.precheck=self.settings['precheck']
else:
self.precheck = True
# Set resolution for dendrogram image
if self.settings.has_key('dpi'):
self.dpi=self.settings['dpi']
else:
self.dpi = 100
# Check on number of processors
if self.settings.has_key('nproc'):
self.nproc = self.settings['nproc']
else:
try:
self.nproc = cpu_count()
# cpu_count() can raise NotImplementedError
except:
self.nproc = 1
# Check on whether job should be run on a cluster
if self.settings.has_key('cluster_use'):
if self.settings['cluster_use'] == True:
self.cmd_prefix = 'qsub -N combine -sync y'
else:
self.cmd_prefix = 'sh'
else:
self.cmd_prefix = 'sh'
Process.__init__(self,name='MergeMany')
self.start()
def run(self):
self.logger.debug('HCMerge::Data Merging Started.')
# TODO: extend this to also work while data are still being collected. Currently it is
# set up for the case where data collection is complete.
# Provide a flag for whether data are still being collected or collection is complete
try:
if self.start_point == 'start':
self.preprocess()
self.process()
else:
pkl_file = self.datasets
self.rerun(pkl_file)
self.postprocess()
except ValueError, Argument:
self.logger.error('HCMerge::Failure to Run.')
self.logger.exception(Argument)
def preprocess(self):
"""
Before running the main process
- change to the current directory
- copy files to the working directory
- convert all files to cctbx usable format and save in self
- test reflection files for acceptable format (XDS and unmerged mtz only)
- ensure all files are the same format
"""
self.logger.debug('HCMerge::Prechecking files: %s' % str(self.datasets))
if self.precheck:
# mtz and xds produce different file formats. Check for type to do duplicate comparison specific to file type.
types = []
hashset = {}
for dataset in self.datasets:
reflection_file = reflection_file_reader.any_reflection_file(file_name=dataset)
types.append(reflection_file.file_type()) # Get types for format test
hashset[dataset] = hashlib.md5(open(dataset, 'rb').read()).hexdigest() # hash for duplicates test
# Test for SCA format
if reflection_file.file_type() == 'scalepack_no_merge_original_index' or reflection_file.file_type() == 'scalepack_merge':
self.logger.error('HCMerge::Scalepack format. Aborted')
raise ValueError("Scalepack Format. Unmerged mtz format required.")
# Test reflection files to make sure they are XDS or MTZ format
elif reflection_file.file_type() != 'xds_ascii' and reflection_file.file_type() != 'ccp4_mtz':
self.logger.error('HCMerge::%s Reflection Check Failed. Not XDS or unmerged MTZ format.' % reflection_file.file_name())
raise ValueError("%s has incorrect file format. Unmerged reflections in XDS or MTZ format only." % reflection_file.file_name())
# Test for all the same format
elif len(set(types)) > 1:
self.logger.error('HCMerge::Too Many File Types')
raise ValueError("All files must be the same type and format.")
# Test reflection files to make sure they have observations
elif ((reflection_file.file_type() == 'xds_ascii') and (reflection_file.file_content().iobs.size() == 0)):
self.logger.error('HCMerge::%s Reflection Check Failed. No Observations.' % reflection_file.file_name())
raise ValueError("%s Reflection Check Failed. No Observations." % reflection_file.file_name())
elif ((reflection_file.file_type() == 'ccp4_mtz') and (reflection_file.file_content().n_reflections() == 0)):
self.logger.error('HCMerge::%s Reflection Check Failed. No Observations.' % reflection_file.file_name())
raise ValueError("%s Reflection Check Failed. No Observations." % reflection_file.file_name())
# Test reflection file if mtz and make sure it isn't merged by checking for amplitude column
elif ((reflection_file.file_type() == 'ccp4_mtz') and ('F' in reflection_file.file_content().column_labels())):
self.logger.error('HCMerge::%s Reflection Check Failed. Must be unmerged reflections.' % reflection_file.file_name())
raise ValueError("%s Reflection Check Failed. Must be unmerged reflections." % reflection_file.file_name())
# Test reflection files to make sure there are no duplicates
combos_temp = self.make_combinations(self.datasets,2)
for combo in combos_temp:
if hashset[combo[0]] == hashset[combo[1]]:
self.datasets.remove(combo[1]) # Remove second occurrence in list of datasets
self.logger.error('HCMerge::Same file Entered Twice. %s deleted from list.' % combo[1])
# Make and move to the work directory
if os.path.isdir(self.dirs['work']) == False:
os.makedirs(self.dirs['work'])
os.chdir(self.dirs['work'])
else:
combine_dir = self.create_subdirectory(prefix='COMBINE', path=self.dirs['work'])
os.chdir(combine_dir)
# convert all files to mtz format
# copy the files to be merged to the work directory
for count, dataset in enumerate(self.datasets):
hkl_filename = str(count)+'_'+dataset.rsplit("/",1)[1].rsplit(".",1)[0]+'.mtz'
if self.user_spacegroup != 0:
sg = space_group_symbols(self.user_spacegroup).universal_hermann_mauguin()
self.logger.debug('HCMerge::Converting %s to %s and copying to Working Directory.' % (str(hkl_filename), str(sg)))
out_file = hkl_filename.rsplit(".",1)[0]
command = []
command.append('pointless hklout '+hkl_filename+'> '+out_file+'_import.log <<eof \n')
command.append('xdsin '+dataset+' \n')
command.append('lauegroup %s \n' % sg)
command.append('choose spacegroup %s \n' % sg)
command.append('eof\n')
comfile = open(out_file+'_import.sh','w')
comfile.writelines(command)
comfile.close()
os.chmod('./'+out_file+'_import.sh',0755)
p = subprocess.Popen(self.cmd_prefix+' ./'+out_file+'_import.sh',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
else:
self.logger.debug('HCMerge::Copying %s to Working Directory.' % str(dataset))
p = subprocess.Popen('pointless -copy xdsin ' + dataset + ' hklout ' + hkl_filename,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
# Make a list of filenames
self.data_files.append(hkl_filename)
def process(self):
"""
Make 1x1 combinations, combine using pointless, scale, determine CC, make matrix and select dataset
over CC cutoff
"""
self.logger.debug('HCMerge::Data Merging Started.')
# Make 1 x 1 combinations
combos = self.make_combinations(self.data_files,2)
# lists for running the multiprocessing
jobs = []
# combine the files with POINTLESS
for pair in combos:
outfile_prefix = str(pair[0].split('_')[0])+'x'+str(pair[1].split('_')[0])
self.id_list[outfile_prefix] = pair
# combine = pool.map(self.merge,id_list)
combine = Process(target=self.combine,args=(pair,outfile_prefix))
jobs.append(combine)
# combine.start()
# combine = self.combine(pair,outfile_prefix)
# Wait for all worker processes to finish
numjobs = len(jobs)
jobNum = 0
while jobNum < numjobs:
endjobNum = min(jobNum+self.nproc, numjobs)
for job in jobs[jobNum:endjobNum]:
job.start()
for job in jobs[jobNum:endjobNum]:
job.join()
jobNum = endjobNum
# When POINTLESS is complete, calculate correlation coefficient
for pair in self.id_list.keys():
self.results[pair] = {}
if os.path.isfile(pair+'_pointless.mtz'):
# First, get batch information from pointless log file
batches = self.get_batch(pair)
# Second, check if both datasets made it into the final mtz
if (batches.get(1) and batches.get(2)):
# Third, calculate the linear correlation coefficient if there are two datasets
self.results[pair]['CC'] = self.get_cc_pointless(pair,batches) # results are a dict with pair as key
else:
# If only one dataset in mtz, default to no correlation.
self.logger.error('HCMerge::%s_pointless.mtz has only one run. CC defaults to 0.'% pair)
self.results[pair]['CC'] = 0
else:
self.results[pair]['CC'] = 0
# Make relationship matrix
matrix = self.make_matrix(self.method)
# Find data above CC cutoff. Key 0 is most wedges and above CC cutoff
wedge_files = self.select_data(matrix, 1 - self.cutoff)
# Merge files in selected wedges together using POINTLESS and AIMLESS
self.merge_wedges(wedge_files)
# Store the dicts for future use
self.store_dicts({'data_files': self.data_files, 'id_list': self.id_list,
'results': self.results, 'graphs': self.graphs, 'matrix': matrix,
'merged_files': self.merged_files})
# Make the summary text file for all merged files
self.make_log(self.merged_files)
# Make the dendrogram and write it out as a PNG
self.make_dendrogram(matrix, self.dpi)
self.logger.debug('HCMerge::Data merging finished.')
def process_continuous(self):
"""
As wedges are collected, continuously make 1x1 combinations, combine using pointless, scale, determine CC,
make matrix and select dataset over CC cutoff
"""
def postprocess(self):
"""
Data transfer, file cleanup and other maintenance issues.
"""
self.logger.debug('HCMerge::Cleaning up in postprocess.')
# Copy original datasets to a DATA directory
data_dir = self.create_subdirectory(prefix='DATA', path=self.dirs['work'])
for file in self.data_files:
shutil.copy(file,data_dir)
self.get_dicts(self.prefix + '.pkl')
self.store_dicts({'data_files': self.data_files, 'id_list': self.id_list,
'results': self.results, 'graphs': self.graphs, 'matrix': self.matrix,
'merged_files': self.merged_files, 'data_dir': data_dir})
# Check for postprocessing flags
if self.settings['cleanup_files']:
self.cleanup()
if self.cmdline is False:
self.write_db()
self.results['status'] = 'SUCCESS'
#print tables
# Move final files to top directory
for file in self.merged_files:
shutil.copy(file + '_scaled.mtz', self.dirs['work'])
shutil.copy(file + '_scaled.log', self.dirs['work'])
shutil.copy(self.prefix + '-dendrogram.png', self.dirs['work'])
shutil.copy(self.prefix + '.log', self.dirs['work'])
shutil.copy(self.prefix + '.pkl', self.dirs['work'])
def make_combinations(self, files, number=2):
"""
Makes combinations using itertools
files = list of files to be combined (usually self.data_files)
number = number of files in a combination. For a pair, use 2
"""
self.logger.debug('HCMerge::Setting up %s as %s file combinations' % (str(files),number))
combos = list()
for i in combinations(files,number):
combos.append(i)
return(combos)
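# Illustrative example (file names are placeholders): with
#   self.data_files = ['0_a.mtz', '1_b.mtz', '2_c.mtz']
# make_combinations(self.data_files, 2) yields
#   [('0_a.mtz', '1_b.mtz'), ('0_a.mtz', '2_c.mtz'), ('1_b.mtz', '2_c.mtz')]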
def combine(self, in_files, out_file):
"""
Combine XDS_ASCII.HKL files using POINTLESS
in_files = list of files
"""
self.logger.debug('HCMerge::Pair-wise joining of %s using pointless.' % str(in_files))
# command = ['#!/bin/csh \n']
command = []
command.append('pointless hklout '+out_file+'_pointless.mtz> '+out_file+'_pointless.log <<eof \n')
for hklin in in_files:
command.append('hklin '+hklin+' \n')
# Add ability to do batches
# Make TOLERANCE huge to accept unit cell variations
command.append('tolerance 1000.0 \n')
# Add LAUEGROUP if user has chosen a spacegroup
if self.user_spacegroup:
command.append('lauegroup %s \n' % space_group_symbols(self.user_spacegroup).universal_hermann_mauguin())
command.append('choose spacegroup %s \n' % space_group_symbols(self.user_spacegroup).universal_hermann_mauguin())
command.append('eof\n')
comfile = open(out_file+'_pointless.sh','w')
comfile.writelines(command)
comfile.close()
os.chmod('./'+out_file+'_pointless.sh',0755)
# p = subprocess.Popen('qsub -N combine -sync y ./'+out_file+'_pointless.sh',shell=True).wait()
p = subprocess.Popen(self.cmd_prefix+' ./'+out_file+'_pointless.sh',
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).communicate()
# p = utils.processCluster('sh '+out_file+'_pointless.sh')
if self.user_spacegroup == 0:
# Sub-routine for different point groups
if (p[0] == '' and p[1] == '') == False:
self.logger.debug('HCMerge::Error Messages from %s pointless log. %s' % (out_file, str(p)))
if ('WARNING: Cannot combine reflection lists with different symmetry' or 'ERROR: cannot combine files belonging to different crystal systems') in p[1]:
self.logger.debug('HCMerge::Different symmetries. Placing %s in best spacegroup.' % str(in_files))
for hklin in in_files:
cmd = []
cmd.append('pointless hklin '+hklin+' hklout '+hklin+'> '+hklin+'_p.log \n')
subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE, stderr=subprocess.PIPE).wait()
p = subprocess.Popen(self.cmd_prefix+' ./'+out_file+'_pointless.sh',
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).communicate()
if 'WARNING: Cannot combine reflection lists with different symmetry' in p[1]:
self.logger.debug('HCMerge::Still different symmetries after best spacegroup. Reducing %s to P1.' % str(in_files))
for hklin in in_files:
cmd = []
hklout = hklin.rsplit('.',1)[0]+'p1.mtz'
cmd.append('pointless hklin '+hklin+' hklout '+hklout+'> '+hklin+'_p1.log <<eof \n')
cmd.append('lauegroup P1 \n')
cmd.append('choose spacegroup P1 \n')
cmd.append('eof\n')
cmdfile = open('p1_pointless.sh','w')
cmdfile.writelines(cmd)
cmdfile.close()
os.chmod('./p1_pointless.sh',0755)
p1 = subprocess.Popen('p1_pointless.sh',
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).communicate()
# Point only the hklin lines at the P1-reduced files, leaving the hklout/log names unchanged
command = [x.replace('.mtz', 'p1.mtz') if 'hklin' in x else x for x in command]
comfile = open(out_file+'_pointless.sh','w')
comfile.writelines(command)
comfile.close()
p = subprocess.Popen(self.cmd_prefix+' ./'+out_file+'_pointless.sh',
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).communicate()
# Check for known FATAL ERROR of unable to pick LAUE GROUP due to not enough reflections
plog = open(out_file+'_pointless.log', 'r').readlines()
for num,line in enumerate(plog):
if line.startswith('FATAL ERROR'):
# Go to the next line for error message
if 'ERROR: cannot decide on which Laue group to select\n' in plog[num+1]:
self.logger.debug('HCMerge::Cannot automatically choose a Laue group. Forcing solution 1.')
for num,itm in enumerate(command):
if itm == 'eof\n':
command.insert(num, 'choose solution 1\n' )
break
comfile = open(out_file+'_pointless.sh','w')
comfile.writelines(command)
comfile.close()
# Run pointless again with new keyword
p = subprocess.Popen(self.cmd_prefix+' ./'+out_file+'_pointless.sh',
shell=True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).wait()
if 'ERROR: cannot combine files belonging to different crystal systems' in plog[num+1]:
self.logger.debug('HCMerge:: Forcing P1 due to different crystal systems in %s.' % str(in_files))
for hklin in in_files:
cmd = []
hklout = hklin.rsplit('.',1)[0]+'p1.mtz'
cmd.append('pointless hklin '+hklin+' hklout '+hklout+'> '+hklin+'_p1.log <<eof \n')
cmd.append('lauegroup P1 \n')
cmd.append('choose spacegroup P1 \n')
cmd.append('eof\n')
cmdfile = open('p1_pointless.sh','w')
cmdfile.writelines(cmd)
cmdfile.close()
os.chmod('./p1_pointless.sh',0755)
p1 = subprocess.Popen('p1_pointless.sh',
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).communicate()
# Point only the hklin lines at the P1-reduced files, leaving the hklout/log names unchanged
command = [x.replace('.mtz', 'p1.mtz') if 'hklin' in x else x for x in command]
comfile = open(out_file+'_pointless.sh','w')
comfile.writelines(command)
comfile.close()
p = subprocess.Popen(self.cmd_prefix+' ./'+out_file+'_pointless.sh',
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).communicate()
# sts = os.waitpid(p.pid, 1)[1]
def get_batch(self, in_file):
"""
Obtain batch numbers from pointless log.
"""
self.logger.debug('HCMerge::get_batch %s from pointless log.' % str(in_file))
batches = {}
log = open(in_file+'_pointless.log','r').readlines()
for line in log:
if ('consists of batches' in line):
sline = line.split()
run_number = int(sline[2])
batch_start = int(sline[6])
batch_end = int(sline[len(sline)-1])
batches[run_number] = (batch_start,batch_end)
self.logger.debug('%s has batches %d to %d' % (in_file,batch_start,batch_end))
return(batches)
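# Illustrative example (the exact log wording is an assumption, chosen to be consistent
# with the token positions used above): a line such as
#   "Run number 2 consists of batches 1001 to 1090"
# splits so that sline[2] == '2', sline[6] == '1001' and the last token is '1090',
# adding {2: (1001, 1090)} to the returned dict.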
def get_cc_pointless(self, in_file, batches):
"""
Calculate correlation coefficient (CC) between two datasets which have been combined
by pointless. Uses cctbx. Reads in an mtz file.
"""
self.logger.debug('HCMerge::get_cc_pointless::Obtain correlation coefficient from %s with batches %s' % (str(in_file),str(batches)))
# Read in mtz file
mtz_file = reflection_file_reader.any_reflection_file(file_name=in_file+'_pointless.mtz')
# Convert to miller arrays
# ma[1] has batch information, ma[2] has I and SIGI
ma = mtz_file.as_miller_arrays(merge_equivalents=False)
# Set up objects to hold miller indices and data for both datasets
data1 = flex.double()
data2 = flex.double()
indices1 = flex.miller_index()
indices2 = flex.miller_index()
run1_batch_start = batches[1][0]
run1_batch_end = batches[1][1]
run2_batch_start = batches[2][0]
run2_batch_end = batches[2][1]
# Separate datasets by batch
for cnt,batch in enumerate(ma[1].data()):
if batch >= run1_batch_start and batch <= run1_batch_end:
data1.append(ma[2].data()[cnt])
indices1.append(ma[2].indices()[cnt])
elif batch >= run2_batch_start and batch <= run2_batch_end:
data2.append(ma[2].data()[cnt])
indices2.append(ma[2].indices()[cnt])
crystal_symmetry=ma[1].crystal_symmetry()
# Create miller arrays for each dataset and merge equivalent reflections
my_millerset1 = miller.set(crystal_symmetry,indices=indices1)
my_miller1 = miller.array(my_millerset1,data=data1)
merged1 = my_miller1.merge_equivalents().array()
my_millerset2 = miller.set(crystal_symmetry,indices=indices2)
my_miller2 = miller.array(my_millerset2,data=data2)
merged2 = my_miller2.merge_equivalents().array()
# Obtain common set of reflections
common1 = merged1.common_set(merged2)
common2 = merged2.common_set(merged1)
# Deal with only 1 or 2 common reflections in small wedges
if (len(common1.indices()) == 1 or len(common1.indices()) == 2):
return(0)
else:
# Calculate correlation between the two datasets.
cc = flex.linear_correlation(common1.data(),common2.data())
self.logger.debug('HCMerge::Linear Correlation Coefficient for %s = %s.' % (str(in_file),str(cc.coefficient())))
return(cc.coefficient())
def get_cc(self, in_files):
"""
Calculate correlation coefficient (CC) between two datasets. Uses cctbx.
"""
# Read in reflection files
file1 = reflection_file_reader.any_reflection_file(file_name=in_files[0])
file2 = reflection_file_reader.any_reflection_file(file_name=in_files[1])
# Convert to miller arrays
# ma[2] has I and SIGI
my_miller1 = file1.as_miller_arrays(merge_equivalents=False)
my_miller2 = file2.as_miller_arrays(merge_equivalents=False)
# Create miller arrays for each dataset and merge equivalent reflections
# Obtain common set of reflections
common1 = my_miller1[0].common_set(my_miller2[0])
common2 = my_miller2[0].common_set(my_miller1[0])
# Calculate correlation between the two datasets.
cc = flex.linear_correlation(common1.data(),common2.data())
return(cc.coefficient())
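# Minimal standalone sketch of the same idea, kept as commented reference rather than
# executable code (file names are placeholders):
#   from iotbx import reflection_file_reader
#   from cctbx.array_family import flex
#   f1 = reflection_file_reader.any_reflection_file(file_name='wedge1.mtz')
#   f2 = reflection_file_reader.any_reflection_file(file_name='wedge2.mtz')
#   ma1 = f1.as_miller_arrays(merge_equivalents=False)[0]
#   ma2 = f2.as_miller_arrays(merge_equivalents=False)[0]
#   common1 = ma1.common_set(ma2)
#   common2 = ma2.common_set(ma1)
#   cc = flex.linear_correlation(common1.data(), common2.data()).coefficient()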
def scale(self, in_file, out_file, VERBOSE=False):
"""
Scaling files using AIMLESS
in_file = prefix for input mtz file from POINTLESS
out_file = prefix for output mtz file
"""
self.logger.debug('HCMerge::Scale with AIMLESS in_file: %s as out_file: %s' % (in_file,out_file))
command = []
command.append('aimless hklin '+in_file+'_pointless.mtz hklout '+out_file+'_scaled.mtz > '+out_file+'_scaled.log <<eof \n')
command.append('bins 10 \n')
command.append('scales constant \n')
command.append('anomalous on \n')
command.append('output mtz scalepack merged \n')
if self.resolution:
command.append('resolution high %s \n' % self.resolution)
# command.append('END \n')
command.append('eof \n')
comfile = open(in_file+'_aimless.sh','w')
comfile.writelines(command)
comfile.close()
os.chmod(in_file+'_aimless.sh',0755)
p = subprocess.Popen(self.cmd_prefix+' ./'+in_file+'_aimless.sh',
shell=True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).wait()
# Check for maximum resolution and re-run scaling if resolution is too high
scalog = open(out_file+'_scaled.log', 'r').readlines()
if self.resolution:
pass
else:
for num,line in enumerate(scalog):
if line.startswith('Estimates of resolution limits: overall'):
# Go to the next line to check resolution
if scalog[num+1].endswith('maximum resolution\n'):
break
elif scalog[num+2].endswith('maximum resolution\n'):
break
# Make exception for really weak data
elif scalog[num+1].endswith('WARNING: weak data, all data below threshold'):
self.results['errormsg'].append('Weak Data. Check %s_scaled.log.\n' % out_file)
break
elif scalog[num+2].endswith('WARNING: weak data, all data below threshold'):
self.results['errormsg'].append('Weak Data. Check %s_scaled.log.\n' % out_file)
break
else:
new_res = min(float(scalog[num+1].split()[8].rstrip('A')),float(scalog[num+2].split()[6].rstrip('A')))
self.logger.debug('HCMerge::Scale resolution %s' % new_res)
for num,itm in enumerate(command):
if itm == 'eof \n':
command.insert(num, 'resolution high %s \n' % new_res)
break
comfile = open(in_file+'_aimless.sh','w')
comfile.writelines(command)
comfile.close()
p = subprocess.Popen(self.cmd_prefix+' ./'+in_file+'_aimless.sh',
shell=True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE).wait()
def parse_aimless(self, in_file_prefix):
"""
Parse the aimless logfile in order to pull out data for graph, and a results dictionary
(for the plots and the results table).
Returns a dictionary for graphs, dictionary for results.
graphs dictionary - key = name of table
value = list of table rows where row0 (first row) holds the the column labels
each row is a list
results dictionary - key = name of result value
value = list of three numbers, 1 - Overall
2 - Inner Shell
3 - Outer Shell
"""
self.logger.debug('HCMerge::Parse AIMLESS %s_scaled.log for statistics.' % in_file_prefix)
# The list 'table_list' contains the names (as given in the aimless log) of the tables
# you wish to extract data from.
# Only tables with these names will be pulled out.
table_list = ['Analysis against all Batches for all runs',
'Analysis against resolution',
'Completeness, multiplicity, Rmeas v. resolution',
'Correlations within dataset']
flag = 0
anom_cut_flag = 0
results = {}
graphs = {}
scalog = open(in_file_prefix+'_scaled.log','r').readlines()
# Remove all the empty lines
scalog = [x for x in scalog if x !='\n']
for line in scalog:
if 'TABLE' in line:
for title in table_list:
if title in line:
# Signal the potential start of a table (flag = 1), get title as key
# for dict and reinitialize the data lists.
table_title = line.rsplit(':',2)[1].rsplit(',',1)[0].strip()
data = []
flag = 1
elif flag == 1:
# Start of a table
if line.startswith('$$'):
flag = 2
elif flag == 2:
# End of a table
if line.startswith('$$'):
graphs[table_title] = data
flag = 0
# Grab column headers for table
elif len(line) > 0 and line.endswith('$$ $$\n'):
sline = line.strip('$$ $$\n').split()
data.append(sline)
elif len(line) > 0 and line.split()[0].isdigit():
sline = line.split()
data.append(sline)
if anom_cut_flag == 1:
if sline[3] == '-' or sline[7] =='-':
pass
else:
anom_cc = float(sline[3])
anom_rcr = float(sline[7])
if anom_cc >= 0.3:
results['CC_cut'] = [ sline[2], sline[3] ]
if anom_rcr >= 1.5:
results['RCR_cut'] = [ sline[2], sline[7]]
elif 'GRAPHS: Anom & Imean CCs' in line:
# Signal for start of the anomalous table, to obtain CC_anom and RCR_anom
anom_cut_flag = 1
elif anom_cut_flag == 1:
if line.startswith('Overall'):
anom_cut_flag = 0
results['CC_anom_overall'] = line.split()[1]
results['RCR_anom_overall'] = line.split()[5]
# Grab statistics from results table at bottom of the log
#bin resolution limits
elif 'Low resolution limit' in line:
results['bins_low']=line.rsplit(None,3)[1:4]
elif 'High resolution limit' in line:
results['bins_high'] = line.rsplit(None,3)[1:4]
#Rmerge
elif 'Rmerge (within I+/I-)' in line:
results['rmerge_anom'] = line.rsplit(None,3)[1:4]
elif 'Rmerge (all I+ and I-)' in line:
results['rmerge_norm'] = line.rsplit(None,3)[1:4]
#Rmeas
elif 'Rmeas (within I+/I-)' in line:
results['rmeas_anom'] = line.rsplit(None,3)[1:4]
elif 'Rmeas (all I+ & I-)' in line:
results['rmeas_norm'] = line.rsplit(None,3)[1:4]
#Rpim
elif 'Rpim (within I+/I-)' in line:
results['rpim_anom'] = line.rsplit(None,3)[1:4]
elif 'Rpim (all I+ & I-)' in line:
results['rpim_norm'] = line.rsplit(None,3)[1:4]
#Number of refections
elif 'Total number of observations' in line:
results['total_obs'] = line.rsplit(None,3)[1:4]
elif 'Total number unique' in line:
results['unique_obs'] = line.rsplit(None,3)[1:4]
#I/sigI
elif 'Mean((I)/sd(I))' in line:
results['isigi'] = line.rsplit(None,3)[1:4]
#CC1/2
elif 'Mn(I) half-set correlation CC(1/2)' in line:
results['CC_half'] = line.rsplit(None,3)[1:4]
#Completeness
elif line.startswith('Completeness'):
results['completeness'] = line.rsplit(None,3)[1:4]
elif 'Anomalous completeness' in line:
results['anom_completeness'] = line.rsplit(None,3)[1:4]
#Multiplicity
elif line.startswith('Multiplicity'):
results['multiplicity'] = line.rsplit(None,3)[1:4]
elif 'Anomalous multiplicity' in line:
results['anom_multiplicity'] = line.rsplit(None,3)[1:4]
#Anomalous indicators
elif 'DelAnom correlation between half-sets' in line:
results['anom_correlation'] = line.rsplit(None,3)[1:4]
elif 'Mid-Slope of Anom Normal Probability' in line:
results['anom_slope'] = line.rsplit(None,3)[1]
#unit cell
elif line.startswith('Average unit cell:'):
results['scaling_unit_cell'] = line.rsplit(':',1)[1].split()
#spacegroup
elif line.startswith('Space group:'):
results['scale_spacegroup'] = line.rsplit(':',1)[1].strip()
return(results,graphs)
def make_matrix(self, method):
"""
Take all the combinations and make a matrix of their relationships using agglomerative
hierarchical clustering, hcluster. Use CC as distance.
self.id_list = OrderedDict of pairs of datasets
self.results = Dict of values extracted from aimless log
method = linkage method: single, average, complete, weighted
Return Z, the linkage array
"""
self.logger.info('HCMerge::make_matrix using method %s' % method)
Y = [] # The list of distances, our equivalent of pdist
for pair in self.id_list.keys():
# grab keys with stats of interest, but ensure that keys go in numerical order
cc = 1 - self.results[pair]['CC']
Y.append(cc)
self.logger.debug('HCMerge::Array of distances for matrix = %s' % Y)
Z = linkage(Y,method=method) # The linkage defaults to single and euclidean
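        # Each row of Z is [cluster_i, cluster_j, distance, n_leaves_merged];
        # because distance = 1 - CC, perfectly correlated datasets merge at height 0.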
return Z
def select_data(self, Z, cutoff):
"""
Select dataset above the CC of interest
Z = linkage array
self.data_files = list of datasets in working directory
cutoff = CC of interest
most_wedges = dict with the wedge number as key, list of data sets as value
"""
self.logger.info('HCMerge::Apply cutoff to linkage array %s' % Z)
node_list = {} # Dict to hold new nodes
for cnt,row in enumerate(Z):
node_list[len(Z)+cnt+1] = [int(row[0]), int(row[1])]
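            # scipy numbers new clusters consecutively after the original leaves:
            # the cluster created by row 'cnt' gets id n_leaves + cnt, and for a
            # linkage built over n_leaves datasets, n_leaves = len(Z) + 1.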
# Set up for making groups of all clusters below cutoff. Turn most_wedges into a dict.
# Default to most closely linked pair of wedges
if any(item for item in Z.tolist() if item[2] < cutoff):
most_wedges = {}
else:
most_wedges = {0: ([int(Z[0][0]),int(Z[0][1])], Z[0][2])}
for cnt,item in enumerate(Z[::-1]):
# Apply cutoff
if item[2] <= cutoff:
# Dict holding clusters using node ID as key
most_wedges[cnt] = [int(item[0]),int(item[1])], item[2]
# iteratively go through dict values and reduce to original leaves
for i in most_wedges.values():
# use set because it is faster than list
while set(i[0]).intersection(set(node_list.keys())):
self.replace_wedges(i[0],node_list)
# Convert numbers to filenames
for i in most_wedges:
for cnt,item in enumerate(most_wedges[i][0]):
most_wedges[i][0][cnt] = self.data_files[item]
# # Convert to a flat list and remove the duplicates
# most_wedges = set(list(chain.from_iterable(most_wedges)))
self.logger.info('HCMerge::Selected wedges by node %s' % most_wedges)
return most_wedges
def replace_wedges(self, wedges, node_dict):
"""
Helper function to go through a pair of nodes from linkage list and expand it to a flat
list that has all the original leaves
"""
self.logger.debug('HCMerge::Replace Wedges: %s' % wedges)
for count,item in enumerate(wedges):
if item in node_dict.keys():
wedges[count] = node_dict[item][0]
wedges.append(node_dict[item][1])
return wedges
def merge_wedges(self, wedge_files):
"""
Merge all the separate wedges together at the cutoff value. Combines all files with POINTLESS then scales with AIMLESS.
Parses AIMLESS log file for statistics
Separated from process so that I can use it in rerun.
Requires: wedge_files = flat list of all the original leaves.
"""
# lists for running the multiprocessing
jobs = []
# Check for all_clusters flag
if self.all_clusters:
for cnt,cluster in enumerate(wedge_files.values()):
                combine_all = Process(target=self.combine, args=(cluster[0], self.prefix+str(cnt)))
jobs.append(combine_all)
combine_all.start()
for pair in jobs:
pair.join()
# Scale the files with aimless
for cnt,itm in enumerate(wedge_files):
scale = Process(target=self.scale,args=(self.prefix+str(cnt),self.prefix+str(cnt)))
jobs.append(scale)
scale.start()
for pair in jobs:
pair.join()
for cnt,itm in enumerate(wedge_files):
new_prefix = self.prefix+str(cnt)
self.results[new_prefix],self.graphs[new_prefix] = self.parse_aimless(self.prefix+str(cnt))
self.results[new_prefix]['files'] = wedge_files[itm][0]
self.results[new_prefix]['CC'] = 1 - wedge_files[itm][1]
self.merged_files.append(new_prefix)
else:
combine_all = Process(target=self.combine,args=(next(wedge_files.itervalues())[0],self.prefix))
combine_all.start()
combine_all.join()
# Scale the files with aimless
scale = Process(target=self.scale,args=(self.prefix,self.prefix))
scale.start()
scale.join()
self.results[self.prefix],self.graphs[self.prefix] = self.parse_aimless(self.prefix)
self.results[self.prefix]['files'] = next(wedge_files.itervalues())[0]
self.results[self.prefix]['CC'] = 1 - next(wedge_files.itervalues())[1]
self.merged_files.append(self.prefix)
def make_dendrogram(self, matrix, resolution):
"""
Make a printable dendrogram as either high resolution or low resolution (dpi) image.
"""
self.logger.debug('HCMerge::Generating Dendrogram')
# Make a dendrogram of the hierarchical clustering. Options are from scipy.cluster.hierarchy.dendrogram
try:
import matplotlib.pylab
if self.labels:
dendrogram(matrix, color_threshold = 1 - self.cutoff, labels = self.data_files, leaf_rotation = -90)
matplotlib.pylab.xlabel('Datasets')
matplotlib.pylab.ylabel('1 - Correlation Coefficient')
f = matplotlib.pylab.gcf()
f.set_size_inches([8,8])
f.subplots_adjust(bottom=0.4)
else:
dendrogram(matrix, color_threshold = 1 - self.cutoff)
# Save a PNG of the plot
matplotlib.pylab.savefig(self.prefix+'-dendrogram.png', dpi=resolution)
except:
dendrogram(matrix, color_threshold = 1 - self.cutoff, no_plot=True)
self.logger.error('HCMerge::matplotlib.pylab unavailable in your version of cctbx. Plot not generated.')
def write_db(self):
"""
Writes the results to a database, currently MySQL for RAPD
"""
self.logger.debug('HCMerge::Write Results to Database')
def make_log(self, files):
"""
Makes a log file of the merging results
files = list of results files, prefix only
"""
self.logger.debug('HCMerge::Write tabulated results to %s' % (self.prefix + '.log'))
# Make a comparison table of results
# Set up list of lists for making comparison table
table = [['', 'Correlation', 'Space Group', 'Resolution', 'Completeness',
'Multiplicity', 'I/SigI', 'Rmerge', 'Rmeas', 'Anom Rmeas',
'Rpim', 'Anom Rpim', 'CC 1/2', 'Anom Completeness', 'Anom Multiplicity',
'Anom CC', 'Anom Slope', 'Total Obs', 'Unique Obs']]
key_list = ['CC', 'scale_spacegroup', 'bins_high', 'completeness', 'multiplicity',
'isigi', 'rmerge_norm', 'rmeas_norm', 'rmeas_anom',
'rpim_norm', 'rpim_anom', 'CC_half', 'anom_completeness',
'anom_multiplicity', 'anom_correlation', 'anom_slope', 'total_obs', 'unique_obs']
for file in files:
row = [ file ]
for item in key_list:
# If it is a list, add first item from the list which is overall stat
if type(self.results[file][item]) == list:
row.append(self.results[file][item][0])
# Otherwise, add the entire contents of the item
else:
row.append(self.results[file][item])
table.append(row)
# flip columns and rows since rows are so long
table = zip(*table)
out_file = self.prefix + '.log'
out = open(out_file, 'w')
table_print = MakeTables()
table_print.pprint_table(out, table)
out.close()
# Append a key for merged file names
out = open(out_file, 'a')
for file in files:
out.write(file + ' = ' + str(self.results[file]['files']) + '\n')
out.close()
def cleanup(self):
"""
Remove excess log files and tidy up the directory.
safeext = list of file extensions which should be saved.
"""
self.logger.debug('HCMerge::Cleanup excess log files.')
killlist = []
killext = ['_pointless.sh', '_pointless.mtz', '_pointless.log', '_pointless_p1.log']
for ext in killext:
for prefix in self.id_list:
killlist.append(prefix+ext)
# for itm in self.merged_files:
# killlist.append(itm+'_aimless.sh')
        filelist = [ f for f in os.listdir(os.getcwd()) if f.endswith(".sh") or f.endswith(".log")
                     or f.endswith(".mtz")]
purgelist = set(filelist).intersection( set(killlist) )
# purgelist = [f for f in filelist if f not in safelist]
for file in purgelist:
os.remove(file)
def store_dicts(self, dicts):
"""
Create pickle files of dicts with CC, aimless stats, combinations already processed, merged file list
"""
self.logger.debug('HCMerge::Pickling Dicts')
file = open(self.prefix + '.pkl','wb')
pickle.dump(dicts,file)
file.close()
def get_dicts(self, file):
"""
Extract dicts out of pickle file
"""
self.logger.debug('HCMerge::UnPickling Dicts')
tmp = pickle.load(open(file,'rb'))
for itm,val in tmp.iteritems():
setattr(self, itm, val)
def rerun(self, pkl_file):
"""
Re-running parts of the agent
self.dirs['data'] should be self.data_dir when pulled from pkl file
"""
self.logger.debug('HCMerge::rerun')
self.get_dicts(pkl_file)
if self.start_point == 'clustering':
self.merged_files = [] # List for storing new merged files.
# Make new COMBINE directory and move data files over
combine_dir = self.create_subdirectory(prefix='COMBINE', path=self.dirs['work'])
os.chdir(combine_dir)
self.logger.debug('HCMerge::Copying files from %s to %s' % (self.data_dir,combine_dir))
for file in self.data_files:
shutil.copy(self.data_dir + '/' + file, combine_dir)
# Make relationship matrix
matrix = self.make_matrix(self.method)
# Find data above CC cutoff. Key 0 is most wedges and above CC cutoff
wedge_files = self.select_data(matrix, 1 - self.cutoff)
# Merge all wedges together
self.merge_wedges(wedge_files)
# Store the dicts for future use
self.store_dicts({'data_files': self.data_files, 'id_list': self.id_list,
'results': self.results, 'graphs': self.graphs, 'matrix': matrix,
'merged_files': self.merged_files})
# Make the summary text file for all merged files
self.make_log(self.merged_files)
else:
pass
# Make the dendrogram and write it out as a PNG
self.make_dendrogram(self.matrix, self.dpi)
def create_subdirectory(self, n_dir_max=None, prefix="TEMP", path="", directory_number=None):
"""
Make subdirectories as needed for all the many script, log and mtz files
Is the same code as phenix create_temp_directory. Replace if distributed with phenix.
"""
if n_dir_max is None:
n_dir_max=1000
temp_dir=prefix
if directory_number is None:
starting_number = 1
ending_number = n_dir_max
e = "Maximum number of directories is %d" %(n_dir_max)
else:
starting_number = directory_number
ending_number = directory_number
e = "The directory %s could not be created (it may already exist)" %(
os.path.join(path, prefix + "_" + str(directory_number)))
for i in xrange(starting_number, ending_number + 1):
temp_dir = os.path.join(path, prefix + "_" + str(i))
try:
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
return os.path.join(os.getcwd(), temp_dir)
except Exception, e: pass
raise ValueError("Unable to create directory %s " %(temp_dir)+ "\nError message is: %s " %(str(e)))
class MakeTables:
"""
From GITS blog: http://ginstrom.com/scribbles/2007/09/04/pretty-printing-a-table-in-python/
In a class so that it can be used separately from the MergeMany pipeline.
"""
def get_max_width(self, table, index):
"""Get the maximum width of the given column index"""
return max([len(str(row[index])) for row in table])
def pprint_table(self, out, table):
"""
Prints out a table of data, padded for alignment
out = Output stream (file-like object)
table = The table to print. A list of lists.
Each row must have the same number of columns.
"""
col_paddings = []
for i in range(len(table[0])):
col_paddings.append(self.get_max_width(table, i))
for row in table:
# left col
print >> out, row[0].ljust(col_paddings[0] + 1),
# rest of the cols
for i in range(1, len(row)):
col = str(row[i]).rjust(col_paddings[i] + 2)
print >> out, col,
print >> out
if __name__ == '__main__':
# Command Line Execution
from optparse import OptionParser # For commandline option parsing
LOG_FILENAME = 'hcmerge.log'
logger = logging.getLogger('RAPDLogger')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
echo = logging.StreamHandler(sys.stdout)
echo.setLevel(logging.DEBUG)
echo.setFormatter(formatter)
logger.addHandler(echo)
command = 'MERGE'
working_dir = os.getcwd()
dirs = {'work': working_dir}
usage = "usage: %prog [options] filelist or pickle file"
parser = OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true", dest="all_clusters", default=False,
help = "make all agglomerative clusters greater than cutoff value")
parser.add_option("-c", "--cutoff", dest="cutoff", type="float", default=0.95,
help = "set a percentage cutoff for the similarity between datasets")
parser.add_option("-d", "--dpi", dest="dpi", type="int", default=100,
help = "set resolution in dpi for the dendrogram image")
parser.add_option("-l", "--labels", action="store_true", dest="labels", default=False,
help = "add file names and labels to the dendrogram")
parser.add_option("-n", "--nproc", dest="nproc", type="int", default=0,
help = "set number of processors")
parser.add_option("-m", "--method", dest="method", type="string", default="complete",
help = "set alternative clustering method: single, complete, average, or weighted")
parser.add_option("-o", "--output_prefix", dest="prefix", type="string", default="merged",
help = "set a prefix for output files. Used in rerun as the name of the .pkl file")
parser.add_option("-p", "--precheck", action="store_false", dest="precheck", default=True,
help = "precheck for duplicate or incorrect data files, default=True")
# parser.add_option("-q", "--qsub", action="store_true", dest="cluster_use", default=False,
# help = "use qsub on a cluster to execute the job, default is sh for a single computer")
parser.add_option("-r", "--resolution", dest="resolution", type= "float", default=0,
help = "set a resolution cutoff for merging data")
parser.add_option("-s", "--spacegroup", dest="spacegroup", type="string",
help = "set a user-defined spacegroup")
# parser.add_option("-u", "--unit_cell", dest="cell", type="string",
# help = "set a unit cell for unmerged scalepack files")
parser.add_option("-v", "--verbose", action="store_false", dest="cleanup_files", default=True,
help = "do not clean up excess script and log files")
parser.add_option("-x", "--rerun", dest="start_point", type="string", default="start",
help = "use pickle file and run merging again starting at: clustering, dendrogram")
(options,args) = parser.parse_args()
files = args
if len(files) > 1:
# Get absolute path in case people have relative paths
datasets = [os.path.abspath(x) for x in files]
elif len(files) == 0:
# print 'MergeMany requires a text file with a list of files (one per line) or a list of files on the command line'
parser.print_help()
sys.exit(9)
elif files[0].split('.')[1].lower() == 'pkl':
datasets = files[0]
else:
# Read in text file with each file on a separate line
datasets = open(files[0],'rb').readlines()
# Remove entries created from the blank lines in the file. Compensating for returns at end of file.
datasets = filter(lambda x: x != '\n',datasets)
# Remove empty space on either side of the filenames
datasets = [os.path.abspath(x.strip()) for x in datasets]
# datasets = ['/gpfs5/users/necat/rapd/copper/trunk/integrate/2012-10-24/cmd13_1_1/cmd13_1_1_1/cmd13_1_1_1_XDS.HKL',
# '/gpfs5/users/necat/rapd/copper/trunk/integrate/2012-10-24/cmd13_1_2/cmd13_1_2_2/cmd13_1_2_2_XDS.HKL'
# ]
settings = {'cmdline': True,
'all_clusters': options.all_clusters,
'dpi': options.dpi,
'precheck': options.precheck,
'cutoff': options.cutoff,
# 'cell': options.cell,
'labels': options.labels,
'prefix': options.prefix,
'user_spacegroup': 0, # Default the user_spacegroup to None.
'resolution': options.resolution,
'cleanup_files': options.cleanup_files,
'work_directory': working_dir,
'work_dir_override': 'False',
'process_id': '0',
'cluster_use': 'False',
'nproc': options.nproc,
'start_point': options.start_point,
}
# Check to see if the user has set a spacegroup. If so, then change from symbol to IUCR number. Add to settings.
try:
if options.spacegroup:
settings['user_spacegroup'] = space_group_symbols(options.spacegroup).number()
except:
print 'Unrecognized space group symbol.'
sys.exit()
try:
method_list = ['single', 'complete', 'average', 'weighted']
if [i for i in method_list if i in options.method]:
settings['method'] = options.method
except:
print 'Unrecognized method.'
sys.exit()
try:
rerun_list = ['start', 'clustering', 'dendrogram']
if [i for i in rerun_list if i in options.start_point]:
settings['start_point'] = options.start_point
except:
print 'Unrecognized option for rerunning HCMerge.'
sys.exit()
# Deal with negative integers and what happens if cpu_count() raises NotImplementedError
if options.nproc <= 0:
try:
settings['nproc'] = cpu_count()
except:
settings['nproc'] = 1
controller_address = ['127.0.0.1' , 50001]
input = [command, dirs, datasets, settings, controller_address]
# Call the handler.
T = MergeMany(input, logger)
else:
# Execution when Module is Imported
# Set up logging
LOG_FILENAME = '/gpfs5/users/necat/kay/rapd/merge.logger'
logger = logging.getLogger('RAPDLogger')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
command = 'MERGE'
dirs = { 'work': '/gpfs5/users/necat/kay/rapd/ribo/'
}
datasets = ['/gpfs5/users/necat/rapd/copper/trunk/integrate/2012-10-24/cmd13_1_1/cmd13_1_1_1/cmd13_1_1_1_XDS.HKL',
'/gpfs5/users/necat/rapd/copper/trunk/integrate/2012-10-24/cmd13_1_2/cmd13_1_2_2/cmd13_1_2_2_XDS.HKL'
]
settings = {'cmdline': False,
'all_clusters': False,
'labels': False,
'method': 'complete',
'cutoff': 0.95,
'prefix': 'merged',
'user_spacegroup': 0,
'resolution': 0,
'cleanup_files': True,
'work_directory': '/gpfs5/users/necat/kay/rapd/',
'work_dir_override': 'False',
'process_id': '0',
'cluster_use': True,
'nproc': cpu_count(),
}
controller_address = ['127.0.0.1' , 50001]
input = [command, dirs, datasets, settings, controller_address]
# Call the handler.
T = MergeMany(input, logger)
| agpl-3.0 | 2,108,948,982,147,536,100 | 45.339312 | 164 | 0.560885 | false |
adrinjalali/clue-hackathon | src/save_binary.py | 1 | 1153 | import sys
from os.path import join
import os
import pandas as pd
def save_binary(data_dir,
binary_dir = 'binary',
active_days = 'active_days.csv',
cycles = 'cycles.csv',
tracking = 'tracking.csv',
users = 'users.csv',
labels = 'labels.csv'):
"""
    Load the data from the CSV files,
    then pickle them.
"""
df_active_days = pd.read_csv(join(data_dir, active_days))
df_cycles = pd.read_csv(join(data_dir, cycles))
df_users = pd.read_csv(join(data_dir, users))
df_tracking = pd.read_csv(join(data_dir, tracking))
df_labels = pd.read_csv(join(data_dir, labels))
os.makedirs(binary_dir, exist_ok=True)
df_active_days.to_pickle(os.path.join(binary_dir, 'active_days.pkl'))
df_cycles.to_pickle(os.path.join(binary_dir, 'cycles.pkl'))
df_users.to_pickle(os.path.join(binary_dir, 'users.pkl'))
df_tracking.to_pickle(os.path.join(binary_dir, 'tracking.pkl'))
df_labels.to_pickle(os.path.join(binary_dir, 'labels.pkl'))
if __name__ == '__main__':
data_fname = sys.argv[-1]
save_binary(data_fname)
| apache-2.0 | 4,806,227,463,584,719,000 | 32.911765 | 73 | 0.607979 | false |
Stargrazer82301/ChrisFuncs | ChrisFuncs/FromGitHub/denis_bz.py | 1 | 3174 | # Import smorgasbord
import numpy as np
import matplotlib
from matplotlib import pyplot as pl, cm, colors
# Function to extract a colourmap from cmap object, from https://gist.github.com/denis-bz/8052855
def get_cmap( cmap, name=None, n=256 ):
""" in: a name "Blues" "BuGn_r" ... of a builtin cmap (case-sensitive)
or a filename, np.loadtxt() n x 3 or 4 ints 0..255 or floats 0..1
or a cmap already
or a numpy array.
See http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
or in IPython, pl.cm.<tab>
"""
if isinstance( cmap, colors.Colormap ):
return cmap
if isinstance( cmap, basestring ):
if cmap in cm.cmap_d:
return pl.get_cmap( cmap ) # "Blues" ...
A = np.loadtxt( cmap, delimiter=None ) # None: white space
name = name or cmap.split("/")[-1] .split(".")[0] # .../xx.csv -> xx
else:
A = cmap # numpy array or array-like
return array_cmap( A, name, n=n )
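# Minimal sketch of the helper assumed by get_cmap() above; array_cmap is not
# defined or imported anywhere in this file, so this hypothetical version fills
# the gap: it turns an n x 3/4 array (ints 0..255 or floats 0..1) into a colormap.
def array_cmap(A, name=None, n=256):
    A = np.asarray(A, dtype=float)
    if A.max() > 1.0:  # values look like 0..255 integers, rescale to 0..1
        A = A / 255.0
    return colors.LinearSegmentedColormap.from_list(name or 'array_cmap', A, N=n)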
# Function to create a truncated version of an existing colourmap, from https://gist.github.com/denis-bz/8052855
def truncate_colormap( cmap, minval=0.0, maxval=1.0, n=256 ):
""" mycolormap = truncate_colormap(
cmap name or file or ndarray,
minval=0.2, maxval=0.8 ): subset
minval=1, maxval=0 ) : reverse
by unutbu http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
"""
cmap = get_cmap( cmap )
name = "%s-trunc-%.2g-%.2g" % (cmap.name, minval, maxval)
return colors.LinearSegmentedColormap.from_list(
name, cmap( np.linspace( minval, maxval, n )))
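# Illustrative usage (hypothetical, not part of the original gist):
#   middle_blues = truncate_colormap('Blues', minval=0.2, maxval=0.8)
#   reversed_jet = truncate_colormap('jet', minval=1.0, maxval=0.0)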
# Function to apply an arbitrary function to a colourmap
def cmap_map(function, cmap):
""" Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
This routine will break any discontinuous points in a colormap.
by http://scipy-cookbook.readthedocs.io/items/Matplotlib_ColormapTransformations.html
"""
cdict = cmap._segmentdata
step_dict = {}
    # First get the list of points where the segments start or end
for key in ('red', 'green', 'blue'):
step_dict[key] = list(map(lambda x: x[0], cdict[key]))
step_list = sum(step_dict.values(), [])
step_list = np.array(list(set(step_list)))
# Then compute the LUT, and apply the function to the LUT
reduced_cmap = lambda step : np.array(cmap(step)[0:3])
old_LUT = np.array(list(map(reduced_cmap, step_list)))
new_LUT = np.array(list(map(function, old_LUT)))
# Now try to make a minimal segment definition of the new LUT
cdict = {}
for i, key in enumerate(['red','green','blue']):
this_cdict = {}
for j, step in enumerate(step_list):
if step in step_dict[key]:
this_cdict[step] = new_LUT[j, i]
elif new_LUT[j,i] != old_LUT[j, i]:
this_cdict[step] = new_LUT[j, i]
colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
colorvector.sort()
cdict[key] = colorvector
return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024) | mit | -5,383,533,145,721,893,000 | 40.233766 | 127 | 0.627914 | false |
TimoRoth/oggm | oggm/tests/conftest.py | 1 | 9836 | """Pytest fixtures to be used in other test modules"""
import os
import shutil
import logging
import getpass
from functools import wraps
import numpy as np
import pytest
import shapely.geometry as shpg
import matplotlib.pyplot as plt
from oggm.shop import cru, histalp, ecmwf
from oggm import cfg, tasks
from oggm.core import flowline
from oggm.tests.funcs import init_hef, get_test_dir
from oggm import utils
from oggm.utils import mkdir, _downloads
from oggm.utils import oggm_urlretrieve
from oggm.tests import HAS_MPL_FOR_TESTS, HAS_INTERNET
logger = logging.getLogger(__name__)
def pytest_configure(config):
for marker in ["slow", "download", "creds", "internet", "test_env",
"graphic "]:
config.addinivalue_line("markers", marker)
if config.pluginmanager.hasplugin('xdist'):
try:
from ilock import ILock
utils.lock = ILock("oggm_xdist_download_lock_" + getpass.getuser())
logger.info("ilock locking setup successfully for xdist tests")
except BaseException:
logger.warning("could not setup ilock locking for distributed "
"tests")
def pytest_addoption(parser):
parser.addoption("--run-slow", action="store_true", default=False,
help="Run slow tests")
parser.addoption("--run-download", action="store_true", default=False,
help="Run download tests")
parser.addoption("--run-creds", action="store_true", default=False,
help="Run download tests requiring credentials")
parser.addoption("--run-test-env", metavar="ENVNAME", default="",
help="Run only specified test env")
parser.addoption("--no-run-internet", action="store_true", default=False,
help="Don't run any tests accessing the internet")
def pytest_collection_modifyitems(config, items):
use_internet = HAS_INTERNET and not config.getoption("--no-run-internet")
skip_slow = not config.getoption("--run-slow")
skip_download = not use_internet or not config.getoption("--run-download")
skip_cred = skip_download or not config.getoption("--run-creds")
run_test_env = config.getoption("--run-test-env")
slow_marker = pytest.mark.skip(reason="need --run-slow option to run")
download_marker = pytest.mark.skip(reason="need --run-download option to "
"run, internet access is "
"required")
cred_marker = pytest.mark.skip(reason="need --run-creds option to run, "
"internet access is required")
internet_marker = pytest.mark.skip(reason="internet access is required")
test_env_marker = pytest.mark.skip(reason="only test_env=%s tests are run"
% run_test_env)
graphic_marker = pytest.mark.skip(reason="requires mpl V1.5+ and "
"pytest-mpl")
for item in items:
if skip_slow and "slow" in item.keywords:
item.add_marker(slow_marker)
if skip_download and "download" in item.keywords:
item.add_marker(download_marker)
if skip_cred and "creds" in item.keywords:
item.add_marker(cred_marker)
if not use_internet and "internet" in item.keywords:
item.add_marker(internet_marker)
if run_test_env:
test_env = item.get_closest_marker("test_env")
if not test_env or test_env.args[0] != run_test_env:
item.add_marker(test_env_marker)
if "graphic" in item.keywords:
def wrap_graphic_test(test):
@wraps(test)
def test_wrapper(*args, **kwargs):
try:
return test(*args, **kwargs)
finally:
plt.close()
return test_wrapper
item.obj = wrap_graphic_test(item.obj)
if not HAS_MPL_FOR_TESTS:
item.add_marker(graphic_marker)
@pytest.fixture(autouse=True)
def patch_data_urls(monkeypatch):
"""This makes sure we never download the big files with our tests"""
url = 'https://cluster.klima.uni-bremen.de/~oggm/test_climate/'
monkeypatch.setattr(cru, 'CRU_SERVER', url + 'cru/')
monkeypatch.setattr(cru, 'CRU_BASE', 'cru_ts3.23.1901.2014.{}.dat.nc')
monkeypatch.setattr(histalp, 'HISTALP_SERVER', url + 'histalp/')
monkeypatch.setattr(ecmwf, 'ECMWF_SERVER', url)
basenames = {
'ERA5': {
'inv': 'era5/monthly/v1.0/era5_invariant.nc',
'pre': 'era5/monthly/v1.0/era5_monthly_prcp_1979-2018.nc',
'tmp': 'era5/monthly/v1.0/era5_monthly_t2m_1979-2018.nc'
},
'ERA5L': {
'inv': 'era5-land/monthly/v1.0/era5_land_invariant_flat.nc',
'pre': 'era5-land/monthly/v1.0/era5_land_monthly_prcp_1981-2018_flat'
'.nc',
'tmp': 'era5-land/monthly/v1.0/era5_land_monthly_t2m_1981-2018_flat.nc'
},
'CERA': {
'inv': 'cera-20c/monthly/v1.0/cera-20c_invariant.nc',
'pre': 'cera-20c/monthly/v1.0/cera-20c_pcp_1901-2010.nc',
'tmp': 'cera-20c/monthly/v1.0/cera-20c_t2m_1901-2010.nc'
},
'ERA5dr': {
'inv': 'era5/monthly/vdr/ERA5_geopotential_monthly.nc',
'lapserates': 'era5/monthly/vdr/ERA5_lapserates_monthly.nc',
'tmp': 'era5/monthly/vdr/ERA5_temp_monthly.nc',
'tempstd': 'era5/monthly/vdr/ERA5_tempstd_monthly.nc',
'pre': 'era5/monthly/vdr/ERA5_totalprecip_monthly.nc',
}
}
monkeypatch.setattr(ecmwf, 'BASENAMES', basenames)
def secure_url_retrieve(url, *args, **kwargs):
"""A simple patch to OGGM's download function to make sure we don't
download elsewhere than expected."""
assert ('github' in url or
'cluster.klima.uni-bremen.de/~oggm/ref_mb_params' in url or
'cluster.klima.uni-bremen.de/~oggm/test_gdirs/' in url or
'cluster.klima.uni-bremen.de/~oggm/demo_gdirs/' in url or
'cluster.klima.uni-bremen.de/~oggm/test_climate/' in url or
'klima.uni-bremen.de/~oggm/climate/cru/cru_cl2.nc.zip' in url
)
return oggm_urlretrieve(url, *args, **kwargs)
@pytest.fixture(autouse=True)
def patch_url_retrieve(monkeypatch):
monkeypatch.setattr(_downloads, 'oggm_urlretrieve', secure_url_retrieve)
@pytest.fixture()
def dummy_constant_bed():
dx = 1.
hmax = 3000.
hmin = 1000.
nx = 200
map_dx = 100.
widths = 3.
surface_h = np.linspace(hmax, hmin, nx)
bed_h = surface_h
widths = surface_h * 0. + widths
coords = np.arange(0, nx - 0.5, 1)
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
return [flowline.RectangularBedFlowline(line, dx, map_dx, surface_h,
bed_h, widths)]
@pytest.fixture(scope='session')
def test_dir():
""" Provides a reference to the test directory for the entire test session.
Named after the current git revision.
As a session-scoped fixture, this will only be created once and
then injected to each test that depends on it.
"""
return get_test_dir()
def _setup_case_dir(call, test_dir):
casedir = os.path.join(test_dir, call.__name__)
mkdir(casedir, reset=True)
return casedir
def _teardown_case_dir(casedir):
if os.path.exists(casedir):
shutil.rmtree(casedir)
@pytest.fixture(scope='function')
def case_dir(request, test_dir):
""" Provides a unique directory for the current test function, a child of
the session test directory (test_dir > case_dir). Named after the
current test function.
As a function-scoped fixture, a new directory is created for
each function that uses this and then cleaned up when the case
completes.
"""
cd = _setup_case_dir(request.function, test_dir)
yield cd
_teardown_case_dir(cd)
@pytest.fixture(scope='class')
def class_case_dir(request, test_dir):
""" Provides a unique directory for the current test class, a child of
the session test directory (test_dir > class_case_dir). Named after
the current test class.
As a class-scoped fixture, a class directory is created once for
the current class and used by each test inside it. It is cleaned
up when the all the cases in the class complete.
"""
cd = _setup_case_dir(request.cls, test_dir)
yield cd
_teardown_case_dir(cd)
@pytest.fixture(scope='module')
def hef_gdir_base(request, test_dir):
""" Provides an initialized Hintereisferner glacier directory.
As a module fixture, the initialization is run only once per test
module that uses it.
IMPORTANT: To preserve a constant starting condition, hef_gdir_base
should almost never be directly injected into a test case. Test cases
should use the below hef_gdir fixture to provide a directory that has
been copied into an ephemeral case directory.
"""
try:
module = request.module
border = module.DOM_BORDER if module.DOM_BORDER is not None else 40
return init_hef(border=border)
except AttributeError:
return init_hef()
@pytest.fixture(scope='class')
def hef_gdir(hef_gdir_base, class_case_dir):
""" Provides a copy of the base Hintereisenferner glacier directory in
a case directory specific to the current test class. All cases in
the test class will use the same copy of this glacier directory.
"""
return tasks.copy_to_basedir(hef_gdir_base, base_dir=class_case_dir,
setup='all')
| bsd-3-clause | -6,844,716,580,379,379,000 | 38.031746 | 83 | 0.620272 | false |
kshedstrom/pyroms | pyroms_toolbox/pyroms_toolbox/remapping_bound.py | 1 | 39859 | # encoding: utf-8
import os
import numpy as np
import glob
import re
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
import pyroms_toolbox
import _remapping
import matplotlib.pyplot as plt
import datetime
def remapping_bound(varname, srcfile, wts_files, srcgrd, dst_grd, \
rotate_uv=False, trange=None, irange=None, jrange=None, \
dstdir='./' ,zlevel=None, dmax=0, cdepth=0, kk=0, \
uvar='u', vvar='v', rotate_part=False):
'''
A remapping function to extract boundary conditions from one ROMS grid
to another. It will optionally rotating u and v variables, but needs
to be called separately for each u/v pair (such as u/v, uice/vice).
'''
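    # Illustrative call (argument values are hypothetical, not from this module):
    #   remapping_bound(['temp', 'salt', 'u', 'v'], 'ocean_his_*.nc', 'wts_*.nc',
    #                   'SRC_GRID', 'DST_GRID', rotate_uv=True, dstdir='./bdry/')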
# get input and output grid
if type(srcgrd).__name__ == 'ROMS_Grid':
srcgrd = srcgrd
else:
srcgrd = pyroms.grid.get_ROMS_grid(srcgrd)
if type(dst_grd).__name__ == 'ROMS_Grid':
dst_grd = dst_grd
else:
dst_grd = pyroms.grid.get_ROMS_grid(dst_grd)
# build intermediate zgrid
if zlevel is None:
zlevel = np.array([-7500.,-7000.,-6500.,-6000.,-5500.,-5000.,\
-4500.,-4000.,-3500.,-3000.,-2500.,-2000.,-1750.,\
-1500.,-1250.,-1000.,-900.,-800.,-700.,-600.,-500.,\
-400.,-300.,-250.,-200.,-175.,-150.,-125.,-100.,-90.,\
-80.,-70.,-60.,-50.,-45.,-40.,-35.,-30.,-25.,-20.,-17.5,\
-15.,-12.5,-10.,-7.5,-5.,-2.5,0.])
else:
zlevel = np.sort(-abs(zlevel))
nzlevel = len(zlevel)
src_zcoord = pyroms.vgrid.z_coordinate(srcgrd.vgrid.h, zlevel, nzlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
srcgrdz = pyroms.grid.ROMS_Grid(srcgrd.name+'_Z', srcgrd.hgrid, src_zcoord)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# varname argument
if type(varname).__name__ == 'list':
nvar = len(varname)
elif type(varname).__name__ == 'str':
varname = [varname]
nvar = len(varname)
else:
raise ValueError, 'varname must be a str or a list of str'
# if we're working on u and v, we'll compute ubar,vbar afterwards
compute_ubar = False
if (varname.__contains__('u') == 1 and varname.__contains__('v') == 1) or \
(varname.__contains__('u_eastward') == 1 and varname.__contains__('v_northward') == 1):
compute_ubar = True
print 'ubar/vbar to be computed from u/v'
if varname.__contains__('ubar'):
varname.remove('ubar')
nvar = nvar-1
if varname.__contains__('vbar'):
varname.remove('vbar')
nvar = nvar-1
# if rotate_uv=True, check that u and v are in varname
if rotate_uv is True:
if varname.__contains__(uvar) == 0 or varname.__contains__(vvar) == 0:
raise Warning, 'varname must include uvar and vvar in order to' \
+ ' rotate the velocity field'
else:
varname.remove(uvar)
varname.remove(vvar)
nvar = nvar-2
# srcfile argument
if type(srcfile).__name__ == 'list':
nfile = len(srcfile)
elif type(srcfile).__name__ == 'str':
srcfile = sorted(glob.glob(srcfile))
nfile = len(srcfile)
else:
raise ValueError, 'src_srcfile must be a str or a list of str'
# get wts_file
if type(wts_files).__name__ == 'str':
wts_files = sorted(glob.glob(wts_files))
sides = ['_west','_east','_north','_south']
long = {'_west':'Western', '_east':'Eastern', \
'_north':'Northern', '_south':'Southern'}
dimexcl = {'_west':'xi', '_east':'xi', \
'_north':'eta', '_south':'eta'}
nctidx = 0
# loop over the srcfile
for nf in range(nfile):
print 'Working with file', srcfile[nf], '...'
# get time
ocean_time = pyroms.utility.get_nc_var('ocean_time', srcfile[nf])
ntime = len(ocean_time[:])
# trange argument
if trange is None:
trange = range(ntime)
# create destination file
if nctidx == 0:
dstfile = dstdir + os.path.basename(srcfile[nf])[:-3] + '_' \
+ dst_grd.name + '_bdry.nc'
if os.path.exists(dstfile) is False:
print 'Creating destination file', dstfile
pyroms_toolbox.nc_create_roms_file(dstfile, dst_grd, \
ocean_time, lgrid=False)
# open destination file
nc = netCDF.Dataset(dstfile, 'a', format='NETCDF3_64BIT')
# loop over time
for nt in trange:
nc.variables['ocean_time'][nctidx] = ocean_time[nt]
# loop over variable
for nv in range(nvar):
print ' '
print 'remapping', varname[nv], 'from', srcgrd.name, \
'to', dst_grd.name
print 'time =', ocean_time[nt]
Mp, Lp = dst_grd.hgrid.mask_rho.shape
# get source data
src_var = pyroms.utility.get_nc_var(varname[nv], srcfile[nf])
# determine variable dimension
ndim = len(src_var.dimensions)-1
# get spval
try:
spval = src_var._FillValue
except:
raise Warning, 'Did not find a _FillValue attribute.'
# irange
if irange is None:
iirange = (0,src_var.shape[-1])
else:
iirange = irange
# jrange
if jrange is None:
jjrange = (0,src_var.shape[-2])
else:
jjrange = jrange
# determine where on the C-grid these variable lies
if src_var.dimensions[2].find('_rho') != -1:
Cpos='rho'
if src_var.dimensions[2].find('_u') != -1:
Cpos='u'
Lp = Lp-1
if irange is not None:
iirange = (irange[0], irange[1]-1)
if src_var.dimensions[2].find('_v') != -1:
Cpos='v'
Mp = Mp-1
if jrange is not None:
jjrange = (jrange[0], jrange[1]-1)
if src_var.dimensions[1].find('_w') != -1:
Cpos='w'
print 'Arakawa C-grid position is', Cpos
# create variable in _destination file
if nctidx == 0:
for sid in sides:
varn = varname[nv]+str(sid)
dimens = [i for i in src_var.dimensions]
for dim in dimens:
if re.match(dimexcl[sid],dim):
dimens.remove(dim)
print 'Creating variable', varn, dimens
nc.createVariable(varn, 'f8', dimens, \
fill_value=spval)
nc.variables[varn].long_name = varname[nv] + \
' ' + long[sid] + ' boundary condition'
try:
nc.variables[varn].units = src_var.units
except:
print varn+' has no units'
nc.variables[varn].time = src_var.time
nc.variables[varn].coordinates = \
                            str(dimens[::-1])
nc.variables[varn].field = src_var.field
# get the right remap weights file
for s in range(len(wts_files)):
if wts_files[s].__contains__(Cpos+'_to_'+Cpos+'.nc'):
wts_file = wts_files[s]
break
else:
if s == len(wts_files) - 1:
raise ValueError, 'Did not find the appropriate remap weights file'
if ndim == 3:
# vertical interpolation from sigma to standard z level
print 'vertical interpolation from sigma to standard z level'
src_varz = pyroms.remapping.roms2z( \
src_var[nt,:,jjrange[0]:jjrange[1],iirange[0]:iirange[1]], \
srcgrd, srcgrdz, Cpos=Cpos, spval=spval, \
irange=iirange, jrange=jjrange)
# flood the grid
print 'flood the grid'
src_varz = pyroms.remapping.flood(src_varz, srcgrdz, Cpos=Cpos, \
irange=iirange, jrange=jjrange, spval=spval, \
dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_varz = src_var[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
print datetime.datetime.now()
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_varz = pyroms.remapping.remap(src_varz, wts_file, \
spval=spval)
if ndim == 3:
dst_var_north = pyroms.remapping.z2roms(dst_varz[:, \
Mp-1:Mp,0:Lp], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(0,Lp), \
jrange=(Mp-1,Mp))
dst_var_south = pyroms.remapping.z2roms(dst_varz[:, \
0:1, :], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(0,Lp), \
jrange=(0,1))
dst_var_east = pyroms.remapping.z2roms(dst_varz[:, \
:, Lp-1:Lp], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(Lp-1,Lp), \
jrange=(0,Mp))
dst_var_west = pyroms.remapping.z2roms(dst_varz[:, \
:, 0:1], dst_grdz, dst_grd, Cpos=Cpos, \
spval=spval, flood=False, irange=(0,1), \
jrange=(0,Mp))
if varname[nv] == 'u':
dst_u_west = dst_var_west
dst_u_east = dst_var_east
dst_u_north = dst_var_north
dst_u_south = dst_var_south
if varname[nv] == 'v':
dst_v_west = dst_var_west
dst_v_east = dst_var_east
dst_v_north = dst_var_north
dst_v_south = dst_var_south
else:
dst_var_north = dst_varz[-1, :]
dst_var_south = dst_varz[0, :]
dst_var_east = dst_varz[:, -1]
dst_var_west = dst_varz[:, 0]
# print datetime.datetime.now()
# write data in destination file
print 'write data in destination file'
sid = '_west'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_west)
sid = '_east'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_east)
sid = '_north'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_north)
sid = '_south'
varn = varname[nv]+str(sid)
nc.variables[varn][nctidx] = np.squeeze(dst_var_south)
# rotate the velocity field if requested
if rotate_uv is True:
print ' '
print 'remapping and rotating u and v from', srcgrd.name, \
'to', dst_grd.name
# get source data
src_u = pyroms.utility.get_nc_var(uvar, srcfile[nf])
src_v = pyroms.utility.get_nc_var(vvar, srcfile[nf])
# get spval
try:
spval = src_v._FillValue
except:
raise Warning, 'Did not find a _FillValue attribute.'
if rotate_part:
ndim = len(src_u.dimensions)-1
ind = uvar.find('_eastward')
uvar_out = uvar[0:ind]
print "Warning: renaming uvar to", uvar_out
ind = vvar.find('_northward')
vvar_out = vvar[0:ind]
print "Warning: renaming vvar to", vvar_out
if ndim == 3:
dimens_u = ['ocean_time', 's_rho', 'eta_u', 'xi_u']
dimens_v = ['ocean_time', 's_rho', 'eta_v', 'xi_v']
else:
dimens_u = ['ocean_time', 'eta_u', 'xi_u']
dimens_v = ['ocean_time', 'eta_v', 'xi_v']
else:
dimens_u = [i for i in src_u.dimensions]
dimens_v = [i for i in src_v.dimensions]
uvar_out = uvar
vvar_out = vvar
# create variable in destination file
if nctidx == 0:
print 'Creating boundary variables for '+uvar
for sid in sides:
varn = uvar_out+str(sid)
print 'Creating variable', varn
dimens = list(dimens_u)
for dim in dimens:
if re.match(dimexcl[sid],dim):
dimens.remove(dim)
nc.createVariable(varn, 'f8', dimens, \
fill_value=spval)
nc.variables[varn].long_name = uvar_out + \
' ' + long[sid] + ' boundary condition'
try:
nc.variables[varn].units = src_u.units
except:
print varn+' has no units'
nc.variables[varn].time = src_u.time
nc.variables[varn].coordinates = \
                            str(dimens[::-1])
nc.variables[varn].field = src_u.field
print 'Creating boundary variables for '+vvar
for sid in sides:
varn = vvar_out+str(sid)
print 'Creating variable', varn
dimens = list(dimens_v)
for dim in dimens:
if re.match(dimexcl[sid],dim):
dimens.remove(dim)
nc.createVariable(varn, 'f8', dimens, \
fill_value=spval)
nc.variables[varn].long_name = vvar_out + \
' ' + long[sid] + ' boundary condition'
try:
nc.variables[varn].units = src_v.units
except:
print varn+' has no units'
nc.variables[varn].time = src_v.time
nc.variables[varn].coordinates = \
                            str(dimens[::-1])
nc.variables[varn].field = src_v.field
# get the right remap weights file
if rotate_part:
for s in range(len(wts_files)):
if wts_files[s].__contains__('rho_to_rho.nc'):
wts_file_u = wts_files[s]
wts_file_v = wts_files[s]
Cpos_u = 'rho'
Cpos_v = 'rho'
# irange
if irange is None:
iirange = (0,src_u.shape[-1])
else:
iirange = irange
# jrange
if jrange is None:
jjrange = (0,src_u.shape[-2])
else:
jjrange = jrange
else:
for s in range(len(wts_files)):
if wts_files[s].__contains__('u_to_rho.nc'):
wts_file_u = wts_files[s]
if wts_files[s].__contains__('v_to_rho.nc'):
wts_file_v = wts_files[s]
Cpos_u = 'u'
Cpos_v = 'v'
# irange
if irange is None:
iirange = (0,src_u.shape[-1])
else:
iirange = (irange[0], irange[1]-1)
# jrange
if jrange is None:
jjrange = (0,src_u.shape[-2])
else:
jjrange = jrange
# vertical interpolation from sigma to standard z level
ndim = len(src_v.dimensions)-1
if ndim == 3:
print 'vertical interpolation from sigma to standard z level'
src_uz = pyroms.remapping.roms2z( \
src_u[nt,:,jjrange[0]:jjrange[1],iirange[0]:iirange[1]], \
srcgrd, srcgrdz, Cpos=Cpos_u, spval=spval, \
irange=iirange, jrange=jjrange)
# flood the grid
print 'flood the u grid'
src_uz = pyroms.remapping.flood(src_uz, srcgrdz, Cpos=Cpos_u, \
irange=iirange, jrange=jjrange, \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_uz = src_u[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
src_uz = pyroms.remapping.flood2d(src_uz, srcgrdz, Cpos=Cpos_u, \
irange=iirange, jrange=jjrange, spval=spval, \
dmax=dmax)
# get the right ranges
if rotate_part:
# irange
if irange is None:
iirange = (0,src_v.shape[-1])
else:
iirange = irange
# jrange
if jrange is None:
jjrange = (0,src_v.shape[-2])
else:
jjrange = jrange
else:
# irange
if irange is None:
iirange = (0,src_v.shape[-1])
else:
iirange = irange
# jrange
if jrange is None:
jjrange = (0,src_v.shape[-2])
else:
jjrange = (jrange[0], jrange[1]-1)
if ndim == 3:
src_vz = pyroms.remapping.roms2z( \
src_v[nt,:,jjrange[0]:jjrange[1],iirange[0]:iirange[1]], \
srcgrd, srcgrdz, Cpos=Cpos_v, spval=spval, \
irange=iirange, jrange=jjrange)
# flood the grid
print 'flood the v grid'
src_vz = pyroms.remapping.flood(src_vz, srcgrdz, Cpos=Cpos_v, \
irange=iirange, jrange=jjrange, \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_vz = src_v[nt,jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
src_vz = pyroms.remapping.flood2d(src_vz, srcgrdz, Cpos=Cpos_v, \
irange=iirange, jrange=jjrange, spval=spval, \
dmax=dmax)
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_uz = pyroms.remapping.remap(src_uz, wts_file_u, \
spval=spval)
dst_vz = pyroms.remapping.remap(src_vz, wts_file_v, \
spval=spval)
Mp, Lp = dst_grd.hgrid.mask_rho.shape
if ndim == 3:
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_u_north = pyroms.remapping.z2roms(dst_uz[:, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_u_south = pyroms.remapping.z2roms(dst_uz[:, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_u_east = pyroms.remapping.z2roms(dst_uz[:, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_u_west = pyroms.remapping.z2roms(dst_uz[:, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
dst_v_north = pyroms.remapping.z2roms(dst_vz[:, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_v_south = pyroms.remapping.z2roms(dst_vz[:, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_v_east = pyroms.remapping.z2roms(dst_vz[:, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_v_west = pyroms.remapping.z2roms(dst_vz[:, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
else:
dst_u_north = dst_uz[Mp-2:Mp, 0:Lp]
dst_u_south = dst_uz[0:2, 0:Lp]
dst_u_east = dst_uz[0:Mp, Lp-2:Lp]
dst_u_west = dst_uz[0:Mp, 0:2]
dst_v_north = dst_vz[Mp-2:Mp, 0:Lp]
dst_v_south = dst_vz[0:2, 0:Lp]
dst_v_east = dst_vz[0:Mp, Lp-2:Lp]
dst_v_west = dst_vz[0:Mp, 0:2]
# rotate u,v fields
if rotate_part:
src_angle = np.zeros(dst_grd.hgrid.angle_rho.shape)
else:
for s in range(len(wts_files)):
if wts_files[s].__contains__('rho_to_rho.nc'):
wts_file = wts_files[s]
src_ang = srcgrd.hgrid.angle_rho[jjrange[0]:jjrange[1],iirange[0]:iirange[1]]
src_angle = pyroms.remapping.remap(src_ang, wts_file)
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
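                # Rotate velocities through the angle difference using complex
                # arithmetic: with U = u + i*v, multiplying by exp(-i*angle) turns the
                # source-grid (u,v) pair into components aligned with the destination grid.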
if ndim == 3:
angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp])
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp])
U_east = dst_u_east + dst_v_east*1j
eitheta_east = np.exp(-1j*angle[:,0:Mp, Lp-2:Lp])
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2])
else:
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[Mp-2:Mp, 0:Lp])
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[0:2, 0:Lp])
U_east = dst_u_east + dst_v_east*1j
eitheta_east = np.exp(-1j*angle[0:Mp, Lp-2:Lp])
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[0:Mp, 0:2])
U_north = U_north * eitheta_north
dst_u_north = np.real(U_north)
dst_v_north = np.imag(U_north)
U_south = U_south * eitheta_south
dst_u_south = np.real(U_south)
dst_v_south = np.imag(U_south)
U_east = U_east * eitheta_east
dst_u_east = np.real(U_east)
dst_v_east = np.imag(U_east)
U_west = U_west * eitheta_west
dst_u_west = np.real(U_west)
                dst_v_west = np.imag(U_west)
# move back to u,v points
if ndim == 3:
dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + \
dst_u_north[:,-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + \
dst_v_north[:,1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + \
dst_u_south[:,0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + \
dst_v_south[:,1:,:])
dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:,:-1] + \
dst_u_east[:,:,1:])
dst_v_east = 0.5 * np.squeeze(dst_v_east[:,:-1,-1] + \
dst_v_east[:,1:,-1])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + \
dst_u_west[:,:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + \
dst_v_west[:,1:,0])
else:
dst_u_north = 0.5 * np.squeeze(dst_u_north[-1,:-1] + \
dst_u_north[-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:-1,:] + \
dst_v_north[1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[0,:-1] + \
dst_u_south[0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:-1,:] + \
dst_v_south[1:,:])
dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:-1] + \
dst_u_east[:,1:])
dst_v_east = 0.5 * np.squeeze(dst_v_east[:-1,-1] + \
dst_v_east[1:,-1])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:-1] + \
dst_u_west[:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:-1,0] + \
dst_v_west[1:,0])
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
if ndim == 3:
for n in range(dst_grd.vgrid.N):
dst_u_north[n, idxu_north[0]] = spval
dst_v_north[n, idxv_north[0]] = spval
dst_u_south[n, idxu_south[0]] = spval
dst_v_south[n, idxv_south[0]] = spval
dst_u_east[n, idxu_east[0]] = spval
dst_v_east[n, idxv_east[0]] = spval
dst_u_west[n, idxu_west[0]] = spval
dst_v_west[n, idxv_west[0]] = spval
else:
dst_u_north[idxu_north[0]] = spval
dst_v_north[idxv_north[0]] = spval
dst_u_south[idxu_south[0]] = spval
dst_v_south[idxv_south[0]] = spval
dst_u_east[idxu_east[0]] = spval
dst_v_east[idxv_east[0]] = spval
dst_u_west[idxu_west[0]] = spval
dst_v_west[idxv_west[0]] = spval
# write data in destination file
print 'write data in destination file'
sid = '_west'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_west
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_west
sid = '_north'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_north
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_north
sid = '_east'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_east
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_east
sid = '_south'
varn = uvar_out+str(sid)
nc.variables[varn][nctidx] = dst_u_south
varn = vvar_out+str(sid)
nc.variables[varn][nctidx] = dst_v_south
if compute_ubar:
if nctidx == 0:
print 'Creating variable ubar_north'
nc.createVariable('ubar_north', 'f8', \
('ocean_time', 'xi_u'), fill_value=spval)
nc.variables['ubar_north'].long_name = \
'2D u-momentum north boundary condition'
nc.variables['ubar_north'].units = 'meter second-1'
nc.variables['ubar_north'].time = 'ocean_time'
nc.variables['ubar_north'].coordinates = 'xi_u ocean_time'
nc.variables['ubar_north'].field = 'ubar_north, scalar, series'
print 'Creating variable vbar_north'
nc.createVariable('vbar_north', 'f8', \
('ocean_time', 'xi_v'), fill_value=spval)
nc.variables['vbar_north'].long_name = \
'2D v-momentum north boundary condition'
nc.variables['vbar_north'].units = 'meter second-1'
nc.variables['vbar_north'].time = 'ocean_time'
nc.variables['vbar_north'].coordinates = 'xi_v ocean_time'
                    nc.variables['vbar_north'].field = 'vbar_north, scalar, series'
print 'Creating variable ubar_south'
nc.createVariable('ubar_south', 'f8', \
('ocean_time', 'xi_u'), fill_value=spval)
nc.variables['ubar_south'].long_name = \
'2D u-momentum south boundary condition'
nc.variables['ubar_south'].units = 'meter second-1'
nc.variables['ubar_south'].time = 'ocean_time'
nc.variables['ubar_south'].coordinates = 'xi_u ocean_time'
nc.variables['ubar_south'].field = 'ubar_south, scalar, series'
print 'Creating variable vbar_south'
nc.createVariable('vbar_south', 'f8', \
('ocean_time', 'xi_v'), fill_value=spval)
nc.variables['vbar_south'].long_name = \
'2D v-momentum south boundary condition'
nc.variables['vbar_south'].units = 'meter second-1'
nc.variables['vbar_south'].time = 'ocean_time'
nc.variables['vbar_south'].coordinates = 'xi_v ocean_time'
print 'Creating variable ubar_west'
nc.createVariable('ubar_west', 'f8', \
('ocean_time', 'eta_u'), fill_value=spval)
nc.variables['ubar_west'].long_name = \
'2D u-momentum west boundary condition'
nc.variables['ubar_west'].units = 'meter second-1'
nc.variables['ubar_west'].time = 'ocean_time'
nc.variables['ubar_west'].coordinates = 'eta_u ocean_time'
nc.variables['ubar_west'].field = 'ubar_west, scalar, series'
print 'Creating variable vbar_west'
nc.createVariable('vbar_west', 'f8', \
('ocean_time', 'eta_v'), fill_value=spval)
nc.variables['vbar_west'].long_name = \
'2D v-momentum west boundary condition'
nc.variables['vbar_west'].units = 'meter second-1'
nc.variables['vbar_west'].time = 'ocean_time'
nc.variables['vbar_west'].coordinates = 'eta_v ocean_time'
print 'Creating variable ubar_east'
nc.createVariable('ubar_east', 'f8', \
('ocean_time', 'eta_u'), fill_value=spval)
nc.variables['ubar_east'].long_name = \
'2D u-momentum east boundary condition'
nc.variables['ubar_east'].units = 'meter second-1'
nc.variables['ubar_east'].time = 'ocean_time'
nc.variables['ubar_east'].coordinates = 'eta_u ocean_time'
nc.variables['ubar_east'].field = 'ubar_east, scalar, series'
print 'Creating variable vbar_east'
nc.createVariable('vbar_east', 'f8', \
('ocean_time', 'eta_v'), fill_value=spval)
nc.variables['vbar_east'].long_name = \
'2D v-momentum east boundary condition'
nc.variables['vbar_east'].units = 'meter second-1'
nc.variables['vbar_east'].time = 'ocean_time'
nc.variables['vbar_east'].coordinates = 'eta_v ocean_time'
# compute depth average velocity ubar and vbar
# get z at the right position
print 'Computing ubar/vbar from u/v'
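                # Depth-averaged (barotropic) velocities: integrate u (or v) over the
                # layer thicknesses diff(z_w) and divide by the water-column depth,
                # which equals -z_w at the bottom since z_w at the surface is ~0.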
z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] +
dst_grd.vgrid.z_w[0,:,-1, 1:])
z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] +
dst_grd.vgrid.z_w[0,:,-2,:])
z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] +
dst_grd.vgrid.z_w[0,:,0,1:])
z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] +
dst_grd.vgrid.z_w[0,:,1,:])
z_u_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:,-1] +
dst_grd.vgrid.z_w[0,:,:,-2])
z_v_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,-1] +
dst_grd.vgrid.z_w[0,:,1:,-1])
z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] +
dst_grd.vgrid.z_w[0,:,:,1])
z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] +
dst_grd.vgrid.z_w[0,:,1:,0])
if not rotate_uv:
dst_u_north = np.squeeze(dst_u_north)
dst_v_north = np.squeeze(dst_v_north)
dst_u_south = np.squeeze(dst_u_south)
dst_v_south = np.squeeze(dst_v_south)
dst_u_east = np.squeeze(dst_u_east)
dst_v_east = np.squeeze(dst_v_east)
dst_u_west = np.squeeze(dst_u_west)
dst_v_west = np.squeeze(dst_v_west)
dst_ubar_north = np.zeros(dst_u_north.shape[1])
dst_ubar_south = np.zeros(dst_u_south.shape[1])
dst_ubar_east = np.zeros(dst_u_east.shape[1])
dst_ubar_west = np.zeros(dst_u_west.shape[1])
dst_vbar_north = np.zeros(dst_v_north.shape[1])
dst_vbar_south = np.zeros(dst_v_south.shape[1])
dst_vbar_east = np.zeros(dst_v_east.shape[1])
dst_vbar_west = np.zeros(dst_v_west.shape[1])
# print 'Shapes 3', dst_u_north.shape, dst_ubar_north.shape, z_u_north.shape, np.diff(z_u_north[:,1]).shape
for i in range(dst_u_north.shape[1]):
dst_ubar_north[i] = (dst_u_north[:,i] * \
np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i]
dst_ubar_south[i] = (dst_u_south[:,i] * \
np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i]
for i in range(dst_v_north.shape[1]):
dst_vbar_north[i] = (dst_v_north[:,i] * \
np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i]
dst_vbar_south[i] = (dst_v_south[:,i] * \
np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i]
for j in range(dst_u_east.shape[1]):
dst_ubar_east[j] = (dst_u_east[:,j] * \
np.diff(z_u_east[:,j])).sum() / -z_u_east[0,j]
dst_ubar_west[j] = (dst_u_west[:,j] * \
np.diff(z_u_west[:,j])).sum() / -z_u_west[0,j]
for j in range(dst_v_east.shape[1]):
dst_vbar_east[j] = (dst_v_east[:,j] * \
np.diff(z_v_east[:,j])).sum() / -z_v_east[0,j]
dst_vbar_west[j] = (dst_v_west[:,j] * \
np.diff(z_v_west[:,j])).sum() / -z_v_west[0,j]
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
dst_ubar_north[idxu_north[0]] = spval
dst_vbar_north[idxv_north[0]] = spval
dst_ubar_south[idxu_south[0]] = spval
dst_vbar_south[idxv_south[0]] = spval
dst_ubar_east[idxu_east[0]] = spval
dst_vbar_east[idxv_east[0]] = spval
dst_ubar_west[idxu_west[0]] = spval
dst_vbar_west[idxv_west[0]] = spval
nc.variables['ubar_north'][nctidx] = dst_ubar_north
nc.variables['ubar_south'][nctidx] = dst_ubar_south
nc.variables['ubar_east'][nctidx] = dst_ubar_east
nc.variables['ubar_west'][nctidx] = dst_ubar_west
nc.variables['vbar_north'][nctidx] = dst_vbar_north
nc.variables['vbar_south'][nctidx] = dst_vbar_south
nc.variables['vbar_east'][nctidx] = dst_vbar_east
nc.variables['vbar_west'][nctidx] = dst_vbar_west
nctidx = nctidx + 1
print 'ADDING to nctidx ', nctidx
nc.sync()
# close files here? how?
# close destination file
nc.close()
return
| bsd-3-clause | -8,091,286,453,163,524,000 | 47.022892 | 122 | 0.437593 | false |
Josue-Martinez-Moreno/trackeddy | examples/random_field.py | 1 | 8259 | import time
tic=time.time()
import matplotlib
matplotlib.use('Agg')
import trackeddy
import trackeddy.tracking as ttrack
from trackeddy.geometryfunc import *
from pylab import *
import random
import pdb
import cmocean as cm
import matplotlib.gridspec as gridspec
import trackeddy.utils.field_generator as fg
import importlib
importlib.reload(ttrack)
t = 1000
n = 13
xx = linspace(10,12,200)
yy = linspace(10,12,200)
#print("Generate field")
#gf=fg.Generate_field(0.1,0.1,n,xx,yy,'Nint')
#data = gf.assemble_field(t)
data = zeros((t,300,300))
for tt in range(t):
print(tt)
gf=fg.Generate_field(0.1,0.1,randint(5, 15),xx,yy,'Nint')
data[tt,:,:] = gf.assemble_field(1)
##
x = linspace(10,12,300)
y = linspace(10,12,300)
################################################################################
################################################################################
#################################### FLAT ######################################
################################################################################
################################################################################
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
t0 = 0
t = 1000
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
####
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
pos_f = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(data),x,y,eddytdn)
f_field = pos_f+neg_f
for tt in range(t0,t):
f = plt.figure()
gs = gridspec.GridSpec(2, 1)
ax1 = plt.subplot(gs[0])
ax1.pcolormesh(x,y,data[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
ax2 = plt.subplot(gs[1])
ax2.pcolormesh(f_field[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
ax2.contour(f_field[tt,:,:])
    ax1.set_title('Assemble: %03d' % tt)
plt.savefig('time_%03d.png' %tt)
################################################################################
################################################################################
#################################### WAVE ######################################
################################################################################
################################################################################
amplitude = 1
frequency = 20
phase = 1
waves = zeros(shape(data))
X,Y = meshgrid(x,y)
for tt in range(0,t):
    r = X+y/10
    waves[tt,:,:] = 0.3*sin(r*frequency-tt + phase)
wave_data = waves+data
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
pos_w = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_w = reconstruct_syntetic(shape(data),x,y,eddytdn)
w_field = pos_w+neg_w
################################################################################
################################################################################
#################################### JETS ######################################
################################################################################
################################################################################
k_y = 3
phase = 1
k_x = 2
jets = zeros(shape(data))
for tt in range(0,t):
    r = Y
    k_y=random.uniform(2, 3)
    phase=random.uniform(0, 1)
    k_x=random.uniform(1, 2)
    amp=0.3
    jets[tt,:,:] = amp*cos((k_y*(k_y*Y+phase+sin(k_x*X-tt))))
jet_data = jets+data
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
pos_f = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(data),x,y,eddytdn)
j_field = pos_f+neg_f
################################################################################
################################################################################
##################################### KE #######################################
################################################################################
################################################################################
m_ke_c = []
m_ke_f = []
m_ke_w = []
m_ke_j = []
for tt in range(shape(data)[0]):
u_c,v_c = geovelfield( data[tt,:,:] ,x,y)
u_f,v_f = geovelfield(f_field[tt,:,:],x,y)
u_w,v_w = geovelfield(w_field[tt,:,:],x,y)
u_j,v_j = geovelfield(j_field[tt,:,:],x,y)
ke_c = KE(u_c,v_c)
ke_f = KE(u_f,v_f)
ke_w = KE(u_w,v_w)
ke_j = KE(u_j,v_j)
m_ke_c.append(mean(ke_c))
m_ke_f.append(mean(ke_f))
m_ke_w.append(mean(ke_w))
m_ke_j.append(mean(ke_j))
################################################################################
################################################################################
#################################### PLOT ######################################
################################################################################
################################################################################
import seaborn as sns
import pandas as pd
from scipy.stats import spearmanr,linregress
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_f]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_f)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %.2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate flat: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('e_vs_e.png')
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_w]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_w)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %.2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate sin: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('w_vs_e.png')
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_j]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_j)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %.2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate jet: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('j_vs_e.png')
# for ii in range(0,30):
# plt.figure()
# plt.pcolormesh(af[ii])
# plt.savefig('%03d.png' %ii)
# plt.show()
toc=time.time()
print("######## ELAPSED TIME: ###########")
print("######## %2f s ###########" % (toc-tic)) | mit | -887,333,901,396,933,600 | 31.392157 | 127 | 0.47754 | false |
great-expectations/great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_most_common_value.py | 1 | 2692 | from typing import Any, Dict, Optional, Tuple
from great_expectations.core import ExpectationConfiguration
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.column_aggregate_metric import (
ColumnMetricProvider,
column_aggregate_partial,
column_aggregate_value,
)
from great_expectations.expectations.metrics.column_aggregate_metric import sa as sa
from great_expectations.expectations.metrics.import_manager import F
from great_expectations.expectations.metrics.metric_provider import metric_value
from great_expectations.validator.validation_graph import MetricConfiguration
class ColumnMostCommonValue(ColumnMetricProvider):
metric_name = "column.most_common_value"
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
mode_list = list(column.mode().values)
return mode_list
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: "SqlAlchemyExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
column_value_counts = metrics.get("column.value_counts")
return list(
column_value_counts[column_value_counts == column_value_counts.max()].index
)
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[Dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration,
specifying the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if isinstance(execution_engine, SparkDFExecutionEngine):
dependencies["column.value_counts"] = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs={
"sort": "value",
"collate": None,
},
)
return dependencies
| apache-2.0 | 4,233,955,800,987,410,000 | 36.388889 | 92 | 0.688336 | false |
npdoty/bigbang | bigbang/git_repo.py | 1 | 5882 | from git import *
import git
import pandas as pd
import numpy as np
from time import mktime
from datetime import datetime
from entity_resolution import entity_resolve
import networkx as nx
from config.config import CONFIG
ALL_ATTRIBUTES = CONFIG.all_attributes #["HEXSHA", "Committer Name", "Committer Email", "Commit Message", "Time", "Parent Commit", "Touched File"]
def cache_fixer(r): # Adds info from row to graph
r["Touched File"] = [x.strip() for x in r["Touched File"][1:-1].split(",")]
r["Time"] = pd.to_datetime(r["Time"]);
return r
"""
Class that stores an instance of a git repository given the address to that
repo relative to this file. It returns the data in multiple useful forms.
"""
class GitRepo(object):
""" A pandas DataFrame object indexed by time that stores
the raw form of the repo's commit data as a table where
each row is a commit and each col represents an attribute
of that commit (time, message, commiter name, committer email,
commit hexsha)
"""
def __init__(self, name, url=None, attribs = ALL_ATTRIBUTES, cache=None):
self._commit_data = None;
self.url = url;
self.repo = None
self.name = name;
if cache is None:
self.repo = Repo(url)
self.populate_data(ALL_ATTRIBUTES)
else:
cache = cache.apply(cache_fixer, axis=1)
cache.set_index(cache["Time"])
self._commit_data = cache;
missing = list();
cols = self.commit_data.columns
for attr in attribs:
if attr not in cols and unicode(attr) not in cols:
missing.append(attr);
if len(missing) > 0:
print("There were " + str(len(missing)) + " missing attributes: ")
print(missing);
if ("Committer Name" in attribs and "Committer Email" in attribs):
self._commit_data["Person-ID"] = None;
self._commit_data = self._commit_data.apply(lambda row: entity_resolve(row, "Committer Email", "Committer Name"), axis=1)
def gen_data(self, repo, raw):
if not repo.active_branch.is_valid():
print("Found an empty repo: " + str(self.name))
return;
first = repo.commit()
commit = first
firstHexSha = first.hexsha;
generator = git.Commit.iter_items(repo, firstHexSha);
if "Touched File" in raw:
print("WARNING: Currently going through file diffs. This will take a very long time (1 minute per 3000 commits.) We suggest using a small repository.")
for commit in generator:
try:
if "Touched File" in raw:
diff_list = list();
for diff in commit.diff(commit.parents[0]):
if diff.b_blob:
diff_list.append(diff.b_blob.path);
else:
diff_list.append(diff.a_blob.path);
raw["Touched File"].append(diff_list)
if "Committer Name" in raw:
raw["Committer Name"].append(commit.committer.name)
if "Committer Email" in raw:
raw["Committer Email"].append(commit.committer.email)
if "Commit Message" in raw:
raw["Commit Message"].append(commit.message)
if "Time" in raw or True: # TODO: For now, we always ask for the time
raw["Time"].append(pd.to_datetime(commit.committed_date, unit = "s"));
if "Parent Commit" in raw:
raw["Parent Commit"].append([par.hexsha for par in commit.parents])
if "HEXSHA" in raw:
raw["HEXSHA"].append(commit.hexsha)
except LookupError:
print("failed to add a commit because of an encoding error")
def populate_data(self, attribs = ALL_ATTRIBUTES):
raw = dict()
for attrib in attribs:
raw[attrib] = list();
repo = self.repo
self.gen_data(repo, raw);
print(type(raw["Time"]))
# TODO: NEEDS TIME
time_index = pd.DatetimeIndex(raw["Time"], periods = 24, freq = "H")
self._commit_data = pd.DataFrame(raw, index = time_index);
def by_committer(self):
return self.commit_data.groupby('Committer Name').size().order()
def commits_per_day(self):
ans = self.commit_data.groupby(self.commit_data.index).size()
ans = ans.resample("D", how=np.sum)
return ans;
def commits_per_week(self):
ans = self.commits_per_day();
ans = ans.resample("W", how=np.sum)
return ans;
def commits_per_day_full(self):
ans = self.commit_data.groupby([self.commit_data.index, "Committer Name" ]).size()
return ans;
@property
def commit_data(self):
return self._commit_data;
def commits_for_committer(self, committer_name):
full_info = self.commit_data
time_index = pd.DatetimeIndex(self.commit_data["Time"], periods = 24, freq = "H");
df = full_info.loc[full_info["Committer Name"] == committer_name]
df = df.groupby([df.index]).size()
df = df.resample("D", how = np.sum, axis = 0)
return df
def merge_with_repo(self, other):
# TODO: What if commits have the same time?
self._commit_data = self.commit_data.append(other.commit_data);
class MultiGitRepo(GitRepo):
"""
Repos must have a "Repo Name" column
"""
def __init__(self, repos, attribs=ALL_ATTRIBUTES):
self._commit_data = repos[0].commit_data.copy(deep=True);
for i in range(1, len(repos)):
self.merge_with_repo(repos[i]);
| agpl-3.0 | -4,474,020,302,358,678,500 | 36.464968 | 163 | 0.570384 | false |
mprego/NBA | Schedule/Test_Schedule.py | 1 | 2540 | from unittest import TestCase
from Schedule import Schedule
import pandas as pd
class Test_Schedule(TestCase):
def test_get_season_name(self):
sched = Schedule('1/1/2001', '2/1/2001')
name = sched.get_season_name()
exp_name = '2000-01'
self.assertEqual(name, exp_name)
sched = Schedule('11/1/2001', '12/1/2001')
name = sched.get_season_name()
exp_name = '2001-02'
self.assertEqual(name, exp_name)
def test_games(self):
sched = Schedule('1/1/2015')
games = sched.get_games()
size = len(games)
exp_size = 1230
self.assertEqual(size, exp_size)
cols = len(games.columns)
exp_cols = 40 + 4
self.assertEqual(cols, exp_cols)
def test_team_list(self):
sched = Schedule('1/1/2015')
_list = sched.get_team_list()
_len = len(_list)
exp_len = 30
self.assertEqual(_len, exp_len)
# def test_four_factors_helper(self):
# df = pd.DataFrame({'FGM': {1: 5, 2:5},
# '3PM': {1: 2, 2:2},
# 'FGA': {1:10, 2:10},
# 'ORB': {1:1, 2:1},
# 'DRB': {1:10, 2:10},
# 'O_ORB': {1:1, 2:1},
# 'O_DRB': {1: 10, 2: 10},
# 'TO':{1:5, 2:5},
# 'FTM':{1:5, 2:5},
# 'FTA':{1:10, 2:10}
# })
# sched = Schedule('1/1/2015')
# ff = sched.add_four_factors_helper(df, '_', 'FGM', '3PM', 'FGA', 'ORB', 'O_DRB', 'TO', 'FTM', 'FTA')
# efg = ff.ix[1,'_EFG']
# exp_efg = .6
# self.assertEqual(efg, exp_efg)
# tov = ff.ix[1, '_TOV']
# exp_tov = 5/(10+10*.44-1+5)
# self.assertEqual(tov, exp_tov)
# orb = ff.ix[1, '_ORB']
# exp_orb = 1.0/11
# self.assertEqual(orb, exp_orb)
# ftfga = ff.ix[1, '_FTFGA']
# exp_ftfga = .5
# self.assertEqual(ftfga, exp_ftfga)
#
# def test_four_factors(self):
# sched = Schedule('1/1/2015')
# ff = sched.add_four_factors()
# cols = len(ff.columns)
# exp_cols = 44 + 8
# self.assertEqual(cols, exp_cols)
#
# def test_dunk_data(self):
# sched = Schedule('1/1/2015', '1/2/2015')
# dunk = sched.add_dunk_data()
# cols = len(dunk.columns)
# exp_cols = 44 + 6
# self.assertEqual(cols, exp_cols)
| mit | 4,990,053,213,845,532,000 | 33.794521 | 110 | 0.457087 | false |
ECP-CANDLE/Benchmarks | Pilot1/TC1/tc1_baseline_keras2.py | 1 | 7370 | from __future__ import print_function
import pandas as pd
import numpy as np
import os
import sys
import gzip
import argparse
try:
import configparser
except ImportError:
import ConfigParser as configparser
from keras import backend as K
from keras.layers import Input, Dense, Dropout, Activation, Conv1D, MaxPooling1D, Flatten, LocallyConnected1D
from keras.optimizers import SGD, Adam, RMSprop
from keras.models import Sequential, Model, model_from_json, model_from_yaml
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path2 = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path2)
import tc1 as bmk
import candle
def initialize_parameters(default_model = 'tc1_default_model.txt'):
# Build benchmark object
tc1Bmk = bmk.BenchmarkTC1(file_path, default_model, 'keras',
prog='tc1_baseline', desc='Multi-task (DNN) for data extraction from clinical reports - Pilot 3 Benchmark 1')
# Initialize parameters
gParameters = candle.finalize_parameters(tc1Bmk)
#benchmark.logger.info('Params: {}'.format(gParameters))
return gParameters
def run(gParameters):
X_train, Y_train, X_test, Y_test = bmk.load_data(gParameters)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
x_train_len = X_train.shape[1]
# this reshaping is critical for the Conv1D to work
X_train = np.expand_dims(X_train, axis=2)
X_test = np.expand_dims(X_test, axis=2)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
model = Sequential()
dense_first = True
layer_list = list(range(0, len(gParameters['conv']), 3))
for l, i in enumerate(layer_list):
filters = gParameters['conv'][i]
filter_len = gParameters['conv'][i+1]
stride = gParameters['conv'][i+2]
print(i/3, filters, filter_len, stride)
if gParameters['pool']:
pool_list=gParameters['pool']
if type(pool_list) != list:
pool_list=list(pool_list)
if filters <= 0 or filter_len <= 0 or stride <= 0:
break
dense_first = False
if 'locally_connected' in gParameters:
model.add(LocallyConnected1D(filters, filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
else:
#input layer
if i == 0:
model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
else:
model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid'))
model.add(Activation(gParameters['activation']))
if gParameters['pool']:
model.add(MaxPooling1D(pool_size=pool_list[i//3]))
if not dense_first:
model.add(Flatten())
for i, layer in enumerate(gParameters['dense']):
if layer:
if i == 0 and dense_first:
model.add(Dense(layer, input_shape=(x_train_len, 1)))
else:
model.add(Dense(layer))
model.add(Activation(gParameters['activation']))
if gParameters['dropout']:
model.add(Dropout(gParameters['dropout']))
if dense_first:
model.add(Flatten())
model.add(Dense(gParameters['classes']))
model.add(Activation(gParameters['out_activation']))
model.summary()
model.compile(loss=gParameters['loss'],
optimizer=gParameters['optimizer'],
metrics=[gParameters['metrics']])
output_dir = gParameters['output_dir']
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# set up callbacks to do work during model training..
model_name = gParameters['model_name']
path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
csv_logger = CSVLogger('{}/training.log'.format(output_dir))
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
history = model.fit(X_train, Y_train,
batch_size=gParameters['batch_size'],
epochs=gParameters['epochs'],
verbose=1,
validation_data=(X_test, Y_test),
callbacks = [checkpointer, csv_logger, reduce_lr])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# serialize model to JSON
model_json = model.to_json()
with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
json_file.write(model_json)
# serialize model to YAML
model_yaml = model.to_yaml()
with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights("{}/{}.model.h5".format(output_dir, model_name))
print("Saved model to disk")
# load json and create model
json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model_json = model_from_json(loaded_model_json)
# load yaml and create model
yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model_yaml = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model_json.load_weights('{}/{}.model.h5'.format(output_dir, model_name))
print("Loaded json model from disk")
# evaluate json loaded model on test data
loaded_model_json.compile(loss=gParameters['loss'],
optimizer=gParameters['optimizer'],
metrics=[gParameters['metrics']])
score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)
print('json Test score:', score_json[0])
print('json Test accuracy:', score_json[1])
print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1]*100))
# load weights into new model
loaded_model_yaml.load_weights('{}/{}.model.h5'.format(output_dir, model_name))
print("Loaded yaml model from disk")
# evaluate loaded model on test data
loaded_model_yaml.compile(loss=gParameters['loss'],
optimizer=gParameters['optimizer'],
metrics=[gParameters['metrics']])
score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)
print('yaml Test score:', score_yaml[0])
print('yaml Test accuracy:', score_yaml[1])
print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1]*100))
return history
def main():
gParameters = initialize_parameters()
run(gParameters)
if __name__ == '__main__':
main()
try:
K.clear_session()
except AttributeError: # theano does not have this function
pass
| mit | 5,746,599,763,966,178,000 | 32.807339 | 140 | 0.639891 | false |
katholt/Kleborate | scripts/kleborate_to_microreact.py | 1 | 12287 | #!/usr/bin/env python3
"""
Copyright 2018 Kat Holt
Copyright 2018 Ryan Wick (rrwick@gmail.com)
https://github.com/katholt/Kleborate/
This file is part of Kleborate. Kleborate is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version. Kleborate is distributed in
the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details. You should have received a copy of the GNU General Public License along with Kleborate. If
not, see <http://www.gnu.org/licenses/>.
"""
import sys
import argparse
import collections
import pandas as pd
from Bio import Phylo
def get_arguments():
parser = argparse.ArgumentParser(description='A script for converting Kleborate output into a'
'format compatible with Microreact')
required_args = parser.add_argument_group('Required arguments')
required_args.add_argument('--kleborate_in', type=str, required=True,
help='Kleborate tab-delimited results file')
required_args.add_argument('--tree_in', type=str, required=True,
help='Phylogenetic tree')
required_args.add_argument('--csv_out', type=str, required=True,
help='Kleborate results in Microreact format')
required_args.add_argument('--tree_out', type=str, required=True,
help='Tree in Microreact format')
return parser.parse_args()
def main():
args = get_arguments()
name_subs = name_substitution(args.kleborate_in)
check_for_unique_names(name_subs)
save_tree_with_new_names(args.tree_in, args.tree_out, name_subs)
autocolour_columns = get_autocolour_columns(args.kleborate_in)
csv_lines = []
with open(args.kleborate_in, 'rt') as kleborate_results:
original_header, new_header = None, None
for line in kleborate_results:
line = line.rstrip('\n')
if original_header is None:
original_header = line.split('\t')
new_header = get_new_header(original_header, autocolour_columns)
line_parts = new_header
else:
line_parts = get_data(line, name_subs, original_header, new_header)
csv_lines.append((','.join(line_parts)))
print()
print('Writing Microreact table to: {}'.format(args.csv_out))
with open(args.csv_out, 'wt') as output_csv:
for line in csv_lines:
output_csv.write(line)
output_csv.write('\n')
print()
def get_autocolour_columns(kleborate_in):
autocolour_columns = []
table = pd.read_table(kleborate_in)
for col_name in ['species', 'ST', 'YbST', 'CbST', 'AbST', 'SmST', 'wzi', 'K_locus',
'O_locus']:
try:
if len(set(table[col_name])) > 1:
autocolour_columns.append(col_name)
except KeyError:
pass
print()
print('Using "__autocolour" on the following columns:')
print(' ', ', '.join(autocolour_columns))
return set(autocolour_columns)
def get_new_header(original_header, autocolour_columns):
original_header[0] = 'id' # Change 'strain' to 'id' for Microreact.
for autocolour_column in autocolour_columns:
i = find_column_index(original_header, autocolour_column)
original_header[i] = autocolour_column + '__autocolour'
header = list(original_header)
for col in ['virulence_score', 'resistance_score', 'num_resistance_classes',
'num_resistance_genes', 'Yersiniabactin', 'Colibactin', 'Aerobactin',
'Salmochelin', 'rmpA', 'rmpA2']:
header.insert(find_column_index(header, col) + 1, col + '__colour')
for res in ['AGly_acquired', 'Col_acquired', 'Fcyn_acquired', 'Flq_acquired', 'Gly_acquired', 'MLS_acquired', 'Phe_acquired', 'Rif_acquired', 'Sul_acquired', 'Tet_acquired',
'Tgc_acquired', 'Tmt_acquired', 'Bla_acquired', 'Bla_inhR_acquired', 'Bla_ESBL_acquired', 'Bla_ESBL_inhR_acquired', 'Bla_Carb_acquired', 'Bla_chr', 'SHV_mutations',
'Col_mutations', 'Flq_mutations', 'Omp_mutations']:
header.insert(find_column_index(header, res) + 1, res + '__colour')
header.remove(res)
return header
def get_data(line, name_subs, original_header, new_header):
line = line.replace(',', ';')
line_parts = line.split('\t')
line_parts[0] = name_subs[line_parts[0]]
original_data = dict(zip(original_header, line_parts))
new_data = {h: '' for h in new_header}
for label, value in original_data.items():
new_data[label] = value
vir_score = int(original_data['virulence_score'])
res_score = int(original_data['resistance_score'])
res_classes = int(original_data['num_resistance_classes'])
res_genes = int(original_data['num_resistance_genes'])
new_data['virulence_score__colour'] = get_vir_score_colour(vir_score)
new_data['resistance_score__colour'] = get_res_score_colour(res_score)
new_data['num_resistance_classes__colour'] = get_res_classes_colour(res_classes)
new_data['num_resistance_genes__colour'] = get_res_genes_colour(res_genes)
# new_data['Yersiniabactin__colour'] = get_vir_lineage_colour(original_data['Yersiniabactin'])
new_data['Colibactin__colour'] = get_vir_lineage_colour(original_data['Colibactin'])
new_data['Aerobactin__colour'] = get_vir_lineage_colour(original_data['Aerobactin'])
new_data['Salmochelin__colour'] = get_vir_lineage_colour(original_data['Salmochelin'])
new_data['rmpA__colour'] = get_rmpA_colour(original_data['rmpA'])
new_data['rmpA2__colour'] = get_rmpA2_colour(original_data['rmpA2'])
for res_class in ['AGly_acquired', 'Col_acquired', 'Fcyn_acquired', 'Flq_acquired', 'Gly_acquired', 'MLS_acquired', 'Phe_acquired', 'Rif_acquired', 'Sul_acquired', 'Tet_acquired',
'Tgc_acquired', 'Tmt_acquired', 'Bla_acquired', 'Bla_inhR_acquired', 'Bla_ESBL_acquired', 'Bla_ESBL_inhR_acquired', 'Bla_Carb_acquired', 'Bla_chr', 'SHV_mutations',
'Col_mutations', 'Flq_mutations', 'Omp_mutations']:
new_data[res_class + '__colour'] = get_res_class_colour(original_data[res_class])
return [new_data[h] for h in new_header]
def name_substitution(kleborate_in):
name_subs = {}
with open(kleborate_in, 'rt') as kleborate_results:
header = None
for line in kleborate_results:
if header is None:
header = line.split('\t')
if header[0] != 'strain':
sys.exit('Error: first column is not "strain" - is this Kleborate output?')
else:
line_parts = line.split('\t')
if len(line_parts) != len(header):
sys.exit('Error: inconsistent number of columns')
old_name = line_parts[0]
if old_name in name_subs:
sys.exit('Error: duplicate sample ID: ' + old_name)
new_name = old_name.replace('.', '_')
new_name = new_name.replace(',', '_')
new_name = new_name.replace("'", '_')
new_name = new_name.replace('"', '_')
name_subs[old_name] = new_name
return name_subs
def check_for_unique_names(name_subs):
names = list(name_subs.values())
duplicate_names = [item for item, count in collections.Counter(names).items() if count > 1]
if duplicate_names:
sys.exit('Error: duplicate sample IDs: ' + ', '.join(duplicate_names))
def save_tree_with_new_names(tree_in, tree_out, name_subs):
print()
print('Writing Microreact tree to: {}'.format(tree_out))
tree_format = None
for try_tree_format in ['newick', 'nexus', 'nexml', 'phyloxml', 'cdao']:
try:
Phylo.read(tree_in, try_tree_format)
tree_format = try_tree_format
break
except ValueError:
pass
if tree_format is None:
sys.exit('Error: could not read input tree')
tree = Phylo.read(tree_in, tree_format)
for node in tree.get_terminals():
name = str(node.name)
try:
node.name = name_subs[name]
        except KeyError:
sys.exit('Error: sample name in tree not in Kleborate data: ' + name)
Phylo.write(tree, tree_out, 'newick')
def scale_num(start, end, progress):
return int(round(start * (1.0 - progress) + end * progress))
def colour_range(start, end, count):
start, end = start.lower(), end.lower()
if start.startswith('#'):
start = start[1:]
if end.startswith('#'):
end = end[1:]
start_r, start_g, start_b = int(start[0:2], 16), int(start[2:4], 16), int(start[4:6], 16)
end_r, end_g, end_b = int(end[0:2], 16), int(end[2:4], 16), int(end[4:6], 16)
colours = []
for i in range(count):
progress = i / (count - 1)
r, g, b = scale_num(start_r, end_r, progress), scale_num(start_g, end_g, progress), \
scale_num(start_b, end_b, progress)
hex_colour = '"#' + ('0x%X' % r)[2:] + ('0x%X' % g)[2:] + ('0x%X' % b)[2:] + '"'
colours.append(hex_colour)
return colours
def find_column_index(header, col_name):
try:
return header.index(col_name)
except ValueError:
sys.exit('Error: could not find ' + col_name + ' column in Kleborate')
def get_vir_score_colour(vir_score):
try:
return ['#DEEBF7', '#9ECAE1', '#6BAED6', '#4292C6', '#2171B5', '#08306B'][vir_score]
except IndexError:
return '#BFBFBF'
def get_res_score_colour(res_score):
try:
return ['#FCBBA1', '#FC9272', '#FB6A4A', '#BE413D'][res_score]
except IndexError:
return '#BFBFBF'
def get_res_classes_colour(res_classes):
try:
return colour_range('#FCBBA1', '#BE413D', 11)[res_classes]
except IndexError:
return '#BE413D'
def get_res_genes_colour(res_genes):
try:
return colour_range('#FCBBA1', '#BE413D', 21)[res_genes]
except IndexError:
return '#BE413D'
def get_species_colour(species):
try:
return {'Klebsiella pneumoniae': '#875F9A',
'Klebsiella variicola subsp. variicola': '#8CBDB2',
'Klebsiella quasivariicola': '#F0B663',
'Klebsiella quasipneumoniae subsp. quasipneumoniae': '#ED6060',
'Klebsiella quasipneumoniae subsp. similipneumoniae': '#EDA483'}[species]
    except KeyError:
return '#BFBFBF'
def get_vir_lineage_colour(vir_lineage):
vir_lineage_colours = {'ybt 1': '#B27F91', 'ybt 2': '#CDA12C', 'ybt 3': '#56A354',
'ybt 4': '#F28FA2', 'ybt 5': '#DB7723', 'ybt 6': '#93539D',
'ybt 7': '#3A85A8', 'ybt 8': '#7B75CC', 'ybt 9': '#D9C5EF',
'ybt 10': '#449D72', 'ybt 11': '#EBD930', 'ybt 12': '#6AA3C6',
'ybt 13': '#A39F93', 'ybt 14': '#93539D', 'ybt 15': '#EDC59A',
'ybt 16': '#840639', 'ybt 17': '#E25065', 'clb 1': '#99BBE0',
'clb 2A': '#5972AF', 'clb 2B': '#242F69', 'clb 3': '#242F69',
'iro 1': '#B6D5EF', 'iro 2': '#DEC4E8', 'iro 3': '#E29771',
'iro 4': '#A4A4EA', 'iro 5': '#E0AAAA', 'iuc 1': '#B6D5EF',
'iuc 2': '#DEC4E8', 'iuc 2A': '#D8ABDD', 'iuc 3': '#C3EADB',
'iuc 4': '#9ACCBC', 'iuc 5': '#E0AAAA'}
vir_lineage = vir_lineage.split(';')[0]
if vir_lineage in vir_lineage_colours:
return vir_lineage_colours[vir_lineage]
elif vir_lineage == '-':
return '#FFFFFF'
else:
return '#BFBFBF'
def get_rmpA_colour(rmpA):
return '#FFFFFF' if rmpA == '-' else '#08306B'
def get_rmpA2_colour(rmpA2):
return '#FFFFFF' if rmpA2 == '-' else '#08306B'
def get_res_class_colour(res_class):
return '#FFFFFF' if res_class == '-' else '#BE413D'
if __name__ == '__main__':
main()
| gpl-3.0 | 4,596,114,687,895,091,000 | 40.093645 | 187 | 0.593717 | false |
saullocastro/pyNastran | pyNastran/op2/tables/ogf_gridPointForces/ogf_objects.py | 1 | 55421 | from __future__ import print_function
from six import iteritems
import numpy as np
from numpy import zeros, unique, array_equal, empty
from pyNastran.op2.result_objects.op2_objects import ScalarObject
from pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header, write_imag_floats_13e
from pyNastran.op2.vector_utils import transform_force_moment, transform_force_moment_sum, sortedsum1d
from pyNastran.utils import integer_types
try:
import pandas as pd
except ImportError:
pass
class RealGridPointForcesArray(ScalarObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
self.ntotal = 0
self.itotal = 0
# do the element_names/node_element vectors change with the time step
self.is_unique = False
self.element_names = []
#self.ielement = 0
#self.nelements = 0 # result specific
#self.nnodes = None
def is_real(self):
return True
def is_complex(self):
return False
def _reset_indices(self):
self.itotal = 0
#self.ielement = 0
@property
def element_name(self):
headers = [name.strip() for name in unique(self.element_names) if name.strip()]
#headers = unique(self.element_names)
return str(', '.join(headers))
def build(self):
#print("self.ielement = %s" % self.ielement)
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
#assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#if self.ntotal != max(self._ntotals) or self.ntotal != min(self._ntotals):
#raise ValueError('RealGridPointForcesArray: ntotal=%s _ntotals=%s' % (self.ntotal, self._ntotals))
self.is_unique = False
if self.ntotal != min(self._ntotals) or 1:
self.ntotal = max(self._ntotals)
self.is_unique = True
#self.names = []
#self.nnodes = nnodes_per_element
#self.nelements //= nnodes_per_element
self.itime = 0
#self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s ntimes=%s ntotal=%s" % (
#self.element_names, self.ntimes, self.ntotal))
dtype = 'float32'
        if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
if self.is_unique:
assert isinstance(self.ntotal, int), self.ntotal
self.node_element = zeros((self.ntimes, self.ntotal, 2), dtype='int32')
self.element_names = empty((self.ntimes, self.ntotal), dtype='U8')
else:
self.node_element = zeros((self.ntotal, 2), dtype='int32')
self.element_names = empty(self.ntotal, dtype='U8')
#[t1, t2, t3, r1, r2, r3]
self.data = zeros((self.ntimes, self.ntotal, 6), dtype='float32')
def build_dataframe(self):
"""
major-axis - the axis
mode 1 2 3
freq 1.0 2.0 3.0
nodeID ElementID Item
1 2 T1
T2
...
major_axis / top = [
[1, 2, 3],
[1.0, 2.0, 3.0]
]
minor_axis / headers = [T1, T2, T3, R1, R2, R3]
name = mode
"""
headers = self.get_headers()
#name = self.name
if self.is_unique:
ntimes = self.data.shape[0]
nnodes = self.data.shape[1]
nvalues = ntimes * nnodes
node_element = self.node_element.reshape((ntimes * nnodes, 2))
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
#column_names = column_names[0]
#column_values = column_values[0]
column_values2 = []
for value in column_values:
values2 = []
for valuei in value:
values = np.ones(nnodes) * valuei
values2.append(values)
values3 = np.vstack(values2).ravel()
column_values2.append(values3)
df1 = pd.DataFrame(column_values2).T
df1.columns = column_names
#df1.columns.names = column_names
#self.data_frame.columns.names = column_names
df2 = pd.DataFrame(node_element)
df2.columns = ['NodeID', 'ElementID']
df3 = pd.DataFrame(self.element_names.ravel())
df3.columns = ['ElementType']
dfs = [df2, df3]
for i, header in enumerate(headers):
df = pd.DataFrame(self.data[:, :, i].ravel())
df.columns = [header]
dfs.append(df)
self.data_frame = df1.join(dfs)
#print(self.data_frame)
else:
df1 = pd.DataFrame(node_element)
df1.columns = ['NodeID', 'ElementID']
df2 = pd.DataFrame(self.element_names[0, :])
df2.columns = ['ElementType']
df3 = pd.DataFrame(self.data[0])
df3.columns = headers
self.data_frame = df1.join([df2, df3])
#print(self.data_frame)
else:
node_element = [self.node_element[:, 0], self.node_element[:, 1]]
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=node_element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
else:
self.data_frame = pd.Panel(self.data, major_axis=node_element, minor_axis=headers).to_frame()
self.data_frame.columns.names = ['Static']
self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
#print(self.data_frame)
def __eq__(self, table):
return self.assert_equal(table)
def assert_equal(self, table, rtol=1.e-5, atol=1.e-8):
self._eq_header(table)
assert self.is_sort1() == table.is_sort1()
        if not (np.array_equal(self.node_element, table.node_element) and
                np.array_equal(self.element_names, table.element_names)):
assert self.node_element.shape == table.node_element.shape, 'node_element shape=%s table.shape=%s' % (self.node_element.shape, table.node_element.shape)
assert self.element_names.shape == table.element_names.shape, 'element_names shape=%s table.shape=%s' % (self.element_names.shape, table.element_names.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += '(Nid, Eid, EName)\n'
for (nid1, eid1), ename1, (nid2, eid2), ename2 in zip(self.node_element, self.element_names,
table.element_names, table.element_names):
msg += '(%s, %s, %s) (%s, %s, %s)\n' % (nid1, eid1, ename1, nid2, eid2, ename2)
print(msg)
raise ValueError(msg)
atols = []
rtols = []
if self.is_unique:
# node_element varies with time
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
#print('node_element = ', self.node_element)
#print('shape = ', self.node_element.shape)
msg += '#i, Nid, Eid, Name\n'
for ie, e in enumerate(self.node_element[itime, :, :]):
#print('e = ', e)
(nid, eid) = e
ename1 = self.element_names[itime, ie]
ename2 = self.element_names[itime, ie]
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
if not np.allclose(t1, t2, rtol=rtol, atol=atol):
(t11, t21, t31, r11, r21, r31) = t1
(t12, t22, t32, r12, r22, r32) = t2
inonzero = np.where(t1 != 0.)[0]
atoli = np.abs(t2 - t1).max()
rtoli = np.abs(t2[inonzero] / t1[inonzero]).max()
pre_msg = '(%s, %s, %s, %s) ' % (ie, nid, eid, ename1)
msg += '%s(%s, %s, %s, %s, %s, %s)\n%s(%s, %s, %s, %s, %s, %s)\n' % (
pre_msg,
t11, t21, t31, r11, r21, r31,
' ' * len(pre_msg),
t12, t22, t32, r12, r22, r32)
i += 1
atols.append(atoli)
rtols.append(rtoli)
if i > 30:
print(atols)
msg += 'atol.max() = %s\n' % max(atols)
msg += 'rtol.max() = %s\n' % max(rtols)
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
msg += 'atol.max() = %s\n' % max(atols)
msg += 'rtol.max() = %s\n' % max(rtols)
#print(msg)
raise ValueError(msg)
else:
# node_element does not vary with time
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.node_element):
(nid, eid) = e
ename1 = self.element_names[ie]
ename2 = self.element_names[ie]
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(t11, t21, t31, r11, r21, r31) = t1
(t12, t22, t32, r12, r22, r32) = t2
if not np.allclose(t1, t2, rtol=rtol, atol=atol):
pre_msg = '(%s, %s, %s) ' % (nid, eid, ename1)
msg += '%s(%s, %s, %s, %s, %s, %s)\n%s(%s, %s, %s, %s, %s, %s)\n' % (
pre_msg,
t11, t21, t31, r11, r21, r31,
' ' * len(pre_msg),
t12, t22, t32, r12, r22, r32
)
i += 1
if i > 10:
msg += 'atol.max() = %s\n' % max(atols)
msg += 'rtol.max() = %s\n' % max(rtols)
print(msg)
raise ValueError(msg)
if i > 0:
msg += 'atol.max() = %s\n' % max(atols)
msg += 'rtol.max() = %s\n' % max(rtols)
#print(msg)
raise ValueError(msg)
return True
def extract_freebody_loads(
self, eids,
coord_out, coords, nid_cd, i_transform,
itime=0, debug=True, logger=None):
"""
Extracts Patran-style freebody loads
Parameters
----------
        eids : (Ne, ) int ndarray
all the elements to consider
coord_out : CORD2R()
the output coordinate system
coords : dict[int] = CORDx
all the coordinate systems
key : int
value : CORDx
nid_cd : (M, 2) int ndarray
the (BDF.point_ids, cd) array
i_transform : dict[cd] = (Mi, ) int ndarray
the mapping for nid_cd
summation_point : (3, ) float ndarray
the summation point in output??? coordinate system
itime : int; default=0
the time to extract loads for
debug : bool; default=False
debugging flag
logger : logger; default=None
a logger object that gets used when debug=True
Returns
-------
force_out : (n, 3) float ndarray
            the ith force components in the coord_out coordinate frame
moment_out : (n, 3) float ndarray
the ith moment components about the summation point in the coord_out coordinate frame
.. todo:: doesn't seem to handle cylindrical/spherical systems
.. warning:: not done
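
        Example
        -------
        Sketch of the intended call only (the method is flagged as unfinished
        above); the result object, model data and id arrays are assumed to
        come from the user's own OP2/BDF session:

            gpforce = op2.grid_point_forces[1]
            forces, moments = gpforce.extract_freebody_loads(
                eids, coord_out, model.coords, nid_cd, icd_transform, itime=0)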
"""
eids = np.asarray(eids)
#nids = np.asarray(nids)
# todo handle multiple values for itime
gpforce_nids = self.node_element[itime, :, 0]
gpforce_eids = self.node_element[itime, :, 1]
# TODO: remove 0s in gpforce_nids/gpforce_eids to handle transient results
# be careful of the sum row
assert isinstance(eids[0], integer_types), type(eids[0])
is_in = np.in1d(gpforce_eids, eids, assume_unique=False)
irange = np.arange(len(gpforce_nids), dtype='int32')[is_in]
nids = gpforce_nids[irange]
if debug:
            logger.debug('gpforce_eids = %s' % gpforce_eids[is_in])
logger.debug('nids = %s' % gpforce_nids[irange])
logger.debug('eids = %s' % gpforce_eids[irange])
try:
is_in3 = np.in1d(nid_cd[:, 0], nids, assume_unique=False)
except IndexError:
msg = 'nids_cd=%s nids=%s' % (nid_cd, nids)
raise IndexError(msg)
force_global = self.data[itime, irange, :3]
moment_global = self.data[itime, irange, 3:]
#data_global = sortedsum1d(nids, self.data[itime, irange, :], axis=0)
#force_global2 = data_global[:, :3]
#moment_global2 = data_global[:, 3:]
force_global = sortedsum1d(nids, force_global)
moment_global = sortedsum1d(nids, moment_global)
#print(force_global)
#print(force_global2)
#assert np.array_equal(force_global, force_global2)
#assert np.array_equal(moment_global, moment_global2)
force, moment = transform_force_moment(
force_global, moment_global,
coord_out, coords, nid_cd[is_in3, :],
i_transform,
xyz_cid0=None, summation_point_cid0=None,
consider_rxf=False,
debug=debug, logger=logger)
return force, moment
def extract_interface_loads(
self, nids, eids,
coord_out, coords, nid_cd, i_transform,
xyz_cid0, summation_point, itime=0, debug=True, logger=None):
"""
Extracts Patran-style interface loads
Parameters
----------
nids : (Nn, ) int ndarray
all the nodes to consider
eids : (Ne, ) int ndarray
all the elements to consider
coord_out : CORD2R()
the output coordinate system
coords : dict[int] = CORDx
all the coordinate systems
key : int
value : CORDx
nid_cd : (M, 2) int ndarray
the (BDF.point_ids, cd) array
i_transform : dict[cd] = (Mi, ) int ndarray
the mapping for nid_cd
xyz_cid0 : (nnodes + nspoints, 3) ndarray
the grid locations in coordinate system 0
summation_point : (3, ) float ndarray
the summation point in output??? coordinate system
itime : int; default=0
the time to extract loads for
debug : bool; default=False
debugging flag
logger : logger; default=None
a logger object that gets used when debug=True
Returns
-------
force_out : (n, 3) float ndarray
            the ith force components in the coord_out coordinate frame
moment_out : (n, 3) float ndarray
the ith moment components about the summation point in the coord_out coordinate frame
force_out_sum : (3, ) float ndarray
the sum of forces in the coord_out coordinate frame
moment_out_sum : (3, ) float ndarray
the sum of moments about the summation point in the coord_out coordinate frame
.. todo:: doesn't seem to handle cylindrical/spherical systems
.. todo:: Add support for:
2D output style:
- This would allow for shell problems to have loads applied
in the plane of the shells
- This would require normals
1D output style:
- Make loads in the direction of the element
This process can't be done for 0D or 3D elements
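
        Example
        -------
        Illustrative sketch only; the result object, model data and id
        arrays are assumed to come from the user's own OP2/BDF session:

            gpforce = op2.grid_point_forces[1]
            forces, moments, force_sum, moment_sum = gpforce.extract_interface_loads(
                nids, eids, coord_out, model.coords, nid_cd, icd_transform,
                xyz_cid0, summation_point=[0., 0., 0.], itime=0)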
"""
if summation_point is not None:
summation_point = np.asarray(summation_point)
#assert coord_in.Type == 'R', 'Only rectangular coordinate systems are supported; coord_in=\n%s' % str(coord_in)
#assert coord_out.Type == 'R', 'Only rectangular coordinate systems are supported; coord_out=\n%s' % str(coord_out)
eids = np.asarray(eids)
nids = np.asarray(nids)
# TODO: Handle multiple values for itime
# Is this even possible?
gpforce_nids = self.node_element[itime, :, 0]
gpforce_eids = self.node_element[itime, :, 1]
# TODO: Remove 0s in gpforce_nids/gpforce_eids to handle transient results
# be careful of the sum row.
# Do I even need to do this?
assert isinstance(eids[0], integer_types), type(eids[0])
assert isinstance(nids[0], integer_types), type(nids[0])
is_in = np.in1d(gpforce_nids, nids, assume_unique=False)
is_in2 = np.in1d(gpforce_eids[is_in], eids, assume_unique=False)
irange = np.arange(len(gpforce_nids), dtype='int32')[is_in][is_in2]
if irange.size == 0:
msg = 'no nodes/elements found\n'
msg += 'eids=%s\n' % (eids)
msg += 'gpforce_eids=%s\n' % (gpforce_eids)
raise RuntimeError(msg)
if debug:
            logger.debug('gpforce_eids = %s' % gpforce_eids[is_in])
logger.debug('nids = %s' % gpforce_nids[irange])
logger.debug('eids = %s' % gpforce_eids[irange])
try:
is_in3 = np.in1d(nid_cd[:, 0], nids, assume_unique=False)
except IndexError:
msg = 'nids_cd=%s nids=%s' % (nid_cd, nids)
raise IndexError(msg)
force_global = self.data[itime, irange, :3]
moment_global = self.data[itime, irange, 3:]
out = transform_force_moment_sum(force_global, moment_global,
coord_out, coords, nid_cd[is_in3, :],
i_transform,
xyz_cid0[is_in3, :], summation_point_cid0=summation_point,
debug=debug, logger=logger)
return out
def find_centroid_of_load(self, f, m):
"""
Mx = ry*Fz - rz*Fy
My = rz*Fx - rx*Fz
Mz = rx*Fy - ry*Fx
{M} = [F]{r}
[F] = [
            [ 0,  Fz, -Fy],
[-Fz, 0, Fx],
[ Fy, -Fx, 0],
]
        Formally {r} = [F]^-1 {M}, but [F] is a skew-symmetric (cross-product)
        matrix, so det([F]) = 0 always: every point on the line of action of
        the force produces the same moment, and {r} is only determined up to
        a translation along {F}.

        A standard choice is the point on the line of action closest to the
        origin:
            {r} = {F} x {M} / ({F} . {F})
        which assumes {F} is nonzero and that {M} has no component parallel
        to {F} (otherwise the load is a wrench and no single point of
        application reproduces it).  Alternatively, one component can be
        fixed (e.g. rx=0 when Fx != 0) and the remaining 2x2 system solved.
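
        A minimal numpy sketch of the closest-point choice (``force`` and
        ``moment`` are illustrative (3, ) arrays; this method itself is not
        implemented):

            r = np.cross(force, moment) / np.dot(force, force)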
"""
raise NotImplementedError()
    def shear_moment_diagram(self, xyz_cid0, eids, nids, element_centroids_cid0,
                             coord_out, coords, nid_cd, i_transform, stations,
                             idir=0, itime=0, debug=False, logger=None):
"""
Computes a series of forces/moments at various stations along a structure.
Parameters
----------
eids : (nelements, ) int ndarray
an array of element ids to consider
        nids : (nnodes, ) int ndarray
an array of node ids corresponding to eids
#nids_xyz : (nnodes, ) int ndarray
# an array of node ids corresponding to xyz_cid0
xyz_cid0 : (nnodes, 3) float ndarray
all the nodes in the model xyz position in the global frame
element_centroids_cid0 : (nelements, 3) float ndarray
an array of element centroids
coord_out : CORD2R()
the output coordinate system
coords : dict[int] = CORDx
all the coordinate systems
key : int
value : CORDx
nid_cd : (M, 2) int ndarray
the (BDF.point_ids, cd) array
i_transform : dict[cd] = (Mi, ) int ndarray
the mapping for nid_cd
stations : (nstations, ) float ndarray
the station to sum forces/moments about
be careful of picking exactly on symmetry planes/boundaries
of elements or nodes
this list should be sorted (negative to positive)
idir : int; default=0
the axis of the coordinate system to consider
Procedure
---------
1. Clip elements based on centroid.
Elements that are less than the ith station are kept.
2. Get the nodes for those elements.
3a. Extract the freebody loads and sum them about the
summation point (todo).
3b. Extract the interface loads and sum them about the
summation point.
Example
-------
Imagine a swept aircraft wing. Define a coordinate system
in the primary direction of the sweep. Note that station 0
doesn't have to be perfectly at the root of the wing.
Create stations from this point.
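
        A minimal sketch of the intended call (the arrays are illustrative
        and come from the user's model, not from this class):

            stations = np.linspace(0., 20., 21)
            force_sum, moment_sum = gpforce.shear_moment_diagram(
                xyz_cid0, eids, nids, element_centroids_cid0,
                coord_out, model.coords, nid_cd, icd_transform, stations,
                idir=0, itime=0)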
TODO
----
Not Tested...Does 3b work? Can 3a give the right answer?
"""
assert coord_out.type in ['CORD2R', 'CORD1R'], coord_out.type
beta = coord_out.beta()
        element_centroids_coord = np.dot(element_centroids_cid0, beta.T) # TODO: verify
        xyz_coord = np.dot(xyz_cid0, beta.T) # TODO: verify
x_centroid = element_centroids_coord[:, idir]
x_coord = xyz_coord[:, idir]
eids = np.unique(eids)
        nstations = len(stations)
        force_sum = zeros((nstations, 3), dtype='float32')
        moment_sum = zeros((nstations, 3), dtype='float32')
for istation, station in enumerate(stations):
# we're picking the elements on one side of the centroid
# and nodes on the other side
# Calculate the nodes on the boundary.
# If we make a cutting plane and find all the nodes on
# one side of the cutting plane, we can take all the
# nodes within some tolerance of the station direction and
# find the free nodes
i = np.where(x_centroid <= station)
j = np.where(x_coord >= station)
# summation point creation
offset = np.zeros(3, dtype='float64')
offset[idir] = station
summation_point = coord_out.origin + offset
if 0:
# I don't think this will work...
                forcei, momenti = self.extract_freebody_loads(
                    eids[i],
                    coord_out, coords, nid_cd, i_transform,
                    itime=itime, debug=debug, logger=logger)
force_sum[istation, :] = forcei.sum(axis=0)
# TODO: extract_freebody_loads doesn't sum forces/moments
# sum loads about summation point
moment_sum[istation, :] = momenti.sum(axis=0)
else:
                forcei, momenti, force_sumi, moment_sumi = self.extract_interface_loads(
                    nids[j], eids[i],
                    coord_out, coords, nid_cd, i_transform,
                    xyz_cid0, summation_point, itime=itime, debug=debug, logger=logger)
                force_sum[istation, :] = force_sumi
                moment_sum[istation, :] = moment_sumi
return force_sum, moment_sum
def add(self, dt, node_id, eid, ename, t1, t2, t3, r1, r2, r3):
assert isinstance(node_id, int), node_id
self.add_sort1(dt, eid, node_id, ename, t1, t2, t3, r1, r2, r3)
def add_sort1(self, dt, node_id, eid, ename, t1, t2, t3, r1, r2, r3):
assert eid is not None, eid
assert isinstance(node_id, int), node_id
self._times[self.itime] = dt
if self.is_unique:
self.node_element[self.itime, self.itotal, :] = [node_id, eid]
self.element_names[self.itime, self.itotal] = ename
else:
self.node_element[self.itotal, :] = [node_id, eid]
self.element_names[self.itotal] = ename
#self.node_element[self.itotal, :] = [eid, node_id]
#self.element_names[self.itotal] = ename
self.data[self.itime, self.itotal, :] = [t1, t2, t3, r1, r2, r3]
self.itotal += 1
def get_stats(self):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
' _ntotals: %s\n' % self._ntotals,
]
#nelements = self.nelements
ntimes = self.ntimes
#nnodes = self.nnodes
ntotal = self.ntotal
        if self.is_unique:
            nelements = unique(self.node_element[:, :, 1]).size
        else:
            nelements = unique(self.node_element[:, 1]).size
msg = []
if self.nonlinear_factor is not None: # transient
msgi = ' type=%s ntimes=%i nelements=%i ntotal=%i\n' % (
self.__class__.__name__, ntimes, nelements, ntotal)
if self.ntotal != min(self._ntotals):
msgi += ' _ntotals=%s\n' % (self._ntotals)
ntimes_word = 'ntimes'
else:
msgi = ' type=%s nelements=%i total=%i\n' % (
self.__class__.__name__, nelements, ntotal)
if self.ntotal != min(self._ntotals):
msgi += ' _ntotals=%s\n' % (self._ntotals)
ntimes_word = '1'
msg.append(msgi)
headers = self.get_headers()
n = len(headers)
#element_names = [name.strip() for name in unique(self.element_names)]
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n,
', '.join(headers)))
msg.append(' data.shape=%s\n' % str(self.data.shape))
msg.append(' element type: %s\n ' % self.element_name)
msg += self.get_data_code()
return msg
#def get_element_index(self, eids):
#itot = searchsorted(eids, self.node_element[:, 0])
#return itot
#def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.node_element[:, 0] == eid) for eid in eids])
#return ind
def write_csv(self, csv_file, is_mag_phase=False):
name = str(self.__class__.__name__)
csv_file.write('%s\n' % name)
headers = ['Nid', 'Eid', 'EName', 'T1', 'T2', 'T3', 'R1', 'R2', 'R3']
csv_file.write('%s,' * len(headers) % tuple(headers) + '\n')
#node = self.node_gridtype[:, 0]
#gridtype = self.node_gridtype[:, 1]
itime = 0
times = self._times
assert self.is_unique, self.is_unique
# sort1 as sort1
for itime in range(self.ntimes):
dt = self._times[itime]
t1 = self.data[itime, :, 0]
t2 = self.data[itime, :, 1]
t3 = self.data[itime, :, 2]
r1 = self.data[itime, :, 3]
r2 = self.data[itime, :, 4]
r3 = self.data[itime, :, 5]
nids = self.node_element[itime, :, 0]
eids = self.node_element[itime, :, 1]
enames = self.element_names[itime, :]
zero = ' '
ntotal = self._ntotals[itime]
for (i, nid, eid, ename, t1i, t2i, t3i, r1i, r2i, r3i) in zip(
range(ntotal), nids, eids, enames, t1, t2, t3, r1, r2, r3):
csv_file.write('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % (
itime, nid, eid, ename.strip(), t1i, t2i, t3i, r1i, r2i, r3i))
return
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg = self._get_f06_msg()
ntimes = self.data.shape[0]
if self.is_unique:
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[t1, t2, t3, r1, r2, r3]
t1 = self.data[itime, :, 0]
t2 = self.data[itime, :, 1]
t3 = self.data[itime, :, 2]
r1 = self.data[itime, :, 3]
r2 = self.data[itime, :, 4]
r3 = self.data[itime, :, 5]
nids = self.node_element[itime, :, 0]
eids = self.node_element[itime, :, 1]
enames = self.element_names[itime, :]
zero = ' '
ntotal = self._ntotals[itime]
#print(self._ntotals)
assert len(eids) == len(nids)
assert len(enames) == len(nids), 'enames=%s nnids=%s' % (len(enames), len(nids))
assert len(t1) == len(nids)
assert len(t2) == len(nids)
assert len(t3) == len(nids)
assert len(r1) == len(nids)
assert len(r2) == len(nids)
assert len(nids) == ntotal, 'len(nids)=%s ntotal=%s' % (len(nids), ntotal)
for (i, nid, eid, ename, t1i, t2i, t3i, r1i, r2i, r3i) in zip(
range(ntotal), nids, eids, enames, t1, t2, t3, r1, r2, r3):
#print(nid, eid, ename, t1i)
vals = [t1i, t2i, t3i, r1i, r2i, r3i]
vals2 = write_floats_13e(vals)
[f1, f2, f3, m1, m2, m3] = vals2
if eid == 0:
f.write(' %8s %10s %s %-13s %-13s %-13s %-13s %-13s %s\n' % (
nid, eid, ename, f1, f2, f3, m1, m2, m3))
zero = '0'
else:
f.write('%s %8s %10s %s %-13s %-13s %-13s %-13s %-13s %s\n' % (
zero, nid, eid, ename, f1, f2, f3, m1, m2, m3))
zero = ' '
f.write(page_stamp % page_num)
page_num += 1
else:
nids = self.node_element[:, 0]
eids = self.node_element[:, 1]
enames = self.element_names
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[t1, t2, t3, r1, r2, r3]
t1 = self.data[itime, :, 0]
t2 = self.data[itime, :, 1]
t3 = self.data[itime, :, 2]
r1 = self.data[itime, :, 3]
r2 = self.data[itime, :, 4]
r3 = self.data[itime, :, 5]
zero = ' '
for (nid, eid, ename, t1i, t2i, t3i, r1i, r2i, r3i) in zip(
nids, eids, enames, t1, t2, t3, r1, r2, r3):
vals = [t1i, t2i, t3i, r1i, r2i, r3i]
vals2 = write_floats_13e(vals)
[f1, f2, f3, m1, m2, m3] = vals2
if eid == 0:
f.write(' %8s %10s %s %-13s %-13s %-13s %-13s %-13s %s\n' % (
nid, eid, ename, f1, f2, f3, m1, m2, m3))
zero = '0'
else:
f.write('%s %8s %10s %s %-13s %-13s %-13s %-13s %-13s %s\n' % (
zero, nid, eid, ename, f1, f2, f3, m1, m2, m3))
zero = ' '
f.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def _get_f06_msg(self):
msg = [
' G R I D P O I N T F O R C E B A L A N C E\n',
' \n',
' POINT-ID ELEMENT-ID SOURCE T1 T2 T3 R1 R2 R3\n',
#'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'
#' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'
#' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'
]
return msg
def get_headers(self):
headers = ['f1', 'f2', 'f3', 'm1', 'm2', 'm3']
return headers
class ComplexGridPointForcesArray(ScalarObject):
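    """
    Stores the complex grid point force balance results (one force/moment
    record per (node, element) pair) that back the F06
    'COMPLEX GRID POINT FORCE BALANCE' table written by write_f06 below.
    """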
def __init__(self, data_code, is_sort1, isubcase, dt):
ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
self.ntotal = 0
self.itotal = 0
# do the element_names/node_element vectors change with the time step
self.is_unique = False
#self.ielement = 0
#self.nelements = 0 # result specific
#self.nnodes = None
def is_real(self):
return False
def is_complex(self):
return True
def _reset_indices(self):
self.itotal = 0
#self.ielement = 0
@property
def element_name(self):
headers = [name.strip() for name in unique(self.element_names)]
#headers = unique(self.element_names)
return str(', '.join(headers))
def build(self):
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
#self.ntotal += 5 # TODO: remove
#print("self.ntotal = %s" % self.ntotal)
#print("self.itotal = %s" % self.itotal)
#print("self._ntotals = %s" % self._ntotals)
#self.is_unique = False
if self.ntotal != max(self._ntotals) or 1:
self.ntotal = max(self._ntotals)
self.is_unique = True
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
#assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
#self.nnodes = nnodes_per_element
#self.nelements //= nnodes_per_element
self.itime = 0
#self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_names, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, int):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
if self.is_unique:
self.node_element = zeros((self.ntimes, self.ntotal, 2), dtype='int32')
self.element_names = empty((self.ntimes, self.ntotal), dtype='U8')
else:
self.node_element = zeros((self.ntotal, 2), dtype='int32')
self.element_names = empty(self.ntotal, dtype='U8')
#[t1, t2, t3, r1, r2, r3]
self.data = zeros((self.ntimes, self.ntotal, 6), dtype='complex64')
def build_dataframe(self):
"""
major-axis - the axis
mode 1 2 3
freq 1.0 2.0 3.0
nodeID ElementID Item
1 2 T1
T2
...
major_axis / top = [
[1, 2, 3],
[1.0, 2.0, 3.0]
]
minor_axis / headers = [T1, T2, T3, R1, R2, R3]
name = mode
"""
headers = self.get_headers()
#name = self.name
if self.is_unique:
ntimes = self.data.shape[0]
nnodes = self.data.shape[1]
nvalues = ntimes * nnodes
node_element = self.node_element.reshape((ntimes * nnodes, 2))
df1 = pd.DataFrame(node_element)
df1.columns = ['NodeID', 'ElementID']
df2 = pd.DataFrame(self.element_names[0, :])
df2.columns = ['ElementType']
df3 = pd.DataFrame(self.data[0])
df3.columns = headers
self.data_frame = df1.join([df2, df3])
#print(self.data_frame)
else:
node_element = [self.node_element[:, 0], self.node_element[:, 1]]
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=node_element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
else:
self.data_frame = pd.Panel(self.data, major_axis=node_element, minor_axis=headers).to_frame()
self.data_frame.columns.names = ['Static']
self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
#print(self.data_frame)
def _build_dataframe(self):
"""
major-axis - the axis
mode 1 2 3
freq 1.0 2.0 3.0
nodeID ElementID Item
1 2 T1
T2
...
major_axis / top = [
[1, 2, 3],
[1.0, 2.0, 3.0]
]
minor_axis / headers = [T1, T2, T3, R1, R2, R3]
name = mode
"""
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
if self.is_unique:
#node_element = [self.node_element[:, 0], self.node_element[:, 1]]
ntimes = self.data.shape[0]
nnodes = self.data.shape[1]
node_element_temp = self.node_element.reshape((ntimes * nnodes, 2))
node_element = [node_element_temp[:, 0], node_element_temp[:, 1]]
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=node_element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
else:
node_element = [self.node_element[:, 0], self.node_element[:, 1]]
#print('column_names =', column_names)
#for name, values in zip(column_names, column_values):
#print(' %s = %s' % (name, values))
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=node_element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']
def __eq__(self, table):
self._eq_header(table)
assert self.is_sort1() == table.is_sort1()
        if not (np.array_equal(self.node_element, table.node_element) and
                np.array_equal(self.element_names, table.element_names)):
assert self.node_element.shape == table.node_element.shape, 'node_element shape=%s table.shape=%s' % (self.node_element.shape, table.node_element.shape)
assert self.element_names.shape == table.element_names.shape, 'element_names shape=%s table.shape=%s' % (self.element_names.shape, table.element_names.shape)
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
msg += '(Eid, Nid, EName)\n'
            for (nid1, eid1), ename1, (nid2, eid2), ename2 in zip(self.node_element, self.element_names,
                                                                   table.node_element, table.element_names):
msg += '(%s, %s, %s) (%s, %s, %s)\n' % (nid1, eid1, ename1, nid2, eid2, ename2)
print(msg)
raise ValueError(msg)
if self.is_unique:
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.node_element):
(eid, nid) = e
ename1 = self.element_names[itime, ie]
ename2 = self.element_names[itime, ie]
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(t11, t21, t31, r11, r21, r31) = t1
(t12, t22, t32, r12, r22, r32) = t2
if not np.array_equal(t1, t2):
                            msg += '(%s, %s, %s) (%s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s)\n' % (
                                eid, nid, ename1,
                                t11, t21, t31, r11, r21, r31,
                                t12, t22, t32, r12, r22, r32)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
else:
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.node_element):
(eid, nid) = e
ename1 = self.element_names[ie]
ename2 = self.element_names[ie]
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
(t11, t21, t31, r11, r21, r31) = t1
(t12, t22, t32, r12, r22, r32) = t2
if not np.array_equal(t1, t2):
                            msg += '(%s, %s, %s) (%s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s)\n' % (
                                eid, nid, ename1,
                                t11, t21, t31, r11, r21, r31,
                                t12, t22, t32, r12, r22, r32)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, node_id, eid, ename, t1, t2, t3, r1, r2, r3):
assert eid is not None, eid
assert isinstance(node_id, int), node_id
if self.is_unique:
self.node_element[self.itime, self.itotal, :] = [node_id, eid]
self.element_names[self.itime, self.itotal] = ename
else:
self.node_element[self.itotal, :] = [node_id, eid]
self.element_names[self.itotal] = ename
self.data[self.itime, self.itotal, :] = [t1, t2, t3, r1, r2, r3]
self.itotal += 1
def get_stats(self):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
#nelements = self.nelements
ntimes = self.ntimes
#nnodes = self.nnodes
ntotal = self.ntotal
nelements = unique(self.node_element[:, 1]).size
msg = []
if self.nonlinear_factor is not None: # transient
msgi = ' type=%s ntimes=%i nelements=%i ntotal=%i\n' % (
self.__class__.__name__, ntimes, nelements, ntotal)
ntimes_word = 'ntimes'
else:
msgi = ' type=%s nelements=%i total=%i\n' % (
self.__class__.__name__, nelements, ntotal)
ntimes_word = '1'
msg.append(msgi)
headers = self.get_headers()
n = len(headers)
#element_names = [name.strip() for name in unique(self.element_names)]
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n,
', '.join(headers)))
msg.append(' data.shape=%s\n' % str(self.data.shape))
msg.append(' element type: %s\n ' % self.element_name)
msg += self.get_data_code()
return msg
#def get_element_index(self, eids):
#itot = searchsorted(eids, self.node_element[:, 0])
#return itot
#def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.node_element[:, 0] == eid) for eid in eids])
#return ind
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg = self._get_f06_msg(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
ntimes = self.data.shape[0]
if self.is_unique:
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[t1, t2, t3, r1, r2, r3]
t1 = self.data[itime, :, 0]
t2 = self.data[itime, :, 1]
t3 = self.data[itime, :, 2]
r1 = self.data[itime, :, 3]
r2 = self.data[itime, :, 4]
r3 = self.data[itime, :, 5]
eids = self.node_element[itime, :, 1]
nids = self.node_element[itime, :, 0]
enames = self.element_names[itime, :]
zero = ' '
ntotal = self._ntotals[itime]
for (i, nid, eid, ename, t1i, t2i, t3i, r1i, r2i, r3i) in zip(
range(ntotal), nids, eids, enames, t1, t2, t3, r1, r2, r3):
vals = [t1i, t2i, t3i, r1i, r2i, r3i]
vals2 = write_imag_floats_13e(vals, is_mag_phase)
[f1r, f2r, f3r, m1r, m2r, m3r, f1i, f2i, f3i, m1i, m2i, m3i] = vals2
if eid == 0:
f.write(' %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n' % (
nid, eid, ename, f1r, f2r, f3r, m1r, m2r, m3r,
'', '', '', f1i, f2i, f3i, m1i, m2i, m3i,
))
zero = '0'
else:
f.write('%s %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n' % (
zero, nid, eid, ename, f1r, f2r, f3r, m1r, m2r, m3r,
'', '', '', f1i, f2i, f3i, m1i, m2i, m3i,))
zero = ' '
f.write(page_stamp % page_num)
page_num += 1
else:
eids = self.node_element[:, 1]
nids = self.node_element[:, 0]
enames = self.element_names
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[t1, t2, t3, r1, r2, r3]
t1 = self.data[itime, :, 0]
t2 = self.data[itime, :, 1]
t3 = self.data[itime, :, 2]
r1 = self.data[itime, :, 3]
r2 = self.data[itime, :, 4]
r3 = self.data[itime, :, 5]
zero = ' '
for (nid, eid, ename, t1i, t2i, t3i, r1i, r2i, r3i) in zip(
nids, eids, enames, t1, t2, t3, r1, r2, r3):
vals = [t1i, t2i, t3i, r1i, r2i, r3i]
vals2 = write_imag_floats_13e(vals, is_mag_phase)
[f1r, f2r, f3r, m1r, m2r, m3r, f1i, f2i, f3i, m1i, m2i, m3i] = vals2
if eid == 0:
f.write(' %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n' % (
nid, eid, ename, f1r, f2r, f3r, m1r, m2r, m3r,
'', '', '', f1i, f2i, f3i, m1i, m2i, m3i,
))
zero = '0'
else:
f.write('%s %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n'
' %8s %10s %8s %-13s %-13s %-13s %-13s %-13s %s\n' % (
zero, nid, eid, ename, f1r, f2r, f3r, m1r, m2r, m3r,
'', '', '', f1i, f2i, f3i, m1i, m2i, m3i,))
zero = ' '
f.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def _get_f06_msg(self, is_mag_phase=True, is_sort1=True):
msg = [
' C O M P L E X G R I D P O I N T F O R C E B A L A N C E\n',
#sort,
#' \n'
#' POINT-ID ELEMENT-ID SOURCE T1 T2 T3 R1 R2 R3\n'
#' 19 21 TRIA3 0.0 0.0 0.0 0.0 0.0 0.0'
#' 0.0 0.0 0.0 0.0 0.0 0.0'
]
if is_mag_phase:
            msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
#msg += [' FREQ ELEMENT-ID SOURCE T1 T2 T3 R1 R2 R3\n']
msg += [' POINT-ID ELEMENT-ID SOURCE T1 T2 T3 R1 R2 R3\n']
else:
# TODO: get this right
msg += [' POINT-ID ELEMENT-ID SOURCE T1 T2 T3 R1 R2 R3\n']
return msg
def get_headers(self):
headers = ['f1', 'f2', 'f3', 'm1', 'm2', 'm3']
return headers
| lgpl-3.0 | -2,043,231,779,348,391,200 | 42.74191 | 169 | 0.469822 | false |
coblo/isccbench | iscc_bench/algos/cdc_fast_text.py | 1 | 10359 | # -*- coding: utf-8 -*-
"""Fast-CDC on Unicode Code Points"""
import logging
from io import StringIO
from os.path import basename
from statistics import mean
from typing import TextIO
from iscc_bench.algos.metrics import jaccard
from iscc_bench.algos.slide import sliding_window
from iscc_bench.readers.mltext import mltext
from iscc_bench.textid.normalize import text_normalize
from iscc_bench.utils import load_text_file
import matplotlib.pyplot as plt
logr = logging.getLogger(__name__)
MAX_INT64 = 2 ** 64 - 1
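# FastCDC chunking parameters, in Unicode code points: normal, minimum and
# maximum chunk sizes plus the two judgement masks (GEAR2_MASK1 has more bits
# set, i.e. it is the stricter mask used below the normal size).  The
# CHUNKING_GEAR table below holds the 256 random gear-hash values indexed by
# the code point modulo 256.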
GEAR2_NORM = 1024
GEAR2_MIN = 64
GEAR2_MAX = 4096
GEAR2_MASK1 = 0b1101100110110100010000
GEAR2_MASK2 = 0b11100101010110000000
CHUNKING_GEAR = [
9584138480181866666,
4739450037122062430,
1042006760432515769,
10675154520554330663,
15869016765101259526,
8970928072383595559,
1399451202205921674,
14523822808097149755,
16268498464839721299,
10481172452375523505,
17104617054662428007,
1589812074021361642,
5529368114994898429,
16097147859444922117,
7366391750793198740,
11100538009918328137,
1389689728615383157,
4977138822009172500,
908349889557194910,
14452518814433479233,
2122926032271239532,
591612022955043504,
9379034436570273189,
12748258297147873806,
4307386326245858243,
13845229916084989633,
11224472648935237303,
7047696390035316099,
2021133566789993437,
17387162748083618158,
11746787256992261957,
6644482612611712714,
15729398955930993486,
18187694890389888249,
13375007170405426180,
4646676434852504131,
13152698236329639071,
899989819383117385,
1604228284900755822,
13429168974601667864,
3706248770764044735,
3719799868214789934,
339511817415309475,
12306710798301877171,
9844020938499650522,
13507342816267977422,
15331217600725578556,
7506003564454403634,
17943236144189306428,
282153689319390566,
7654271695669749695,
2650412143911437370,
6193440044944269691,
9296646612477743744,
15077579129862372948,
67630558006200567,
11937031764123301943,
1634327986517329169,
16073934395340319514,
11660580892053471307,
12301495579660351243,
16908718276972184511,
6851717516129410187,
13288278789994352315,
17482170774163197685,
12177168157992128323,
1679876621412537528,
15666827561093998679,
4235032027386979601,
17396011814487376094,
2036017399572567727,
4977152437582070133,
11341111713611820820,
5866443846249079891,
5131277185090952872,
8325299058005558320,
5701450024662049407,
15870252139465586153,
641910037851244477,
5172232175829573378,
2261684586607900474,
11396825283718526131,
12408680075109652465,
7761877592432080901,
13820035802684848169,
8150091535052795450,
1103357817677537274,
13470426615970288837,
4696524065622673976,
9336804607285957500,
13043178028673218162,
7139020806469476608,
12450708403507569100,
2877039905016676547,
15118872351294838361,
3277072151995360446,
1979210712452295885,
14822651643543876641,
5849754172112174627,
13664543478254756807,
16186972696580520130,
14259131679517995788,
1772106294408535188,
2668205339646827112,
3734021086026184498,
4257506854909152229,
6797729639474582495,
3708095106171770747,
15445894064208319783,
11045733249000282278,
6925260395759991481,
6761677416581440942,
3134957115005596133,
5496794829211694837,
225035875953155227,
18051382753002575119,
6911658830635795092,
6648838042848840266,
7680838377178993211,
14373546918520540763,
7385952462173201391,
7500965322394952100,
15539214383494689771,
14355530880918970074,
4040759991734970063,
1335151750647325670,
13713452291232361388,
8852782707920062625,
6076783566257059794,
14451547968886132839,
6756882940270420653,
17423128808598833972,
5877907771709558759,
14308413074787508328,
12294727846616188882,
13766545313722789196,
7000331838802888702,
15110028412924060381,
15869145452552081798,
10836437530623796047,
1273143868608979117,
17728019699248776702,
379008101491021165,
6658832383485441856,
6005905363267598720,
4792802520786808134,
17024928019214694263,
7949301678895773307,
14602122883430422290,
6416689239839102410,
18112987618441438141,
5424513836620859057,
12327961344656070412,
18229731317766561349,
6214341855555485197,
14659604854593022088,
18341976098904231516,
9093141550798891276,
4487469223051523007,
12576621890114680116,
11368566035561888278,
16632902625329423294,
13764076000271015053,
11494903226088746337,
14079100963083335535,
5976601008655555884,
5685807667042201553,
16503266544486236927,
5505089898459277917,
17076606531971661551,
939769563919939433,
17217248958964594832,
11196454443995107214,
13253314556391295544,
17340262486782904124,
5483165811177129540,
121736889831618943,
6318157315988658220,
14520375112718267902,
689388276875596813,
5273319774965020902,
7975410517565653865,
13935269057627157047,
16821796908479891795,
5882048506860913277,
18003709489856105216,
1424933842252756366,
6634557257081066175,
16179356916240399588,
11153419399622634817,
15654294493035402949,
2652919763627807814,
16437183290373292867,
16903315446495122175,
3575318971059548300,
3073697257555445515,
16187136733800880291,
15191964085364171996,
11982016174040399757,
1948589207658719032,
14444449012119241408,
7130754012353479650,
7480280819583944745,
3603028513293740433,
7021162527209392860,
2124450348946366496,
14349140477237426219,
7396225914272122063,
16288120608246645021,
7309794834881975478,
16746864570463829614,
9239996606832866982,
14126189643057989505,
5785181374404079776,
16681042508550037223,
9085478584447523753,
12879577862603639783,
13351556131001260565,
10860701565908202403,
9109516948909639475,
2942389181877553466,
1907923359833671766,
1700327967934711796,
4355952370607563279,
6159416062364401684,
8120694842642123744,
4670360822544180192,
12684384265447906291,
11518186189217338692,
14839496566538901930,
13515715604989800698,
12135065096961528408,
9056982071865174221,
12690699907549395246,
2080896935929507230,
14546126411900211421,
6222235617711806766,
13387691023848518640,
1259523422199249803,
1733690531272524911,
16691543548458831721,
3252085970219428027,
790320086519395195,
8366099548552136926,
357423734596052102,
6375583027298966643,
88639135753272123,
13813972796887520980,
8203570281250814300,
18377325011640278855,
2922465295015278442,
2164203008979443347,
7447171935848155518,
3663261456454345351,
5865411828910435346,
13570376904595974307,
]
def chunk_text(stream: TextIO):
section = stream.read(GEAR2_MAX)
while True:
if len(section) < GEAR2_MAX:
section += stream.read(GEAR2_MAX)
if len(section) == 0:
break
boundary = chunk_length(
section, GEAR2_NORM, GEAR2_MIN, GEAR2_MAX, GEAR2_MASK1, GEAR2_MASK2,
)
yield section[:boundary]
section = section[boundary:]
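# FastCDC cut-point search: a gear hash is rolled over the text one code point
# at a time.  Below norm_size the stricter mask_1 is tested, so cuts are
# unlikely; from norm_size up to max_size the looser mask_2 is tested, which
# normalizes the chunk-size distribution between min_size and max_size.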
def chunk_length(text, norm_size, min_size, max_size, mask_1, mask_2):
data_length = len(text)
i = min_size
pattern = 0
if data_length <= min_size:
return data_length
barrier = min(norm_size, data_length)
while i < barrier:
pattern = ((pattern << 1) + CHUNKING_GEAR[ord(text[i]) % 256]) & MAX_INT64
if not pattern & mask_1:
return i
i = i + 1
barrier = min(max_size, data_length)
while i < barrier:
pattern = ((pattern << 1) + CHUNKING_GEAR[ord(text[i]) % 256]) & MAX_INT64
if not pattern & mask_2:
return i
i = i + 1
return i
SAMPLES = 500
def test_text_chunks():
fps = list(mltext())
losses = []
chunk_sizes = []
num_chunks = []
similarities = []
dissimilarities = []
for fp_a, fp_b, fp_c in sliding_window(fps, size=3, step=2, fillvalue=None):
if fp_b is None:
break
if fp_c is None:
fp_c = fps[0]
chunks_a = StringIO(text_normalize(load_text_file(fp_a)))
chunks_b = StringIO(text_normalize(load_text_file(fp_b)))
chunks_c = StringIO(text_normalize(load_text_file(fp_c)))
chunks_a = list(chunk_text(chunks_a))
chunks_b = list(chunk_text(chunks_b))
chunks_c = list(chunk_text(chunks_c))
chunk_sizes.extend(len(c) for c in chunks_a)
def cut_regions(chunks):
regs = []
for a, b in sliding_window(chunks, size=2, step=1):
regs.append(":".join((a[-4:], b[:4])))
return regs
# select cutpoint regions only
chunks_a = cut_regions(chunks_a)
chunks_b = cut_regions(chunks_b)
chunks_c = cut_regions(chunks_c)
num_chunks.append(len(chunks_a))
sim_sim = jaccard(chunks_a, chunks_b)
similarities.append(sim_sim)
sim_dif = jaccard(chunks_a, chunks_c)
dissimilarities.append(sim_dif)
loss = (sim_dif or 0.00001) / (sim_sim or 0.00001)
logr.debug(
f"Loss: {loss:.8f} Sim: {sim_sim:.3f} Dif: {sim_dif:.3f} ({basename(fp_a)})"
)
losses.append(loss)
plt.hist(chunk_sizes, color="blue", edgecolor="black", bins=int(4096 / 32))
plt.show()
return {
"status": "ok",
"mean_loss": mean(losses),
"avg_num_chunks": mean(num_chunks),
"avg_chunk_size": mean(chunk_sizes),
"max_chunk_size": max(chunk_sizes),
"avg_sim": mean(similarities),
"avg_dis": mean(dissimilarities),
}
if __name__ == "__main__":
from pprint import pprint
log_format = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=log_format)
r = test_text_chunks()
pprint(r)
| bsd-2-clause | 1,405,179,225,969,282,000 | 25.159091 | 88 | 0.700357 | false |
ray-project/ray | python/ray/util/data/__init__.py | 2 | 3354 | from collections import defaultdict
from typing import Iterable
import pandas as pd
from ray.util.data.dataset import MLDataset
from ray.util.data.parquet import read_parquet
from ray.util.iter import T, ParallelIterator
try:
import dataclasses
except: # noqa: E722
pass
else:
from dataclasses import is_dataclass
def to_pandas(it: ParallelIterator[T],
batch_size: int = 32) -> "ParallelIterator[pd.DataFrame]":
"""Convert the a ParallelIterator to ParallelIterator of pd.DataFrame.
The record type should be list like object or dataclass instance. If
the record is a iterable, we will convert to a list. If the record has
__getitem__ attr, we will use __getitem__ to get the given column
indexes data to create pandas DataFrame. If the record is dataclass
instance we will use __getattr__ to get the given column.
Args:
it (ParallelIterator[T]): the ParallelIterator to converted
batch_size (int): batch the given size to create a pandas DataFrame
Returns:
A ParallelIterator of pd.DataFrame
"""
it = it.batch(batch_size)
def convert_fn(input_it: Iterable[T]) -> Iterable[pd.DataFrame]:
names = []
for batch in input_it:
assert isinstance(batch, list)
if hasattr(batch[0], "__getitem__"):
batch = pd.DataFrame(batch)
elif hasattr(batch[0], "__iter__"):
batch = [list(item) for item in batch]
batch = pd.DataFrame(batch)
elif is_dataclass(batch[0]):
if not names:
names = [f.name for f in dataclasses.fields(batch[0])]
                values = defaultdict(list)
for item in batch:
for col in names:
values[col].append(getattr(item, col))
batch = pd.DataFrame(values, columns=names)
else:
raise ValueError("MLDataset only support list like item or "
"dataclass instance")
yield batch
it = it._with_transform(lambda local_it: local_it.transform(convert_fn),
".to_pandas()")
return it
def from_parallel_iter(para_it: ParallelIterator[T],
need_convert: bool = True,
batch_size: int = 32,
repeated: bool = False) -> MLDataset:
"""Create a MLDataset from an existing ParallelIterator.
The object of the ParallelIterator should be list like object or dataclass
instance.
Args:
para_it (ParallelIterator[T]): An existing parallel iterator, and each
should be a list like object or dataclass instance.
need_convert (bool): whether need to convert to pandas.DataFrame. This
should be False if the record type is pandas.DataFrame.
batch_size (int): if need_convert is True, we will batch the batch_size
records to a pandas.DataFrame
repeated (bool): whether the para_it is repeated.
Returns:
a MLDataset
"""
if need_convert:
para_it = to_pandas(para_it, batch_size)
else:
batch_size = 0
return MLDataset.from_parallel_it(para_it, batch_size, repeated)
__all__ = ["from_parallel_iter", "read_parquet", "MLDataset"]
| apache-2.0 | 6,746,056,552,252,264,000 | 35.064516 | 79 | 0.610614 | false |
gsmaxwell/phase_offset_rx | gr-digital/examples/snr_estimators.py | 14 | 5302 | #!/usr/bin/env python
import sys
try:
import scipy
from scipy import stats
except ImportError:
print "Error: Program requires scipy (www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires Matplotlib (matplotlib.sourceforge.net)."
sys.exit(1)
from gnuradio import gr, digital
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal and to compare the different estimators.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
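# online_skewness: single-pass update of the third central moment of the data,
# exponentially smoothed with a hard-coded weight of 0.001 (the `alpha`
# argument is currently unused).  It feeds the "skew" SNR estimator below.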
def online_skewness(data, alpha):
n = 0
mean = 0
M2 = 0
M3 = 0
d_M3 = 0
for n in xrange(len(data)):
delta = data[n] - mean
delta_n = delta / (n+1)
term1 = delta * delta_n * (n)
mean = mean + delta_n
M3 = term1 * delta_n * (n - 1) - 3 * delta_n * M2
M2 = M2 + term1
        d_M3 = (0.001)*M3 + (1-0.001)*d_M3
return d_M3
def snr_est_simple(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.real(scipy.mean(signal**2))
y3 = (y1*y1 - y2)
snr_rat = y1*y1/y3
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.mean(scipy.real(signal**2))
y3 = (y1*y1 - y2)
y4 = online_skewness(abs(signal.real), 0.001)
skw = y4*y4 / (y2*y2*y2);
snr_rat = y1*y1 / (y3 + skw*y1*y1)
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
M2 = scipy.mean(abs(signal)**2)
M4 = scipy.mean(abs(signal)**4)
snr_rat = 2*scipy.sqrt(2*M2*M2 - M4) / (M2 - scipy.sqrt(2*M2*M2 - M4))
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
N = len(signal)
ssum = 0
msum = 0
for i in xrange(1, N):
ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
msum += (abs(signal[i])**4)
savg = (1.0/(float(N)-1.0))*ssum
mavg = (1.0/(float(N)-1.0))*msum
beta = savg / (mavg - savg)
snr_rat = 2*((beta - 1) + scipy.sqrt(beta*(beta-1)))
return 10.0*scipy.log10(snr_rat), snr_rat
def main():
gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
"skew": digital.SNR_EST_SKEW,
"m2m4": digital.SNR_EST_M2M4,
"svr": digital.SNR_EST_SVR}
py_estimators = {"simple": snr_est_simple,
"skew": snr_est_skew,
"m2m4": snr_est_m2m4,
"svr": snr_est_svr}
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Set the number of samples to process [default=%default]")
parser.add_option("", "--snr-min", type="float", default=-5,
help="Minimum SNR [default=%default]")
parser.add_option("", "--snr-max", type="float", default=20,
help="Maximum SNR [default=%default]")
parser.add_option("", "--snr-step", type="float", default=0.5,
help="SNR step amount [default=%default]")
parser.add_option("-t", "--type", type="choice",
choices=gr_estimators.keys(), default="simple",
help="Estimator type {0} [default=%default]".format(
gr_estimators.keys()))
(options, args) = parser.parse_args ()
N = options.nsamples
xx = scipy.random.randn(N)
xy = scipy.random.randn(N)
bits = 2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
snr_known = list()
snr_python = list()
snr_gr = list()
# when to issue an SNR tag; can be ignored in this example.
ntag = 10000
n_cpx = xx + 1j*xy
py_est = py_estimators[options.type]
gr_est = gr_estimators[options.type]
SNR_min = options.snr_min
SNR_max = options.snr_max
SNR_step = options.snr_step
SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
for snr in SNR_dB:
SNR = 10.0**(snr/10.0)
scale = scipy.sqrt(SNR)
yy = bits + n_cpx/scale
print "SNR: ", snr
Sknown = scipy.mean(yy**2)
Nknown = scipy.var(n_cpx/scale)/2
snr0 = Sknown/Nknown
snr0dB = 10.0*scipy.log10(snr0)
snr_known.append(snr0dB)
snrdB, snr = py_est(yy)
snr_python.append(snrdB)
gr_src = gr.vector_source_c(bits.tolist(), False)
gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
gr_chn = gr.channel_model(1.0/scale)
gr_snk = gr.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
tb.run()
snr_gr.append(gr_snr.snr())
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
s1.grid(True)
s1.set_title('SNR Estimators')
s1.set_xlabel('SNR (dB)')
s1.set_ylabel('Estimated SNR')
s1.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 | -9,082,930,186,807,036,000 | 29.471264 | 88 | 0.566767 | false |
LEX2016WoKaGru/pyClamster | examples/fisheye/distmap.py | 1 | 3754 | #!/usr/bin/env python3
import os
import time
import pyclamster
import logging
import numpy as np
logging.basicConfig(level=logging.DEBUG)
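start_time = time.time() # referenced by the elapsed-time log at the end of the script

### load the fisheye image ###
# NOTE: the image-loading step is not included in this snippet; it is assumed
# here that `img` is an already loaded pyclamster image object providing
# `img.data` and `img.coordinates` (see the pyclamster examples for reading one).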
### create a fisheye projection object ###
f=pyclamster.fisheye.FisheyeProjection()
### create rectified coordinates ###
outshape=(50,50) # size of output image
rect_azimuth_offset = np.pi / 2 # north angle of rectified image
rect_clockwise = False
rect_x,rect_y=np.meshgrid(
np.linspace(-20,20,num=outshape[1]),# image x coordinate goes right
np.linspace(20,-20,num=outshape[0]) # image y coordinate goes up
)
rect_z = 15 # rectify for height rect_z
rect_coord = pyclamster.coordinates.Coordinates3d(
x = rect_x,
y = rect_y,
z = rect_z,
azimuth_offset = rect_azimuth_offset,
azimuth_clockwise = rect_clockwise,
shape=outshape
)
### create spherical coordinates of original image ###
shape=np.shape(img.data)[:2] # shape of image
image_north_angle = 6 * np.pi / 5 # north angle ON the original image
orig_azimuth_offset = np.pi / 2 # "north angle" on image coordinates
center = None # center of elevation/azimuth in the image
maxelepos = (0,int(shape[1]/2)) # (one) position of maxium elevation
maxele = np.pi / 2.2 # maximum elevation on the image border, < 90° here
img.coordinates.azimuth_offset = orig_azimuth_offset
img.coordinates.azimuth_clockwise = False
logging.debug("setting image elevation")
img.coordinates.elevation = f.createFisheyeElevation(
shape,
maxelepos=maxelepos,
maxele=maxele,
center=center
)
logging.debug("mean image elevation is {}".format(img.coordinates.elevation.mean()))
logging.debug("setting image azimuth")
img.coordinates.azimuth = f.createAzimuth(
shape,
maxelepos=maxelepos,
center=center,
north_angle = image_north_angle,
clockwise=False
)
logging.debug("setting image radius")
img.coordinates.z = rect_z
### create rectification map ###
# based on regular grid
logging.debug("calculating rectification map")
distmap = f.distortionMap(in_coord=img.coordinates,
out_coord=rect_coord, method="nearest")
### rectify image ##
rectimage = img.applyDistortionMap(distmap)
### plot results ###
import matplotlib.pyplot as plt
plt.subplot(3,4,1)
plt.title("original image (fix)")
plt.imshow(img.data, interpolation="nearest")
plt.subplot(3,4,2)
plt.title("image radius (calculated)")
plt.imshow(img.coordinates.radius, interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,3)
plt.title("rectified r (calculated)")
plt.imshow(rect_coord.radius,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,4)
plt.title("rectified image (calculated)")
plt.imshow(rectimage.data, interpolation="nearest")
plt.subplot(3,4,5)
plt.title("image elevation (fix)")
plt.imshow(img.coordinates.elevation,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,9)
plt.title("image azimuth (fix)")
plt.imshow(img.coordinates.azimuth,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,6)
plt.title("image x (calculated)")
plt.imshow(img.coordinates.x,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,10)
plt.title("image y (calculated)")
plt.imshow(img.coordinates.y,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,7)
plt.title("rectified x (fix)")
plt.imshow(rect_coord.x,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,11)
plt.title("rectified y (fix)")
plt.imshow(rect_coord.y,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,8)
plt.title("rectified elevation (calculated)")
plt.imshow(rect_coord.elevation,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,12)
plt.title("rectified azimuth (calculated)")
plt.imshow(rect_coord.azimuth,interpolation="nearest")
plt.colorbar()
logging.debug("Time elapsed: {0:.3f} s".format(time.time()-start_time))
plt.show()
| gpl-3.0 | -9,025,229,287,607,347,000 | 29.762295 | 84 | 0.73781 | false |
bflaven/BlogArticlesExamples | using_ludwig_introduction_to_deep_learning/006_ludwig_example/python_ludwig_titanic_1.py | 1 | 1912 | """[directory]
cd /Volumes/van_van/using_ludwig_discovering_ia/002b_ludwig_example/
python python_ludwig_titanic_1.py
"""
from ludwig.api import LudwigModel
import pandas as pd
import yaml
import warnings
# pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_rows', 200)
warnings.filterwarnings('ignore')
model_definition = yaml.safe_load(
'''
input_features:
-
name: Pclass
type: category
-
name: Sex
type: category
-
name: Age
type: numerical
missing_value_strategy: fill_with_mean
-
name: SibSp
type: numerical
-
name: Parch
type: numerical
-
name: Fare
type: numerical
missing_value_strategy: fill_with_mean
-
name: Embarked
type: category
output_features:
-
name: Survived
type: binary
'''
)
ludwig_model = LudwigModel(model_definition)
trainning_set = pd.read_csv('train.csv', index_col=False)
# OUTPUT
# print("\n--- trainning_set ")
# print(trainning_set)
# training
train_stats, _, _ = ludwig_model.train(dataset=trainning_set)
# OUTPUT
# print("\n--- train_stats ")
# print(train_stats)
# TEST_1
# test_set = pd.read_csv('test_1.csv', index_col=False)
# predictions = ludwig_model.predict(test_set)
# print("\n--- predictions ")
# print(predictions)
# TEST_2
test_set = pd.read_csv('test_2.csv', index_col=False)
predictions = ludwig_model.predict(test_set)
print("\n--- predictions ")
print(predictions)
"""[Data Dictionary]
Variable Definition Key
survival Survival 0 = No, 1 = Yes
pclass Ticket class 1 = 1st, 2 = 2nd, 3 = 3rd
embarked Port of Embarkation C = Cherbourg, Q = Queenstown, S = Southampton
sex Sex
Age Age in years
sibsp # of siblings / spouses aboard the Titanic
parch # of parents / children aboard the Titanic
ticket Ticket number
fare Passenger fare
cabin Cabin number
"""
| mit | 676,329,157,085,587,800 | 19.340426 | 75 | 0.657427 | false |
bmazin/SDR | Projects/ChannelizerSim/legacy/channelizer_1stStage.py | 1 | 1307 | import numpy as np
import matplotlib.pyplot as plt
import math
import random
fs = 512e6
freqRes = 7812.5
dt = 1/fs
f = 8.90e6
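# pfb_fir: polyphase filter-bank front end with L = 512 branches and T = 4 taps
# per branch; the prototype filter is a Hamming-windowed sinc. Its output feeds
# the 512-point FFTs of the channelizer's first stage below.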
def pfb_fir(x):
N = len(x)
T = 4
L = 512
bin_width_scale = 2.5
dx = T*math.pi/L/T
X = np.array([n*dx-T*math.pi/2 for n in range(T*L)])
coeff = np.sinc(bin_width_scale*X/math.pi)*np.hamming(T*L)
y = np.array([0+0j]*(N-T*L))
for n in range((T-1)*L, N):
m = n%L
coeff_sub = coeff[L*T-m::-L]
y[n-T*L] = (x[n-(T-1)*L:n+L:L]*coeff_sub).sum()
return y
###########################
# First stage and freq comb
###########################
lut_len = int(fs/freqRes)
time = [dt*i for i in range(lut_len)]
signal = np.array([complex(math.cos(2*math.pi*f*t), math.sin(2*math.pi*f*t)) for t in time])
samples = 4*lut_len
L = samples/512
y0 = pfb_fir(signal[0:samples-256])
y1 = pfb_fir(signal[256::])
bins = np.array([[0+0j]*200]*32)
for n in range(100):
fft0 = np.fft.fft(y0[n*512:(n+1)*512])
fft1 = np.fft.fft(y1[n*512:(n+1)*512])
for i in range(512):
if i%2 ==1:
fft1[i] = -fft1[i]
for m in range(1):
bins[m,2*n] = fft0[10]
bins[m,2*n+1] = fft1[10]
print abs(bins[0,0])
fig = plt.figure()
ax0 = fig.add_subplot(211)
ax0.plot(bins[0].real, '.-', bins[0].imag, '.-')
ax2 = fig.add_subplot(212)
ax2.plot(abs(np.fft.fft(bins[0])), '.-')
plt.show()
| gpl-2.0 | 8,847,304,630,940,959,000 | 19.107692 | 92 | 0.575363 | false |
james-nichols/dtrw | viral_penetration/fit_EDTA_ensemble.py | 1 | 5932 | #!/usr/local/bin/python3
# Libraries are in parent directory
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
import scipy
import time, csv, math, collections
from dtrw import *
# Local fit functions for a variety of scripts
from fit_functions import *
import mpmath
import scipy.integrate
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import cm
from matplotlib.backends.backend_pdf import PdfPages
import pdb
output_pdf = sys.argv[1]
def append_string_as_int(array, item):
try:
array = np.append(array, np.int32(item))
except ValueError:
array = np.append(array, np.nan)
return array
def append_string_as_float(array, item):
try:
array = np.append(array, np.float64(item))
except ValueError:
array = np.append(array, np.nan)
return array
labels = []
image_index = []
cervix = []
EDTA = []
p24 = np.array([], dtype=np.int32)
virions = np.array([], dtype=np.int32)
penetrators = np.array([], dtype=np.int32)
depth = np.array([], dtype=np.float64)
no_mucous_data = 'SMEG_Data/NeuraminidaseNOBAFLinear.csv'
with_mucous_data = 'SMEG_Data/PenetrationMLoadnewestOMITAngelafixed.csv'
EDTA_data = 'SMEG_Data/EctoCervixEDTABaLAngelafixed.csv'
data = pd.read_csv(EDTA_data)
depth_no = data.ix[data.EDTA =='N'].ix[data.ix[:,6]>0.0].get_values()[:,6]
depth_yes = data.ix[data.EDTA =='Y'].ix[data.ix[:,6]>0.0].get_values()[:,6]
pp = PdfPages(output_pdf + sys.argv[2] + '.pdf')
#for site in [sites_yes, sites_no]:
# for site in sites:
if sys.argv[2] == 'Y':
nz_depth = depth_yes
else:
nz_depth = depth_no
# Depth based survival function - sometimes a better function to fit to, and we certainly don't lose resolution
surv_func = scipy.stats.itemfreq(nz_depth-1.0)
surv_func_x = surv_func[:,0]
surv_func_y = 1.0 - np.insert(np.cumsum(surv_func[:,1]), 0, 0.0)[:-1] / surv_func[:,1].sum()
if surv_func_x[0] != 0.0:
surv_func_x = np.insert(surv_func_x, 0, 0.0)
surv_func_y = np.insert(surv_func_y, 0, 1.0)
T = 4.0
L = surv_func_x.max() #nz_depth.max()
dX = L / 100.0
D_alpha = 20.0
alpha = 0.75
# Last minimisation got close to:
#diff_fit = [ 5.28210775, 0.95847065]
#subdiff_fit = [ 15.07811124, 0.55, 0.99997347]
xs = np.arange(0.0, L+dX, dX)
#
# FIT Diffusion model - analytic
#
diff_init_params = [D_alpha]
diff_fit = scipy.optimize.fmin_slsqp(lsq_diff, diff_init_params, args=(T, surv_func_x, surv_func_y), \
bounds=[(0.0, np.Inf)], epsilon = 1.0e-8, acc=1.0e-6, full_output=True)
diff_sq_err = diff_fit[1]
diff_fit = diff_fit[0]
print('Diffusion fit parameters:', diff_fit)
diff_analytic_soln_survival = produce_diff_soln_survival(diff_fit, T, xs)
diff_analytic_soln = produce_diff_soln(diff_fit, T, xs)
#
# FIT Subdiffusion model - numerical (DTRW algorithm)
#
#history_truncation = 0
# New regime: start at diff parameter fit
#subdiff_init_params = [diff_fit[0], alpha]
#subdiff_fit = scipy.optimize.fmin_slsqp(lsq_subdiff, subdiff_init_params, args=(T, 4.0 * L, dX, surv_func_x, surv_func_y, history_truncation), \
# bounds=[(0.0, 50.0),(0.51, 1.0)], epsilon = 1.0e-3, acc=1.0e-6, full_output=True)
#subdiff_sq_err = subdiff_fit[1]
#subdiff_fit = subdiff_fit[0]
#print 'Subdiffusion fit parameters:', subdiff_fit
#dtrw_sub_soln = produce_subdiff_soln(subdiff_fit, T, 4.0*L, dX)
#dtrw_sub_soln_survival = produce_subdiff_soln_survival(subdiff_fit, T, 4.0*L, dX)
#
# FIT Subdiffusion model - analytic
#
subdiff_anal_init_params = [D_alpha]
subdiff_anal_fit = scipy.optimize.fmin_slsqp(lsq_subdiff_analytic, subdiff_anal_init_params, args=(T, surv_func_x, surv_func_y), \
bounds=[(0.0, np.Inf)], epsilon = 1.0e-3, acc=1.0e-6, full_output=True)
subdiff_anal_sq_err = subdiff_anal_fit[1]
subdiff_anal_fit = subdiff_anal_fit[0]
print('Subdiffusion analytic fit parameters:', subdiff_anal_fit)
anal_sub_soln = produce_subdiff_analytic_soln(subdiff_anal_fit, T, xs)
anal_sub_soln_survival = produce_subdiff_analytic_survival(subdiff_anal_fit, T, xs)
#
# FIT Exponential... for fun
#
slope, offset = np.linalg.lstsq(np.vstack([surv_func_x, np.ones(len(surv_func_x))]).T, np.log(surv_func_y).T)[0]
exp_fit = np.exp(offset + xs * slope)
#
# PLOT IT ALL
#
fig = plt.figure(figsize=(16,8))
ax1 = fig.add_subplot(1, 2, 1)
bar1, = ax1.plot(surv_func_x, surv_func_y, 'b.-')
#line1, = ax1.plot(xs, dtrw_sub_soln_survival.T[:xs.size], 'r.-')
line2, = ax1.plot(xs, anal_sub_soln_survival, 'y.-')
line3, = ax1.plot(xs, diff_analytic_soln_survival, 'g.-')
line4, = ax1.plot(xs, exp_fit, 'b')
if sys.argv[2] == 'Y':
ax1.set_title('Survival function vs fits, with EDTA treatment')
else:
ax1.set_title('Survival function vs fits, without EDTA treatment')
ax2 = fig.add_subplot(1, 2, 2)
ax2.semilogy(surv_func_x, surv_func_y, 'b.-')
#ax2.semilogy(xs, dtrw_sub_soln_survival.T[:xs.size], 'r.-')
ax2.semilogy(xs, anal_sub_soln_survival, 'y.-')
ax2.semilogy(xs, diff_analytic_soln_survival, 'g.-')
ax2.semilogy(xs, exp_fit, 'b')
if sys.argv[2] == 'Y':
ax2.set_title('Logarithm of survival function vs fits, with EDTA treatment')
else:
ax2.set_title('Logarithm of survival function vs fits, without EDTA treatment')
#plt.legend([bar1, line1, line2, line3, line4], ["Viral survival func", "Subdiffusion fit, alpha={0:.2f}, D_alpha={1:.2f}, sq_err={2:.4f}".format(subdiff_fit[1],subdiff_fit[0],subdiff_sq_err), \
plt.legend([bar1, line2, line3, line4], ["Viral survival func", \
"Analytic subdiff fit, alpha=1/2, D_alpha={0:.2f}, sq_err={1:.4f}".format(subdiff_anal_fit[0], subdiff_anal_sq_err), \
"Diffusion fit, D_alpha={0:.2f}, sq_err={1:.2f}".format(diff_fit[0], diff_sq_err), "Exponential fit"], loc=3)
pp.savefig()
pp.close()
#plt.show()
| gpl-2.0 | -7,602,440,083,743,496,000 | 34.309524 | 194 | 0.660317 | false |
FrederikDiehl/apsis | code/apsis/tests/test_assistants/test_lab_assistant.py | 2 | 4195 | __author__ = 'Frederik Diehl'
from apsis.assistants.lab_assistant import *
from nose.tools import assert_equal, assert_items_equal, assert_dict_equal, \
assert_is_none, assert_raises, raises, assert_greater_equal, \
assert_less_equal, assert_in
from apsis.utilities.logging_utils import get_logger
from apsis.models.parameter_definition import *
import matplotlib.pyplot as plt
class TestLabAssistant(object):
"""
Tests the lab_assistants.
"""
LAss = None
param_defs = None
def setup(self):
self.LAss = LabAssistant()
def teardown(self):
self.LAss.set_exit()
def test_init(self):
"""
Tests whether the initialization works correctly.
Tests:
- Whether the directory for writing is correct
- _exp_assistants is empty
- logger name is correctly set.
"""
if os.name == "nt":
assert_equal(self.LAss._write_directory_base, "/tmp/APSIS_WRITING")
assert_items_equal(self.LAss._exp_assistants, {})
assert_equal(self.LAss._logger,
get_logger("apsis.assistants.lab_assistant.LabAssistant"))
def test_init_experiment(self):
"""
Tests whether the initialization works correctly.
Tests:
- optimizer correct
- minimization correct
- param_defs correct
- No two experiments with the same name
"""
optimizer = "RandomSearch"
name = "test_init_experiment"
self.param_defs = {
"x": MinMaxNumericParamDef(0, 1),
"name": NominalParamDef(["A", "B", "C"])
}
optimizer_arguments = {
"multiprocessing": "none"
}
minimization = True
exp_id = self.LAss.init_experiment(name, optimizer,
optimizer_arguments=optimizer_arguments,
param_defs=self.param_defs, minimization=minimization)
exp_ass = self.LAss._exp_assistants[exp_id]
assert_equal(exp_ass._optimizer.__class__.__name__, optimizer)
assert_equal(exp_ass._optimizer_arguments, optimizer_arguments)
assert_equal(exp_ass._experiment.minimization_problem, minimization)
with assert_raises(ValueError):
self.LAss.init_experiment(name, optimizer, exp_id=exp_id,
optimizer_arguments=optimizer_arguments,
param_defs=self.param_defs, minimization=minimization)
return exp_id
def test_get_next_candidate(self):
"""
Tests the get next candidate function.
Tests:
- The candidate's parameters are acceptable
"""
exp_id = self.test_init_experiment()
cand = self.LAss.get_next_candidate(exp_id)
assert_is_none(cand.result)
params = cand.params
assert_less_equal(params["x"], 1)
assert_greater_equal(params["x"], 0)
assert_in(params["name"], self.param_defs["name"].values)
def test_update(self):
"""
Tests whether update works.
- candidate exists in the list
- result is equal
"""
exp_id = self.test_init_experiment()
cand = self.LAss.get_next_candidate(exp_id)
cand.result = 1
self.LAss.update(exp_id, status="finished", candidate=cand)
assert_items_equal(self.LAss._exp_assistants[exp_id]._experiment.candidates_finished, [cand])
assert_equal(self.LAss._exp_assistants[exp_id]._experiment.candidates_finished[0].result, 1)
def test_get_best_candidate(self):
"""
Tests whether get_best_candidate works.
- Whether the best of the two candidates is the one it should be.
"""
exp_id = self.test_init_experiment()
cand_one = self.LAss.get_next_candidate(exp_id)
cand_one.result = 1
self.LAss.update(exp_id, "finished", cand_one)
cand_two = self.LAss.get_next_candidate(exp_id)
cand_two.result = 0
self.LAss.update(exp_id, "finished", cand_two)
assert_equal(cand_two, self.LAss.get_best_candidate(exp_id))
| mit | -4,735,466,048,406,558,000 | 34.550847 | 101 | 0.598808 | false |
TomAugspurger/pandas | pandas/tests/dtypes/test_missing.py | 1 | 19358 | from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
from pandas._config import config as cf
from pandas._libs import missing as libmissing
from pandas._libs.tslibs import iNaT, is_null_datetimelike
from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype
from pandas.core.dtypes.missing import (
array_equivalent,
isna,
isnull,
na_value_for_dtype,
notna,
notnull,
)
import pandas as pd
from pandas import DatetimeIndex, Float64Index, NaT, Series, TimedeltaIndex, date_range
import pandas._testing as tm
now = pd.Timestamp.now()
utcnow = pd.Timestamp.now("UTC")
@pytest.mark.parametrize("notna_f", [notna, notnull])
def test_notna_notnull(notna_f):
assert notna_f(1.0)
assert not notna_f(None)
assert not notna_f(np.NaN)
with cf.option_context("mode.use_inf_as_na", False):
assert notna_f(np.inf)
assert notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_na", True):
assert not notna_f(np.inf)
assert not notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_na", False):
for s in [
tm.makeFloatSeries(),
tm.makeStringSeries(),
tm.makeObjectSeries(),
tm.makeTimeSeries(),
tm.makePeriodSeries(),
]:
assert isinstance(notna_f(s), Series)
class TestIsNA:
def test_0d_array(self):
assert isna(np.array(np.nan))
assert not isna(np.array(0.0))
assert not isna(np.array(0))
# test object dtype
assert isna(np.array(np.nan, dtype=object))
assert not isna(np.array(0.0, dtype=object))
assert not isna(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isna(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("isna_f", [isna, isnull])
def test_isna_isnull(self, isna_f):
assert not isna_f(1.0)
assert isna_f(None)
assert isna_f(np.NaN)
assert float("nan")
assert not isna_f(np.inf)
assert not isna_f(-np.inf)
# type
assert not isna_f(type(pd.Series(dtype=object)))
assert not isna_f(type(pd.Series(dtype=np.float64)))
assert not isna_f(type(pd.DataFrame()))
# series
for s in [
tm.makeFloatSeries(),
tm.makeStringSeries(),
tm.makeObjectSeries(),
tm.makeTimeSeries(),
tm.makePeriodSeries(),
]:
assert isinstance(isna_f(s), Series)
# frame
for df in [
tm.makeTimeDataFrame(),
tm.makePeriodFrame(),
tm.makeMixedDataFrame(),
]:
result = isna_f(df)
expected = df.apply(isna_f)
tm.assert_frame_equal(result, expected)
def test_isna_lists(self):
result = isna([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isna([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isna(["foo", "bar"])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isna(["foo", "bar"])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
# GH20675
result = isna([np.NaN, "world"])
exp = np.array([True, False])
tm.assert_numpy_array_equal(result, exp)
def test_isna_nat(self):
result = isna([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isna(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isna_numpy_nat(self):
arr = np.array(
[
NaT,
np.datetime64("NaT"),
np.timedelta64("NaT"),
np.datetime64("NaT", "s"),
]
)
result = isna(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isna_datetime(self):
assert not isna(datetime.now())
assert notna(datetime.now())
idx = date_range("1/1/1990", periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notna(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isna(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq="M")
mask = isna(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isna(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_isna_old_datetimelike(self):
# isna_old should work for dt64tz, td64, and period, not just tznaive
dti = pd.date_range("2016-01-01", periods=3)
dta = dti._data
dta[-1] = pd.NaT
expected = np.array([False, False, True], dtype=bool)
objs = [dta, dta.tz_localize("US/Eastern"), dta - dta, dta.to_period("D")]
for obj in objs:
with cf.option_context("mode.use_inf_as_na", True):
result = pd.isna(obj)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"value, expected",
[
(np.complex128(np.nan), True),
(np.float64(1), False),
(np.array([1, 1 + 0j, np.nan, 3]), np.array([False, False, True, False])),
(
np.array([1, 1 + 0j, np.nan, 3], dtype=object),
np.array([False, False, True, False]),
),
(
np.array([1, 1 + 0j, np.nan, 3]).astype(object),
np.array([False, False, True, False]),
),
],
)
def test_complex(self, value, expected):
result = isna(value)
if is_scalar(result):
assert result is expected
else:
tm.assert_numpy_array_equal(result, expected)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in [
"datetime64[D]",
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
]:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(["1 days", "NaT", "2 days"])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in [
"timedelta64[D]",
"timedelta64[h]",
"timedelta64[m]",
"timedelta64[s]",
"timedelta64[ms]",
"timedelta64[us]",
"timedelta64[ns]",
]:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2012-01"], freq="M")
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(idx)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(idx, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]), np.array([np.nan, np.nan]))
assert array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 1, np.nan])
)
assert array_equivalent(
np.array([np.nan, None], dtype="object"),
np.array([np.nan, None], dtype="object"),
)
# Check the handling of nested arrays in array_equivalent_object
assert array_equivalent(
np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"),
np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"),
)
assert array_equivalent(
np.array([np.nan, 1 + 1j], dtype="complex"),
np.array([np.nan, 1 + 1j], dtype="complex"),
)
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype="complex"),
np.array([np.nan, 1 + 2j], dtype="complex"),
)
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan])
)
assert not array_equivalent(np.array(["a", "b", "c", "d"]), np.array(["e", "e"]))
assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan]))
assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]))
assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan])
)
assert array_equivalent(
DatetimeIndex([0, np.nan], tz="US/Eastern"),
DatetimeIndex([0, np.nan], tz="US/Eastern"),
)
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz="US/Eastern"),
DatetimeIndex([1, np.nan], tz="US/Eastern"),
)
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern")
)
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz="CET"),
DatetimeIndex([0, np.nan], tz="US/Eastern"),
)
assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
@pytest.mark.parametrize(
"lvalue, rvalue",
[
        # There are 3 variants for each of lvalue and rvalue. We include all
        # three for the tz-naive `now` and exclude the datetime64 variant
        # for utcnow because it drops tzinfo.
(now, utcnow),
(now.to_datetime64(), utcnow),
(now.to_pydatetime(), utcnow),
        (now, utcnow.to_pydatetime()),
(now.to_datetime64(), utcnow.to_pydatetime()),
(now.to_pydatetime(), utcnow.to_pydatetime()),
],
)
def test_array_equivalent_tzawareness(lvalue, rvalue):
# we shouldn't raise if comparing tzaware and tznaive datetimes
left = np.array([lvalue], dtype=object)
right = np.array([rvalue], dtype=object)
assert not array_equivalent(left, right, strict_nan=True)
assert not array_equivalent(left, right, strict_nan=False)
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
n = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
assert array_equivalent(m, n, strict_nan=True)
assert array_equivalent(m, n, strict_nan=False)
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
n = np.array([(1, 2), (4, 3)], dtype=[("a", int), ("b", float)])
assert not array_equivalent(m, n, strict_nan=True)
assert not array_equivalent(m, n, strict_nan=False)
m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
n = np.array([(1, 2), (3, 4)], dtype=[("b", int), ("a", float)])
assert not array_equivalent(m, n, strict_nan=True)
assert not array_equivalent(m, n, strict_nan=False)
def test_array_equivalent_str():
for dtype in ["O", "S", "U"]:
assert array_equivalent(
np.array(["A", "B"], dtype=dtype), np.array(["A", "B"], dtype=dtype)
)
assert not array_equivalent(
np.array(["A", "B"], dtype=dtype), np.array(["A", "X"], dtype=dtype)
)
def test_array_equivalent_nested():
# reached in groupby aggregations, make sure we use np.any when checking
# if the comparison is truthy
left = np.array([np.array([50, 70, 90]), np.array([20, 30, 40])], dtype=object)
right = np.array([np.array([50, 70, 90]), np.array([20, 30, 40])], dtype=object)
assert array_equivalent(left, right, strict_nan=True)
assert not array_equivalent(left, right[::-1], strict_nan=True)
left = np.array([np.array([50, 50, 50]), np.array([40, 40, 40])], dtype=object)
right = np.array([50, 40])
assert not array_equivalent(left, right, strict_nan=True)
@pytest.mark.parametrize(
"dtype, na_value",
[
# Datetime-like
(np.dtype("M8[ns]"), NaT),
(np.dtype("m8[ns]"), NaT),
(DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]"), NaT),
(PeriodDtype("M"), NaT),
# Integer
("u1", 0),
("u2", 0),
("u4", 0),
("u8", 0),
("i1", 0),
("i2", 0),
("i4", 0),
("i8", 0),
# Bool
("bool", False),
# Float
("f2", np.nan),
("f4", np.nan),
("f8", np.nan),
# Object
("O", np.nan),
# Interval
(IntervalDtype(), np.nan),
],
)
def test_na_value_for_dtype(dtype, na_value):
result = na_value_for_dtype(dtype)
assert result is na_value
class TestNAObj:
_1d_methods = ["isnaobj", "isnaobj_old"]
_2d_methods = ["isnaobj2d", "isnaobj2d_old"]
def _check_behavior(self, arr, expected):
for method in TestNAObj._1d_methods:
result = getattr(libmissing, method)(arr)
tm.assert_numpy_array_equal(result, expected)
arr = np.atleast_2d(arr)
expected = np.atleast_2d(expected)
for method in TestNAObj._2d_methods:
result = getattr(libmissing, method)(arr)
tm.assert_numpy_array_equal(result, expected)
def test_basic(self):
arr = np.array([1, None, "foo", -5.1, pd.NaT, np.nan])
expected = np.array([False, True, False, False, True, True])
self._check_behavior(arr, expected)
def test_non_obj_dtype(self):
arr = np.array([1, 3, np.nan, 5], dtype=float)
expected = np.array([False, False, True, False])
self._check_behavior(arr, expected)
def test_empty_arr(self):
arr = np.array([])
expected = np.array([], dtype=bool)
self._check_behavior(arr, expected)
def test_empty_str_inp(self):
arr = np.array([""]) # empty but not na
expected = np.array([False])
self._check_behavior(arr, expected)
def test_empty_like(self):
# see gh-13717: no segfaults!
arr = np.empty_like([None])
expected = np.array([True])
self._check_behavior(arr, expected)
m8_units = ["as", "ps", "ns", "us", "ms", "s", "m", "h", "D", "W", "M", "Y"]
na_vals = (
[
None,
NaT,
float("NaN"),
complex("NaN"),
np.nan,
np.float64("NaN"),
np.float32("NaN"),
np.complex64(np.nan),
np.complex128(np.nan),
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
+ [np.datetime64("NaT", unit) for unit in m8_units]
+ [np.timedelta64("NaT", unit) for unit in m8_units]
)
inf_vals = [
float("inf"),
float("-inf"),
complex("inf"),
complex("-inf"),
np.inf,
np.NINF,
]
int_na_vals = [
# Values that match iNaT, which we treat as null in specific cases
np.int64(NaT.value),
int(NaT.value),
]
sometimes_na_vals = [Decimal("NaN")]
never_na_vals = [
# float/complex values that when viewed as int64 match iNaT
-0.0,
np.float64("-0.0"),
-0j,
np.complex64(-0j),
]
class TestLibMissing:
def test_checknull(self):
for value in na_vals:
assert libmissing.checknull(value)
for value in inf_vals:
assert not libmissing.checknull(value)
for value in int_na_vals:
assert not libmissing.checknull(value)
for value in sometimes_na_vals:
assert not libmissing.checknull(value)
for value in never_na_vals:
assert not libmissing.checknull(value)
def test_checknull_old(self):
for value in na_vals:
assert libmissing.checknull_old(value)
for value in inf_vals:
assert libmissing.checknull_old(value)
for value in int_na_vals:
assert not libmissing.checknull_old(value)
for value in sometimes_na_vals:
assert not libmissing.checknull_old(value)
for value in never_na_vals:
assert not libmissing.checknull_old(value)
def test_is_null_datetimelike(self):
for value in na_vals:
assert is_null_datetimelike(value)
assert is_null_datetimelike(value, False)
for value in inf_vals:
assert not is_null_datetimelike(value)
assert not is_null_datetimelike(value, False)
for value in int_na_vals:
assert is_null_datetimelike(value)
assert not is_null_datetimelike(value, False)
for value in sometimes_na_vals:
assert not is_null_datetimelike(value)
assert not is_null_datetimelike(value, False)
for value in never_na_vals:
assert not is_null_datetimelike(value)
| bsd-3-clause | 5,070,179,214,365,183,000 | 31.209651 | 88 | 0.565296 | false |
thomasgibson/tabula-rasa | HDG_CG_comp/run_cg.py | 1 | 6794 | from argparse import ArgumentParser
from collections import defaultdict
from firedrake import COMM_WORLD, parameters
from firedrake.petsc import PETSc
from mpi4py import MPI
import os
import pandas as pd
import cg_problem as module
parameters["pyop2_options"]["lazy_evaluation"] = False
parser = ArgumentParser(description="""Profile CG solver.""",
add_help=False)
parser.add_argument("--results_file", action="store",
default="results/CG_data",
help="Where to put the results.")
parser.add_argument("--dim", action="store", default=3,
type=int, choices=[2, 3], help="Problem dimension.")
parser.add_argument("--quads", action="store_true",
help="Use quadrilateral elements")
parser.add_argument("--help", action="store_true", help="Show help.")
args, _ = parser.parse_known_args()
if args.help:
import sys
help = parser.format_help()
PETSc.Sys.Print("%s\n" % help)
sys.exit(0)
results = os.path.abspath(args.results_file)
warm = defaultdict(bool)
PETSc.Log.begin()
def run_solver(problem_cls, degree, size, rtol, quads, dim, cold=False):
params = {"ksp_type": "cg",
"ksp_rtol": rtol,
"pc_type": "hypre",
"pc_hypre_type": "boomeramg",
"pc_hypre_boomeramg_strong_threshold": 0.75,
"pc_hypre_boomeramg_agg_nl": 2}
problem = problem_cls(degree=degree, N=size,
quadrilaterals=quads, dimension=dim)
name = getattr(problem, "name")
solver = problem.solver(parameters=params)
if cold:
PETSc.Sys.Print("""
Running cold solve on coarse mesh for degree %d.\n
""" % degree)
solver.solve()
return
PETSc.Sys.Print("""
\nSolving problem: %s.\n
Approximation degree: %s\n
Problem size: %s ^ %s\n
Quads: %s\n
""" % (name, problem.degree, problem.N, problem.dim, problem.quads))
if not warm[(name, degree, size)]:
PETSc.Sys.Print("Warmup solve\n")
problem.u.assign(0)
with PETSc.Log.Stage("Warmup..."):
solver.solve()
warm[(name, degree, size)] = True
problem.u.assign(0)
PETSc.Sys.Print("Timed solve...")
solver.snes.setConvergenceHistory()
solver.snes.ksp.setConvergenceHistory()
warm_stage = "%s(deg=%s, N=%s, dim=%s) Warm solve\n" % (name,
degree,
size,
dim)
with PETSc.Log.Stage(warm_stage):
solver.solve()
snes = PETSc.Log.Event("SNESSolve").getPerfInfo()
ksp = PETSc.Log.Event("KSPSolve").getPerfInfo()
pcsetup = PETSc.Log.Event("PCSetUp").getPerfInfo()
pcapply = PETSc.Log.Event("PCApply").getPerfInfo()
jac_eval = PETSc.Log.Event("SNESJacobianEval").getPerfInfo()
residual = PETSc.Log.Event("SNESFunctionEval").getPerfInfo()
comm = problem.comm
snes_time = comm.allreduce(snes["time"], op=MPI.SUM) / comm.size
ksp_time = comm.allreduce(ksp["time"], op=MPI.SUM) / comm.size
pcsetup_time = comm.allreduce(pcsetup["time"], op=MPI.SUM) / comm.size
pcapply_time = comm.allreduce(pcapply["time"], op=MPI.SUM) / comm.size
jac_time = comm.allreduce(jac_eval["time"], op=MPI.SUM) / comm.size
res_time = comm.allreduce(residual["time"], op=MPI.SUM) / comm.size
num_cells = comm.allreduce(problem.mesh.cell_set.size, op=MPI.SUM)
err = problem.err
true_err = problem.true_err
if COMM_WORLD.rank == 0:
if not os.path.exists(os.path.dirname(results)):
os.makedirs(os.path.dirname(results))
data = {"SNESSolve": snes_time,
"KSPSolve": ksp_time,
"PCSetUp": pcsetup_time,
"PCApply": pcapply_time,
"SNESJacobianEval": jac_time,
"SNESFunctionEval": res_time,
"num_processes": problem.comm.size,
"mesh_size": problem.N,
"num_cells": num_cells,
"degree": problem.degree,
"dofs": problem.u.dof_dset.layout_vec.getSize(),
"name": problem.name,
"disc_error": err,
"true_err": true_err,
"ksp_iters": solver.snes.ksp.getIterationNumber()}
df = pd.DataFrame(data, index=[0])
if problem.quads:
result_file = results + "_N%d_deg%d_quads.csv" % (problem.N,
problem.degree)
else:
result_file = results + "_N%d_deg%d.csv" % (problem.N,
problem.degree)
df.to_csv(result_file, index=False, mode="w", header=True)
PETSc.Sys.Print("Solving %s(deg=%s, N=%s, dim=%s) finished.\n" %
(name, problem.degree, problem.N, problem.dim))
PETSc.Sys.Print("L2 error: %s\n" % true_err)
PETSc.Sys.Print("Algebraic error: %s\n" % err)
dim = args.dim
if dim == 3:
# (degree, size, rtol) NOTE: rtol is chosen such that the
# iterative solver reaches the minimal algebraic error
# so that we avoid "oversolving"
cg_params = [(2, 4, 1.0e-4),
(2, 8, 1.0e-5),
(2, 16, 1.0e-6),
(2, 32, 1.0e-7),
(2, 64, 1.0e-8),
(2, 128, 1.0e-9),
# Degree 3 set
(3, 4, 1.0e-6),
(3, 8, 1.0e-7),
(3, 16, 1.0e-8),
(3, 32, 1.0e-9),
(3, 64, 1.0e-10),
(3, 128, 1.0e-11),
# Degree 4 set
(4, 4, 1.0e-8),
(4, 8, 1.0e-9),
(4, 16, 1.0e-10),
(4, 32, 1.0e-11),
(4, 64, 1.0e-12)]
cold_params = [(2, 4, 1.0e-4),
(3, 4, 1.0e-6),
(4, 4, 1.0e-8)]
else:
# If a 2D run is desired, we can set one up.
raise NotImplementedError("Dim %s not set up yet." % dim)
problem_cls = module.CGProblem
quads = args.quads
for cold_param in cold_params:
degree, size, rtol = cold_param
run_solver(problem_cls=problem_cls, degree=degree,
size=size, rtol=rtol, quads=quads, dim=dim,
cold=True)
# Now we profile once the code has been generated
for cg_param in cg_params:
degree, size, rtol = cg_param
run_solver(problem_cls=problem_cls, degree=degree,
size=size, rtol=rtol, quads=quads, dim=dim,
cold=False)
| mit | 4,358,753,809,804,192,300 | 33.663265 | 81 | 0.524875 | false |
mph-/lcapy | lcapy/fexpr.py | 1 | 6297 | """This module provides the FourierDomainExpression class to represent
f-domain (Fourier domain) expressions.
Copyright 2014--2021 Michael Hayes, UCECE
"""
from __future__ import division
from .domains import FourierDomain
from .inverse_fourier import inverse_fourier_transform
from .inverse_dtft import IDTFT
from .expr import Expr, expr, expr_make
from .sym import fsym, ssym, tsym, pi
from .dsym import nsym, dt
from .units import u as uu
from .utils import factor_const
from sympy import Integral, Expr as symExpr
class FourierDomainExpression(FourierDomain, Expr):
"""Fourier domain expression or symbol."""
var = fsym
def __init__(self, val, **assumptions):
check = assumptions.pop('check', True)
assumptions['real'] = True
super(FourierDomainExpression, self).__init__(val, **assumptions)
expr = self.expr
if check and expr.has(ssym) and not expr.has(Integral):
raise ValueError(
'f-domain expression %s cannot depend on s' % expr)
if check and expr.has(tsym) and not expr.has(Integral):
raise ValueError(
'f-domain expression %s cannot depend on t' % expr)
def as_expr(self):
return FourierDomainExpression(self)
def inverse_fourier(self, evaluate=True, **assumptions):
"""Attempt inverse Fourier transform."""
result = inverse_fourier_transform(self.expr, self.var, tsym, evaluate=evaluate)
return self.change(result, 'time', units_scale=uu.Hz, **assumptions)
def IFT(self, evaluate=True, **assumptions):
"""Convert to time domain. This is an alias for inverse_fourier."""
return self.inverse_fourier(evaluate=evaluate, **assumptions)
def IDTFT(self, evaluate=True, **assumptions):
"""Convert to discrete-time domain using inverse discrete-time
Fourier transform."""
result = IDTFT(self.expr, self.var, nsym, evaluate=evaluate)
return self.change(result, 'discrete time', units_scale=uu.Hz,
**assumptions)
def time(self, **assumptions):
return self.inverse_fourier(**assumptions)
def norm_fourier(self, **assumptions):
"""Convert to normalized Fourier domain."""
from .symbols import F
from .dsym import dt
result = self.subs(F / dt)
return result
def angular_fourier(self, **assumptions):
"""Convert to angular Fourier domain."""
from .symbols import omega
result = self.subs(omega / (2 * pi))
return result
def norm_angular_fourier(self, **assumptions):
"""Convert to normalized angular Fourier domain."""
from .symbols import Omega
from .dsym import dt
result = self.subs(Omega / (2 * pi * dt))
return result
def laplace(self, **assumptions):
"""Determine one-side Laplace transform with 0- as the lower limit."""
result = self.time(**assumptions).laplace()
return result
def phasor(self, **assumptions):
"""Convert to phasor domain."""
return self.time(**assumptions).phasor(**assumptions)
def plot(self, fvector=None, plot_type=None, **kwargs):
"""Plot frequency response at values specified by `fvector`.
If `fvector` is a tuple, this sets the frequency limits.
`plot_type` - 'dB-phase', 'dB-phase-degrees', 'mag-phase',
'mag-phase-degrees', 'real-imag', 'mag', 'phase',
'phase-degrees', 'real', or 'imag'.
The default `plot_type` for complex data is `dB-phase`.
`kwargs` include:
`axes` - the plot axes to use otherwise a new figure is created
`xlabel` - the x-axis label
`ylabel` - the y-axis label
`ylabel2` - the second y-axis label if needed, say for mag and phase
`xscale` - the x-axis scaling, say for plotting as ms
`yscale` - the y-axis scaling, say for plotting mV
`norm` - use normalized frequency
`dbmin` - the smallest value to plot in dB (default -120)
in addition to those supported by the matplotlib plot command.
The plot axes are returned. This is a tuple for magnitude/phase or
real/imaginary plots.
There are many plotting options, see lcapy.plot and
matplotlib.pyplot.plot.
For example:
V.plot(fvector, log_frequency=True)
V.real.plot(fvector, color='black')
V.phase.plot(fvector, color='black', linestyle='--')
By default complex data is plotted as separate plots of
magnitude (dB) and phase.
"""
from .plot import plot_frequency
return plot_frequency(self, fvector, plot_type=plot_type, **kwargs)
def bode_plot(self, fvector=None, **kwargs):
"""Plot frequency response for a frequency-domain phasor as a Bode
plot (but without the straight line approximations). fvector
specifies the frequencies. If it is a tuple (f1, f2), it sets
the frequency limits. Since a logarithmic frequency scale is used,
f1 must be greater than 0.
For more info, see `plot`.
"""
from .plot import plot_bode
return plot_bode(self, fvector, **kwargs)
def nyquist_plot(self, fvector=None, log_frequency=True, **kwargs):
"""Plot frequency response as a Nyquist plot (real part versus
imaginary part). fvector specifies the frequencies. If it is
a tuple (f1, f2), it sets the frequency limits as (f1, f2).
        `npoints` sets the number of plotted points.
The unit circle is shown by default. This can be disabled with `unitcircle=False`.
"""
from .plot import plot_nyquist
return plot_nyquist(self, fvector, log_frequency=log_frequency, **kwargs)
def fexpr(arg, **assumptions):
"""Create FourierDomainExpression object. If `arg` is fsym return f"""
if arg is fsym:
return f
return expr_make('fourier', arg, **assumptions)
from .expressionclasses import expressionclasses
classes = expressionclasses.register('fourier', FourierDomainExpression)
f = FourierDomainExpression('f')
f.units = uu.Hz
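# --- Illustrative usage (editor's sketch; the expression strings below are
# only assumptions about how the public helpers above are combined, not
# lcapy documentation) ---
#
#   X = fexpr('1 / (1 + j * 2 * pi * f)')   # build an f-domain expression
#   x = X.inverse_fourier()                  # back to the time domain
#   Xw = X.angular_fourier()                 # re-express in terms of omega
#
# Each conversion returns a new expression object; nothing here mutates `f`.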
| lgpl-2.1 | -3,965,235,396,505,826,300 | 33.983333 | 91 | 0.634429 | false |
Caleydo/caleydo_server | phovea_server/dataset_csv.py | 1 | 16149 | ###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from builtins import str, object
import json
from backports import csv
import io
import os
import numpy as np
from .dataset_def import ADataSetProvider, AColumn, AMatrix, AStratification, ATable, AVector
from .config import view
def assign_ids(ids, idtype):
from .plugin import lookup
manager = lookup('idmanager')
return np.array(manager(ids, idtype))
def fix_id(fqname):
from .util import fix_id
return fix_id(fqname)
def basic_description(data, type, path):
import datetime
from .security import current_username
desc = dict(type=type,
name=data.get('name', 'Uploaded File'),
description=data.get('description', ''),
              creator=current_username(),
ts=datetime.datetime.utcnow(),
path=os.path.basename(path))
if 'group' in data:
desc['group'] = data['group']
if 'permissions' in data:
desc['permissions'] = data['permissions']
if 'buddies' in data:
desc['buddies'] = data['buddies']
return desc
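# (Editor's note, illustrative only) For an uploaded file the descriptor built
# above looks roughly like
#   {'type': 'matrix', 'name': 'Uploaded File', 'description': '',
#    'creator': '<current user>', 'ts': <utc datetime>, 'path': 'mydata.csv'}
# plus any of 'group', 'permissions' and 'buddies' passed through from `data`.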
class CSVEntryMixin(object):
def __init__(self, desc, project):
self._desc = desc
folder = project.folder + '/data/' if not hasattr(project, 'inplace') else project.folder
self._path = os.path.join(folder, self._desc['path'])
del self._desc['path']
self._project = project
self._loaded = None
def load(self):
if self._loaded is not None:
return self._loaded
data = []
with io.open(self._path, 'r', newline='', encoding=self._desc.get('encoding', 'utf-8')) as csvfile:
reader = csv.reader(csvfile, delimiter=self._desc.get('separator', u','),
quotechar=str(self._desc.get('quotechar', u'|')))
data.extend(reader)
# print data
def to_num(s):
try:
return float(s) # for int, long and float
except ValueError:
return s
header = data[0]
data = [[to_num(v) if i > 0 else v for i, v in enumerate(row)] for row in data[1:]]
data.insert(0, header)
# convert to col, row and data
self._loaded = self._process(data)
return self._loaded
def _process(self, data):
return data
def to_description(self):
return self._desc
def idtypes(self):
return [v for k, v in self._desc.items() if k in ['rowtype', 'coltype', 'idtype']]
def guess_color(name, i):
name = name.lower()
  colors = dict(male='blue', female='red', deceased='#e41a1b', living='#377eb8')
if name in colors:
return colors[name]
l = ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3', '#fdb462', '#b3de69', '#fccde5', '#d9d9d9', '#bc80bd',
'#ccebc5', '#ffed6f']
return l[i % len(l)]
def cmp_string(a, b):
if a == b:
return 0
return -1 if a < b else +1
class CSVStratification(CSVEntryMixin, AStratification):
def __init__(self, desc, project):
AStratification.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
desc['id'] = self.id
self.idtype = desc['idtype']
for i, g in enumerate(desc['groups']):
if 'color' not in g:
g['color'] = guess_color(g['name'], i)
def _process(self, data):
def to_string(v):
if type(v) is float:
return str(int(v))
return str(v)
d = [dict(row=row[0], i=i, cluster=to_string(row[1])) for i, row in enumerate(data[1:])]
groups = [str(g['name']) for g in self._desc['groups']]
def cmp(a, b):
ga = groups.index(a['cluster'])
gb = groups.index(b['cluster'])
if ga != gb:
return ga - gb
r = cmp_string(a['cluster'], b['cluster'])
if r != 0:
return r
return cmp_string(a['row'], b['row']) if r == 0 else r
d.sort(cmp) # sort by cluster;
clusters = dict()
for di in d:
c = di['cluster']
if c in clusters:
clusters[c].append(di['i'])
else:
clusters[c] = [di['i']]
colors = {g['name']: g['color'] for g in self._desc['groups']}
clusters = [dict(name=k, range=clusters.get(k, []), color=colors.get(k, 'gray')) for k in groups]
rows = np.array([di[0] for di in data[1:]])
return {'rows': rows,
'rowIds': assign_ids(rows, self.idtype),
'groups': clusters
}
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def groups(self):
return self.load()['groups']
def asjson(self, range=None):
return self.load()
@staticmethod
def parse(data, path, project, id=None):
desc = basic_description(data, 'stratification', path)
desc['idtype'] = data.get('idtype', data.get('rowtype', 'unknown'))
for k, v in data.items():
if k not in desc:
desc[k] = v
if id is not None:
desc['id'] = id
if 'size0' in data and 'ngroups' in data:
desc['size'] = [int(data['size0'])]
del desc['size0']
desc['ngroups'] = int(data['ngroups'])
else: # derive from the data
clusters = set()
count = 0
with io.open(path, 'r', newline='', encoding=desc.get('encoding', 'utf-8')) as csvfile:
reader = csv.reader(csvfile, delimiter=desc.get('separator', u','),
quotechar=str(desc.get('quotechar', u'|')))
for row in reader:
count += 1
clusters.add(row[1])
desc['size'] = [count]
desc['ngroups'] = len(clusters)
return CSVStratification(desc, project)
class CSVMatrix(CSVEntryMixin, AMatrix):
def __init__(self, desc, project):
AMatrix.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
desc['id'] = self.id
self.rowtype = desc['rowtype']
self.coltype = desc['coltype']
self.value = desc['value']['type']
self.range = desc['value']['range']
self.shape = desc['size']
def _process(self, data):
cols = np.array(data[0][1:])
rows = np.array([x[0] for x in data[1:]])
is_number = self.value == 'real' or self.value == 'int'
if is_number:
vs = [[np.NaN if v == 'NA' or v == '' else v for v in x[1:]] for x in data[1:]]
# import numpy.ma as ma
# dd = ma.masked_equal(np.array(vs), np.NaN)
dd = np.array(vs)
else:
dd = np.array([x[1:] for x in data[1:]])
return {'cols': cols,
'colIds': assign_ids(cols, self.coltype),
'rows': rows,
'rowIds': assign_ids(rows, self.rowtype),
'data': dd
}
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def cols(self, range=None):
n = self.load()['cols']
if range is None:
return n
return n[range.asslice()]
def colids(self, range=None):
n = self.load()['colIds']
if range is None:
return n
return n[range.asslice()]
def asnumpy(self, range=None):
n = self.load()['data']
if range is None:
return n
rows = range[0].asslice()
cols = range[1].asslice()
d = None
if isinstance(rows, list) and isinstance(cols, list):
      # fancy indexing in two dimensions doesn't work
d_help = n[rows, :]
d = d_help[:, cols]
else:
d = n[rows, cols]
if d.ndim == 1:
# two options one row and n columns or the other way around
if rows is Ellipsis or (isinstance(rows, list) and len(rows) > 1):
d = d.reshape((d.shape[0], 1))
else:
d = d.reshape((1, d.shape[0]))
elif d.ndim == 0:
d = d.reshape((1, 1))
return d
@staticmethod
def parse(data, path, project, id=None):
desc = basic_description(data, 'matrix', path)
desc['rowtype'] = data.get('rowtype', 'unknown')
desc['coltype'] = data.get('coltype', 'unknown')
desc['value'] = dict(type=data.get('value_type', 'real'))
for k, v in data.items():
if k not in desc:
desc[k] = v
if id is not None:
desc['id'] = id
if all((k in data) for k in ['size0', 'size1', 'value_min', 'value_max']):
desc['size'] = [int(data['size0']), int(data['size1'])]
del desc['size0']
del desc['size1']
desc['value']['range'] = [float(data['value_min']), float(data['value_max'])]
del desc['value_min']
del desc['value_max']
else: # derive from the data
rows = 0
cols = None
min_v = None
max_v = None
with io.open(path, 'r', newline='', encoding=desc.get('encoding', 'utf-8')) as csvfile:
reader = csv.reader(csvfile, delimiter=desc.get('separator', u','),
quotechar=str(desc.get('quotechar', u'|')))
for row in reader:
if cols is None:
cols = len(row) - 1
else:
rows += 1
min_act = min((float(f) for f in row[1:]))
min_v = min_act if min_v is None else min(min_act, min_v)
max_act = max((float(f) for f in row[1:]))
max_v = max_act if max_v is None else max(max_act, max_v)
desc['size'] = [rows, cols]
desc['value']['range'] = [float(data['value_min']) if 'value_min' in data else min_v,
float(data['value_max']) if 'value_max' in data else max_v]
return CSVMatrix(desc, project)
class CSVColumn(AColumn):
def __init__(self, desc, table):
super(CSVColumn, self).__init__(desc['name'], desc['value']['type'])
self._desc = desc
self._table = table
def asnumpy(self, range=None):
import pandas as pd
p = self._table.aspandas(range)[self.name]
if isinstance(p, pd.Series):
return p.values
return np.array([p])
def process(self, index, data):
is_number = self.type == 'real' or self.type == 'int'
if is_number:
return [np.NaN if d[index] == 'NA' or d[index] == '' else d[index] for d in data]
else:
return [d[index] for d in data]
def dump(self):
return self._desc
class CSVTable(CSVEntryMixin, ATable):
def __init__(self, desc, project):
ATable.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
desc['id'] = self.id
self.idtype = desc['idtype']
self.columns = [CSVColumn(d, self) for d in desc['columns']]
self.shape = desc['size']
def _process(self, data):
rows = np.array([x[0] for x in data[1:]])
import pandas as pd
objs = {c.name: c.process(i + 1, data[1:]) for i, c in enumerate(self.columns)}
df = pd.DataFrame(objs, columns=[c.name for c in self.columns])
df.index = rows
return {'rows': rows,
'rowIds': assign_ids(rows, self.idtype),
'df': df
}
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def aspandas(self, range=None):
n = self.load()['df']
if range is None:
return n
return n.iloc[range.asslice(no_ellipsis=True)]
@staticmethod
def parse(data, path, id=None):
pass
class CSVVector(CSVEntryMixin, AVector):
def __init__(self, desc, project):
AVector.__init__(self, desc['name'], project.id, desc['type'], desc.get('id', None))
CSVEntryMixin.__init__(self, desc, project)
desc['fqname'] = self.fqname
self.idtype = desc['idtype']
self.value = desc['value']['type']
self.range = desc['value']['range']
self.shape = desc['size']
def _process(self, data):
    # extract the row names first, before `data` is replaced by the raw values
    rows = np.array([x[0] for x in data[1:]])
    is_number = self.value == 'real' or self.value == 'int'
    if is_number:
      values = [np.NaN if x[1] == 'NA' or x[1] == '' else x[1] for x in data[1:]]
    else:
      values = [x[1] for x in data[1:]]
    return {'rows': rows,
            'rowIds': assign_ids(rows, self.idtype),
            'data': np.array(values)
            }
def rows(self, range=None):
n = self.load()['rows']
if range is None:
return n
return n[range.asslice()]
def rowids(self, range=None):
n = self.load()['rowIds']
if range is None:
return n
return n[range.asslice()]
def asnumpy(self, range=None):
n = self.load()['data']
if range is None:
return n
d = n[range[0].asslice()]
if d.ndim == 0:
d = d.reshape((1,))
return d
@staticmethod
def parse(data, path, project, id=None):
pass
def to_files(plugins):
for plugin in plugins:
index = os.path.join(plugin.folder + '/data/' if not hasattr(plugin, 'inplace') else plugin.folder, 'index.json')
if not os.path.isfile(index):
continue
with open(index, 'r') as f:
desc = json.load(f)
for di in desc:
if di['type'] == 'matrix':
yield CSVMatrix(di, plugin)
elif di['type'] == 'table':
yield CSVTable(di, plugin)
elif di['type'] == 'vector':
yield CSVVector(di, plugin)
elif di['type'] == 'stratification':
yield CSVStratification(di, plugin)
class DataPlugin(object):
def __init__(self, folder):
# add a magic plugin for the static data dir
self.inplace = True # avoid adding the data suffix
self.folder = folder
self.id = os.path.basename(folder)
def save(self, f):
import werkzeug.utils
from .util import random_id
if not os.path.exists(self.folder):
os.makedirs(self.folder)
filename = os.path.basename(f.filename)
filename = werkzeug.utils.secure_filename(filename + random_id(3) + '.csv')
path = os.path.join(self.folder, filename)
f.save(path)
return path
def append(self, desc, path):
desc['path'] = os.path.basename(path)
index = os.path.join(self.folder, 'index.json')
old = []
if os.path.isfile(index):
with io.open(index, 'r', newline='', encoding=desc.get('encoding', 'utf-8')) as f:
old = json.load(f)
old.append(desc)
with io.open(index, 'w', newline='', encoding=desc.get('encoding', 'utf-8')) as f:
json.dump(old, f, indent=1)
class StaticFileProvider(ADataSetProvider):
def __init__(self, plugins):
self.files = list(to_files(plugins))
cc = view('phovea_server')
self.data_plugin = DataPlugin(os.path.join(cc.dataDir, 'data'))
self.files.extend(to_files([self.data_plugin]))
import glob
extras = [DataPlugin(f) for f in (os.path.dirname(f) for f in glob.glob(cc.dataDir + '/*/index.json')) if
os.path.basename(f) != 'data']
self.files.extend(to_files(extras))
def __iter__(self):
return iter((f for f in self.files if f.can_read()))
def upload(self, data, files, id=None):
if 'csv' != data.get('_provider', 'csv'):
return None # not the right provider
type = data.get('type', 'unknown')
parsers = dict(matrix=CSVMatrix.parse, table=CSVTable.parse, vector=CSVVector.parse,
stratification=CSVStratification.parse)
if type not in parsers:
return None # unknown type
f = files[list(files.keys())[0]]
path = self.data_plugin.save(f)
r = parsers[type](data, path, self.data_plugin, id)
if r:
self.data_plugin.append(r._desc, path)
self.files.append(r)
else:
os.remove(path) # delete file again
return r
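  # (Editor's sketch; argument values are hypothetical, not part of the API)
  # A caller would typically invoke this roughly as
  #   provider.upload({'type': 'matrix', '_provider': 'csv',
  #                    'rowtype': 'gene', 'coltype': 'sample', 'name': 'expr'},
  #                   request.files)
  # The first uploaded file is saved into the data plugin's folder and, on a
  # successful parse, its descriptor is appended to that plugin's index.json.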
def create():
"""
entry point of this plugin
"""
from .plugin import plugins
return StaticFileProvider(plugins())
| bsd-3-clause | 1,559,304,948,322,076,400 | 29.072626 | 117 | 0.578612 | false |
Raphael-C-Almeida/Wireless-Sensor-Network | Data Fusion Test/Kalman Filter Final.py | 1 | 4967 | import matplotlib.pyplot as plt
import numpy as np
def gen_data(n, start=0, end=10):
x = np.linspace(start, end, n)
y = np.sin(10*x) - x*x
return y
def gen_data_osc(n):
return np.array([1024 + (-2)**(-i/25) for i in range(n)])
def gen_data_rand(n):
return np.random.randn(n) + 0.3*np.linspace(0, 10, n)
def iterativeCalcCovariance(X, Y, X_avg, Y_avg, conv_sum, data_size):
return (conv_sum + ((X - X_avg)*(Y - Y_avg))) / (data_size - 1)
def iterativeAngularCoef(X, Y, X_sum, Y_sum, XY_conv_sum, XX_conv_sum, data_size):
X_avg = X_sum/data_size
Y_avg = Y_sum/data_size
return iterativeCalcCovariance(X, Y, X_avg, Y_avg, XY_conv_sum, data_size) / iterativeCalcCovariance(X, X, X_avg, X_avg, XX_conv_sum, data_size)
def iterativeLinearCoef(a, X_sum, Y_sum, data_size):
X_avg = X_sum/data_size
Y_avg = Y_sum/data_size
return Y_avg - a*X_avg
def kalmanCoeficient(est, measurement):
return est / (est + measurement)
def iterativeKalmanFilter(prev, measurement):
return prev + kalmanCoeficient(prev, measurement) * (measurement - prev)
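# (Editor's note) The helpers above implement, per sample,
#   decaying recursive least squares:  a = cov_xy / cov_xx,  b = y_avg - a*x_avg,
#   with both covariance sums multiplied by `decay` at every step, and
#   a scalar Kalman-style update:      K = P / (P + R),  x_new = x_prev + K*(z - x_prev).
# This script reuses the previous estimate itself as its error variance P and
# the raw measurement as R; that is an assumption of this demo, not a full
# Kalman filter, which would propagate the variance separately.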
count = 100
end = 100
time = np.linspace(0, end, count)
data = gen_data(count)
delta = end / count
iterativePredictions = []
iterativeKalmanPrediction = []
kalmanMinQuad = []
X_sum = time[0]
Y_sum = data[0]
X_sum_kg = time[0]
Y_sum_kg = data[0]
XY_conv_sum = 0
XX_conv_sum = 0
XY_conv_sum_kg = 0
XX_conv_sum_kg = 0
data_size = 2
decay = 0.99
for i in range(1, count):
#Update data sum
X_sum += time[i-1]
Y_sum += data[i-1]
#Calculate AVG
X_avg = X_sum/data_size
Y_avg = Y_sum/data_size
#Calculate angular and linear coeficient using iterative function
iterativeAngularCoeficient = iterativeAngularCoef(time[i - 1], data[i - 1], X_sum, Y_sum, XY_conv_sum, XX_conv_sum, data_size)
print(time[i - 1], data[i - 1], X_sum, Y_sum, XY_conv_sum, XX_conv_sum, data_size)
iterativeLinearCoeficient = iterativeLinearCoef(iterativeAngularCoeficient, X_sum, Y_sum, data_size)
#Update XY and XX conv sum
XY_conv_sum = (XY_conv_sum*decay) + ((time[i-1] - X_avg)*(data[i-1] - Y_avg))
XX_conv_sum = (XX_conv_sum*decay) + ((time[i-1] - X_avg)*(time[i-1] - X_avg))
#Predict
iterativePrediction = (time[i - 1] + delta) * iterativeAngularCoeficient + iterativeLinearCoeficient
print("angular coef", iterativeAngularCoeficient)
print("iterative pred", iterativePrediction)
print("data", data[i-1])
#Calculating prediction using Kalman Filter
kalmanFilterIterativePrediction = iterativeKalmanFilter(iterativePrediction, data[i - 1])
#Update data sum
X_sum_kg += time[i-1]
Y_sum_kg += kalmanFilterIterativePrediction
#Calculate AVG
X_avg_kg = X_sum_kg/data_size
Y_avg_kg = Y_sum_kg/data_size
#Calculate angular and linear coeficient using iterative function
iterativeAngularCoeficient_kg = iterativeAngularCoef(time[i - 1], kalmanFilterIterativePrediction, X_sum_kg, Y_sum_kg, XY_conv_sum_kg, XX_conv_sum_kg, data_size)
iterativeLinearCoeficient_kg = iterativeLinearCoef(iterativeAngularCoeficient_kg, X_sum_kg, Y_sum_kg, data_size)
#Update XY and XX conv sum
print("kalman", kalmanFilterIterativePrediction)
print("XY cov", XY_conv_sum_kg)
XY_conv_sum_kg = (XY_conv_sum_kg*decay) + ((time[i-1] - X_avg_kg)*(kalmanFilterIterativePrediction - Y_avg_kg))
print("after", XY_conv_sum_kg)
XX_conv_sum_kg = (XX_conv_sum_kg*decay) + ((time[i-1] - X_avg_kg)*(time[i-1] - X_avg_kg))
#Predict
iterativePrediction_kg = (time[i - 1] + delta) * iterativeAngularCoeficient_kg + iterativeLinearCoeficient_kg
#Creating arrays of data to create Graph
iterativePredictions.append(iterativePrediction)
iterativeKalmanPrediction.append(kalmanFilterIterativePrediction)
kalmanMinQuad.append(iterativePrediction_kg)
#Update data size
data_size += 1
print(XY_conv_sum_kg)
print(XX_conv_sum_kg)
print(kalmanMinQuad)
#calculate Min. Qua. line
minQuadLineWithDecay = time * iterativeAngularCoeficient + iterativeLinearCoeficient
#calculate kalman Min. Qua. line
minQuadLineWithDecay_kg = time * iterativeAngularCoeficient_kg + iterativeLinearCoeficient_kg
plt.scatter(time, data, label="Measurements", color="#FF5850")
plt.scatter(time[1:], iterativePredictions, label="Iterative Least-Squares Est. with Decay", color="#1CB262")
plt.scatter(time[1:], iterativeKalmanPrediction, label="Iterative Kalman Est. with Decay", color="#FF00C0")
plt.scatter(time[1:], kalmanMinQuad, label="Kalman Prediction", color="#C0FF00")
plt.plot(time, minQuadLineWithDecay, label="Final Iterative Least-Squares Fit with Decay", color="#FFA136")
plt.plot(time, minQuadLineWithDecay_kg, label="Kalman Least-Squares Fit", color="#FF36A1")
plt.xlabel("Time")
plt.ylabel("Temperature")
plt.title("Approximation by Iterative Kalman Filter with Decay of %0.2f" % decay)
# Add a legend for the plotted series
plt.legend()
plt.show() | mit | 8,479,236,728,436,063,000 | 33.234483 | 165 | 0.698771 | false |
henningjp/CoolProp | dev/scripts/viscosity_builder.py | 2 | 3895 | from math import sqrt, exp
from CoolProp.CoolProp import Props
import numpy as np
import matplotlib.pyplot as plt
from scipy.odr import *
from math import log
E_K = {'REFPROP-Ammonia': 386,
'REFPROP-Argon': 143.2
}
SIGMA = {'REFPROP-Ammonia': 0.2957,
'REFPROP-Argon': 0.335
}
E_K['REFPROP-Propane'] = 263.88
SIGMA['REFPROP-Propane'] = 0.49748
E_K['REFPROP-R32'] = 289.65
SIGMA['REFPROP-R32'] = 0.4098
E_K['REFPROP-R245fa'] = 329.72
SIGMA['REFPROP-R245fa'] = 0.5529
def viscosity_dilute(fluid, T, e_k, sigma):
"""
T in [K], e_k in [K], sigma in [nm]
viscosity returned is in [Pa-s]
"""
Tstar = T / e_k
molemass = Props(fluid, 'molemass')
if fluid == 'Propane' or fluid == 'REFPROP-Propane':
a = [0.25104574, -0.47271238, 0, 0.060836515, 0]
theta_star = exp(a[0] * pow(log(Tstar), 0) + a[1] * pow(log(Tstar), 1) + a[3] * pow(log(Tstar), 3));
eta_star = 0.021357 * sqrt(molemass * T) / (pow(sigma, 2) * theta_star) / 1e6;
return eta_star
# From Neufeld, 1972, Journal of Chemical Physics - checked coefficients
OMEGA_2_2 = 1.16145 * pow(Tstar, -0.14874) + 0.52487 * exp(-0.77320 * Tstar) + 2.16178 * exp(-2.43787 * Tstar)
# Using the leading constant from McLinden, 2000 since the leading term from Huber 2003 gives crazy values
eta_star = 26.692e-3 * sqrt(molemass * T) / (pow(sigma, 2) * OMEGA_2_2) / 1e6
return eta_star
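# (Editor's note) The general branch above is the Chapman-Enskog dilute-gas
# estimate eta_0 = 26.692e-3 * sqrt(M*T) / (sigma**2 * Omega_22) in microPa-s
# (hence the final /1e6 to Pa-s), with the Omega_22 collision integral taken
# from the Neufeld (1972) correlation coded above.  As a rough, purely
# illustrative check, argon near 300 K with the E_K/SIGMA values defined at
# the top should come out on the order of 2e-5 Pa-s.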
def viscosity_linear(fluid, T, rho, e_k, sigma):
"""
Implements the method of Vogel 1998 (Propane) for the linear part
"""
N_A = 6.02214129e23
molemass = Props(fluid, 'molemass')
Tstar = T / e_k
b = [-19.572881, 219.73999, -1015.3226, 2471.01251, -3375.1717, 2491.6597, -787.26086, 14.085455, -0.34664158]
s = sum([b[i] * pow(Tstar, -0.25 * i) for i in range(7)])
B_eta_star = s + b[7] * pow(Tstar, -2.5) + b[8] * pow(Tstar, -5.5) # //[no units]
B_eta = N_A * pow(sigma / 1e9, 3) * B_eta_star # [m3/mol]
return viscosity_dilute(fluid, T, e_k, sigma) * B_eta * rho / molemass * 1000
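# (Editor's note) viscosity_linear() is the initial-density (linear in rho)
# correction: B_eta_star is the reduced second viscosity virial coefficient
# from Vogel's propane correlation referenced above, scaled by N_A*sigma**3 to
# a molar quantity; multiplying by the molar density rho/M*1000 then gives the
# contribution eta_0 * B_eta * rho_molar added on top of the dilute-gas term.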
from PDSim.misc.datatypes import Collector
RHO = Collector()
TT = Collector()
DELTA = Collector()
TAU = Collector()
VV = Collector()
VV0 = Collector()
VV1 = Collector()
VVH = Collector()
fluid = 'REFPROP-R32'
Tc = Props(fluid, 'Tcrit')
rhoc = Props(fluid, 'rhocrit')
for T in np.linspace(290, Props(fluid, 'Tcrit') - 0.1, 100):
rhoV = Props('D', 'T', T, 'Q', 1, fluid)
rhoL = Props('D', 'T', T, 'Q', 0, fluid)
rhomax = Props('D', 'T', Props(fluid, 'Tmin'), 'Q', 0, fluid)
for rho in list(np.linspace(rhoL, rhomax, 100)): # +list(np.linspace(rhoV,0.0001,100)):
# for rho in list(np.linspace(rhoV,0.0001,100)):
mu_0 = viscosity_dilute(fluid, T, E_K[fluid], SIGMA[fluid])
mu_1 = viscosity_linear(fluid, T, rho, E_K[fluid], SIGMA[fluid])
mu = Props('V', 'T', T, 'D', rho, fluid)
VV << mu
VV0 << mu_0
VV1 << mu_1
VVH << mu - mu_0 - mu_1
TT << T
RHO << rho
DELTA << rho / rhoc
TAU << Tc / T
def f_RHS(E, DELTA_TAU, VV):
k = 0
sum = 0
DELTA = DELTA_TAU[0, :]
TAU = DELTA_TAU[1, :]
for i in range(2, 5):
for j in range(3):
sum += E[k] * DELTA**i / TAU**j
k += 1
# f1,f2,f3,g1,g2 = E[k],E[k+1],E[k+2],E[k+3],E[k+4]
# DELTA0 = g1*(1+g2*np.sqrt(TAU))
# sum += (f1+f2/TAU+f3/TAU/TAU)*(DELTA/(DELTA0-DELTA)-DELTA/DELTA0)
print('%s %%' % np.mean(np.abs(((sum / VV - 1) * 100))))
return sum
log_muH = np.log(VVH.v().T)
x = np.c_[DELTA.v().T, TAU.v().T].T
y = VVH.v()
linear = Model(f_RHS, extra_args=(y,))
mydata = Data(x, y)
myodr = ODR(mydata, linear, beta0=np.array([0.1] * 17),)
myoutput = myodr.run()
E = myoutput.beta
print(E)
#plt.plot(TT.vec, y,'b.',TT.vec, f_RHS(E, x, y),'r.')
# plt.show()
# plt.plot()
plt.plot(y.T, f_RHS(E, x, y))
plt.show()
| mit | -724,381,576,362,455,700 | 30.92623 | 114 | 0.572272 | false |
fred3m/decam-toyz | decamtoyz/catalog.py | 1 | 14071 | import os
import pandas
import numpy as np
import logging
import shutil
import subprocess
import datetime
import warnings
from astropy.io import fits
from astropy.table import Table, join, vstack
from astropy.coordinates import SkyCoord
from astropy.time import Time
import astropy.units as apu
import astromatic_wrapper as aw
logger = logging.getLogger('decamtoyz.catalog')
catalog_info = {
'SDSS9': {
'columns': {
'ra': '_RAJ2000',
'dec': '_DEJ2000',
'e_ra': 'e_RAJ2000',
'e_dec': 'e_DEJ2000',
'pm_ra': 'pmRA',
'pm_dec': 'pmDE',
'e_pm_ra': 'e_pmRA',
'e_pm_dec': 'e_pmDE',
'ObsDate': 'ObsDate'
},
'info': {
'jyear': 'ObsDate',
'pm_units': 'mas',
'e_pos_units': 'arcsec',
'vizier_id': "V/139"
}
},
'UKIDSS9': {
'columns': {
'ra': '_RAJ2000',
'dec': '_DEJ2000',
'e_ra': 'e_RAJ2000',
'e_dec': 'e_DEJ2000',
'pm_ra': 'pmRA',
'pm_dec': 'pmDE',
'e_pm_ra': 'e_pmRA',
'e_pm_dec': 'e_pmDE',
'Epoch': 'Epoch'
},
'info': {
'jyear': 'Epoch',
'pm_units': 'mas',
'e_pos_units': 'mas',
'vizier_id': "II/319"
}
},
'2MASS': {
'columns': {
'ra': '_RAJ2000',
'dec': '_DEJ2000',
'e_ra': 'errMaj',
'e_dec': 'errMin',
'e_PA': 'errPA',
},
'info': {
'epoch': 2000.0,
'e_pos_units': 'arcsec',
'vizier_id': 'II/246'
}
},
'AllWISE': {
'columns': {
'ra': 'RA_pm', # position at MJD=55400.0 (2010.5589)
'dec': 'DE_pm',
'e_ra': 'e_RA_pm',
'e_dec': 'e_DE_pm',
'pm_ra': 'pmRA',
'pm_dec': 'pmDE',
'e_pm_ra': 'e_pmRA',
'e_pm_dec': 'e_pmDE',
},
'info': {
'epoch': 2010.5589,
'pm_units': 'mas',
'e_pos_units': 'arcsec',
'vizier_id': 'II/328'
}
},
'UCAC4': {
'columns': {
'ra': '_RAJ2000',
'dec': '_DEJ2000',
'e_ra': 'ePos',
'e_dec': 'ePos',
'pm_ra': 'pmRA',
'pm_dec': 'pmDE',
'e_pm_ra': 'e_pmRA',
'e_pm_dec': 'e_pmDE',
},
'info': {
'epoch': 2000.0,
'pm_units': 'mas',
'e_pos_units': 'mas',
'vizier_id': 'I/322A'
}
},
'GSC': {
'columns': {
'ra': '_RAJ2000',
'dec': '_DEJ2000',
'e_ra': 'e_RAdeg',
'e_dec': 'e_DEdeg',
'epoch': 'Epoch'
},
'info': {
'jyear': 'Epoch',
'e_pos_units': 'arcsec',
'vizier_id': 'I/305'
}
},
'DENIS': {
'columns': {
'ra': '_RAJ2000',
'dec': '_DEJ2000',
'epoch': 'ObsJD'
},
'info': {
'mjd': 'ObsJD',
'e_pos_units': 'mas',
'e_pos': '400',
'vizier_id': 'B/denis'
}
},
'USNOB1': {
'columns': {
'ra': '_RAJ2000',
'dec': '_DEJ2000',
'e_ra': 'e_RAJ2000',
'e_dec': 'e_DEJ2000',
'pm_ra': 'pmRA',
'pm_dec': 'pmDE',
'e_pm_ra': 'e_pmRA',
'e_pm_dec': 'e_pmDE',
'epoch': 'Epoch'
},
'info': {
'jyear': 'Epoch',
'pm_units': 'mas',
'e_pos_units': 'mas',
'vizier_id': 'I/284'
}
}
}
def get_query_region(ra_bounds, dec_bounds, print_results=True):
"""
Get the max/min ra and dec based on lists of boundaries
Parameters
----------
ra_bounds: list
Boundary values for RA. These are usually taken from the header, for example
ra_bounds = [hdu.header['CORN1RA'], hdu.header['CORN2RA'], hdu.header['CORN3RA'],
hdu.header['CORN4RA']]
dec_bounds: list
Boundary values for DEC.
Returns
-------
result: tuple
The result is the tuple (ra_min, ra_max, dec_min, dec_max)
"""
min_ra = min(ra_bounds)
max_ra = max(ra_bounds)
min_dec = min(dec_bounds)
max_dec = max(dec_bounds)
if print_results:
logger.debug('ra range: {0} to {1}'.format(min_ra, max_ra))
logger.debug('dec range: {0} to {1}'.format(min_dec, max_dec))
return min_ra, max_ra, min_dec, max_dec
def query_cat(catalog, min_ra, max_ra, min_dec, max_dec, columns=None,
column_filters=None):
"""
Use vizquery to get a reference catalog from vizier
"""
from astroquery.vizier import Vizier
# Build vizquery statement
width = int(np.ceil((max_ra-min_ra)*60))
height = int(np.ceil((max_dec-min_dec)*60))
center = SkyCoord((min_ra+max_ra)/2, (min_dec+max_dec)/2, unit='deg')
# If no column filters are specified, use the defaults
if column_filters is None:
if catalog.startswith('SDSS'):
column_filters = {
'cl': '=6',
'q_mode':'=+'
}
elif catalog.startswith('UKIDSS'):
column_filters = {
'cl': '=-1',
'm': '=1'
}
else:
column_filters = {}
# Query the catalog in Vizier
logger.debug('columns:{0}'.format(columns))
v = Vizier(columns=columns, column_filters=column_filters,
catalog=catalog_info[catalog]['info']['vizier_id'])
v.ROW_LIMIT=200000
result = v.query_region(center, width='{0}m'.format(width*1.25),
height='{0}m'.format(height*1.25))
refcat = result[0]
return refcat
def update_refcat(cat_name, refcat, obs_dates):
"""
Update a reference catalogs errors, since the positional errors in Vizier are
for the J2000 positions before the proper motions are used to move the positions
to the observed epoch.
"""
cat_info = catalog_info[cat_name]
# set the epoch of the reference observation
if 'epoch' in cat_info['info']:
refcat['epoch'] = cat_info['info']['epoch']
elif 'jyear' in cat_info['info']:
refcat.rename_column(cat_info['info']['jyear'], 'epoch')
elif 'jd' in cat_info['info']:
from astropy.time import Time
jd = Time(refcat[cat_info['info']['jd']], format='jd')
refcat['epoch'] = jd.jyear
del refcat[cat_info['info']['jd']]
# Change the column names to fit a standard
for colname in cat_info['columns']:
if ((colname=='e_ra' or colname=='e_dec') and
cat_info['columns']['e_ra']==cat_info['columns']['e_dec']):
refcat[colname] = refcat[cat_info['columns'][colname]].astype(float)
refcat[colname].unit = refcat[cat_info['columns'][colname]].unit
elif cat_info['columns'][colname] in refcat.columns:
x = refcat[cat_info['columns'][colname]]
refcat[colname] = x.astype(float)
refcat[colname].unit = x.unit
# Astropy uses masks instead of nan values, so we convert the
# mask to NaN
refcat[colname][x.mask] = np.nan
del refcat[cat_info['columns'][colname]]
#refcat.rename_column(cat_info['columns'][colname], colname)
# Change proper motion errors and position errors to mas
for col in ['pm_ra', 'pm_dec', 'e_pm_ra', 'e_pm_dec']:
if col in cat_info['columns']:
refcat[col].convert_unit_to(apu.mas/apu.year)
if 'e_ra' in refcat.columns and 'e_dec' in refcat.columns:
refcat['e_ra'].convert_unit_to(apu.mas)
refcat['e_dec'].convert_unit_to(apu.mas)
else:
warnings.warn("{0} was missing 'e_ra' and 'e_dec'".format(cat_name))
  # Tables do not add, subtract, multiply, or divide quantities properly, so
  # we need to include a conversion factor from mas to deg
mas2deg = 1/3600000.0
# Update positions to the observation dates
if 'pm_ra' in refcat.columns:
for obs_date in obs_dates:
date_diff = obs_date-refcat['epoch']
# Calculate the new positions
ra_field = 'ra_J{0:.1f}'.format(obs_date)
dec_field = 'dec_J{0:.1f}'.format(obs_date)
delta_ra = date_diff*refcat['pm_ra']*mas2deg
delta_ra.unit = 'deg'
delta_dec = date_diff*refcat['pm_dec']*mas2deg
delta_dec.unit = 'deg'
refcat[ra_field] = refcat['ra']+delta_ra
refcat[dec_field] = refcat['dec']+delta_dec
refcat[ra_field].unit='deg'
refcat[dec_field].unit='deg'
# Calculate the errors in the new positions and convert to mas
refcat['e_'+ra_field] = np.sqrt(
refcat['e_ra']**2+(date_diff*refcat['e_pm_ra'])**2)
refcat['e_'+dec_field] = np.sqrt(
refcat['e_dec']**2+(date_diff*refcat['e_pm_dec'])**2)
refcat['e_'+ra_field].unit = 'mas'
refcat['e_'+dec_field].unit = 'mas'
# Some sources might have NaN values for pm
pm_isnan = (np.isnan(refcat['pm_ra']) | np.isnan(refcat['pm_dec']) |
(refcat['pm_ra'].mask) | (refcat['pm_dec'].mask))
refcat[ra_field][pm_isnan] = refcat['ra'][pm_isnan]
refcat[dec_field][pm_isnan] = refcat['dec'][pm_isnan]
refcat['e_'+ra_field][pm_isnan] = refcat['e_ra'][pm_isnan]
refcat['e_'+dec_field][pm_isnan] = refcat['e_dec'][pm_isnan]
return refcat
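# (Editor's note, values purely illustrative) The proper-motion update above
# amounts to, per observation epoch t_obs,
#   ra(t_obs)   = ra_J2000 + (t_obs - epoch) * pm_ra * (1 deg / 3,600,000 mas)
#   e_ra(t_obs) = sqrt(e_ra**2 + ((t_obs - epoch) * e_pm_ra)**2)
# e.g. 10 years at pm_ra = 100 mas/yr shifts a source by ~2.8e-4 deg (1 arcsec).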
def cds_query(pipeline, obj, catalog, columns=None, frames=None,
proctype='InstCal', filter_columns=None, obs_dates=None):
"""
Use cdsclient vizquery to query vizier and return a reference catalog
Parameters
----------
pipeline: astromatic_wrapper.Pipeline
Pipeline containing index info
obj: str
Name of the DECam object
catalog: str
Name of the reference catalog. This can either be a name, like ``SDSS9``, or a
Vizier Id, like ``V/139``
columns: str (optional)
Space separated list of columns to query. If the first character is a ``*`` Vizier
will return all of the default columns
frames: list (optional)
Sometimes a FOV might have too many matches for a single query, so the query can be done
frame by frame. If ``frames=[]`` then each frame in the fits file will be queried,
otherwise only the frames in the list will be queried.
proctype: str (optional)
DECam pipeline proctype. Default is ``InstCal``
filter_columns: dict
Parameters to pass to astroquery. The keys are the names of a column in the
reference catalog and the values are an operator (such as '>','<','=') and
        a value. For example ``filter_columns={'e_pmRA':'<200'}``.
    obs_dates: list (optional)
        Observation epochs in decimal years to which the reference positions
        are propagated. If omitted, they are derived from the calendar dates
        of the object's exposures.
    """
from decamtoyz.index import query_idx
# Load a fits image for the given object
sql = "select * from decam_obs where object like '{0}%'".format(obj)
exposures = query_idx(sql, pipeline.idx_connect_str).sort(['expnum'])
exp = exposures.iloc[0]
sql="select * from decam_files where expnum={0} and proctype='{1}'".format(
exp['expnum'], proctype
)
sql += " and prodtype='image'"
files = query_idx(sql, pipeline.idx_connect_str).iloc[0]
hdulist = fits.open(files['filename'], memmap=True)
if obs_dates is None:
dates = exposures['cal_date'].unique().tolist()
obs_dates = Time(dates, format='iso').jyear
logger.info('Loading {0} for {1}'.format(catalog, obj))
if frames is None:
# Get the ra and dec range for the entire field of view
header = hdulist[0].header
min_ra, max_ra, min_dec, max_dec = get_query_region(
[header['CORN1RA'], header['CORN2RA'], header['CORN3RA'], header['CORN4RA']],
[header['CORN1DEC'], header['CORN2DEC'], header['CORN3DEC'], header['CORN4DEC']])
# Query vizier
refcat = query_cat(catalog, min_ra, max_ra, min_dec, max_dec, columns, filter_columns)
else:
# For each frame, query Vizier and merge the result into a catalog for the entire field
if len(frames)==0:
frames = range(1, len(hdulist))
refcat = None
for frame in frames:
logger.info('querying frame: {0}'.format(frame))
# Get the ra and dec range for the current frame
header = hdulist[frame].header
min_ra, max_ra, min_dec, max_dec = get_query_region(
[header['COR1RA1'], header['COR2RA1'], header['COR3RA1'], header['COR4RA1']],
[header['COR1DEC1'], header['COR2DEC1'], header['COR3DEC1'], header['COR4DEC1']])
# Query vizier
new_ref = query_cat(catalog, min_ra, max_ra, min_dec, max_dec, columns, filter_columns)
logger.debug('new entries: {0}'.format(len(new_ref)))
# Merge the results
if refcat is None:
refcat = new_ref
else:
refcat = vstack([refcat, new_ref])
logger.debug('total entries: {0}'.format(len(refcat)))
# Update the position errors and other fields needed for better SCAMP astrometric fit
refcat = update_refcat(catalog, refcat, obs_dates)
# Convert the Table to a fits_ldac file to read into SCAMP
# Sometimes the meta data is too long to save to a fits file, in which case
# we just delete the meta data
try:
new_hdulist = aw.utils.ldac.convert_table_to_ldac(refcat)
except ValueError:
refcat.meta={}
new_hdulist = aw.utils.ldac.convert_table_to_ldac(refcat)
cat_path = os.path.join(pipeline.paths['catalogs'], 'ref', "{0}-{1}.fits".format(obj, catalog))
new_hdulist.writeto(cat_path, clobber=True)
logger.info('saved {0}'.format(cat_path)) | lgpl-3.0 | -3,457,563,716,552,419,000 | 35.645833 | 99 | 0.530666 | false |
bmanubay/open-forcefield-tools | single-molecule-property-generation/torsion_fitting/Mol2_files/AlkEthOH_rings_filt1/read_top.py | 2 | 6304 | from parmed.amber import *
import numpy as np
import glob
import pandas as pd
files = glob.glob('./AlkEthOH_r47*.top')
def drop(mylist, m, n):
mylist = list(mylist)
del mylist[m::n]
return mylist
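# (Editor's note) drop(mylist, m, n) deletes every n-th element starting at
# index m, e.g. drop([a1, a2, k, b1, b2, k2], 2, 3) -> [a1, a2, b1, b2].
# It is used below to strip the force-field parameter index that AMBER stores
# after each bonded atom group.  AMBER prmtop files record atoms as
# coordinate-array offsets 3*(i-1), which is why the true atom numbers are
# recovered below with value/3 + 1.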
# Reading in and cleaning up atoms involved in bonds
lst0name = []
lstt0 = []
lst00 = []
print("PRINTING BOND PAIRS...")
for FileName in files:
# read in AMBER prmtop
fin = AmberFormat(FileName)
# pull out specified parm data
a1 = fin.parm_data['BONDS_INC_HYDROGEN']
a2 = fin.parm_data['BONDS_WITHOUT_HYDROGEN']
# Get rid of the index identifier for the value of the bond length
a1 = drop(a1,2,3)
a2 = drop(a2,2,3)
# Don't need to distinguish between bonded to H or not
a1.extend(a2)
# Return true atom numbers based on AMBER documentation
a1 = np.array(a1)/3 + 1
# Subdivide array into those of length 2 to make assigning column titles easier later
#a2 = np.array_split(a1, len(a1)/2)
# Need to create multiple lists for this to work
# lst0name and lst0 allow me to keep the bond pairs indexed with the molecule
# lst00 will allow me to create the column names after finding the unique pairs
lst0name.append(FileName)
lstt0.append(a1)
lst00.extend(a1)
# Convert lst00 into list of strings
lstt0 = [map(str,i) for i in lstt0]
lst00 = map(str, lst00)
# Join every two entries into space delimited string
lst0 = []
for sublst in lstt0:
temp = [i+' '+j for i,j in zip(sublst[::2], sublst[1::2])]
lst0.append(temp)
lst00 = [i+' '+j for i,j in zip(lst00[::2], lst00[1::2])]
# Return unique strings from lst00
cols0 = set()
for x in lst00:
cols0.add(x)
cols0 = list(cols0)
print(cols0)
# Generate data lists to populate dataframe
data0 = [[] for i in range(len(lst0))]
for val in cols0:
for ind,item in enumerate(lst0):
if val in item:
data0[ind].append(1)
else:
data0[ind].append(0)
print(data0)
# Reading in and cleaning up atoms involved in angles
lst1name = []
lstt1 = []
lst11 = []
print("PRINTING ANGLE TRIPLETS...")
for FileName in files:
# read in AMBER prmtop
fin = AmberFormat(FileName)
# pull out specified parm data
b1 = fin.parm_data['ANGLES_INC_HYDROGEN']
b2 = fin.parm_data['ANGLES_WITHOUT_HYDROGEN']
# Get rid of the index identifier for the value of the angles
b1 = drop(b1,3,4)
b2 = drop(b2,3,4)
# Don't need to distinguish between angles including H or not
b1.extend(b2)
# Return true atom numbers based on AMBER documentation
b1 = np.array(b1)/3 + 1
# Need to create multiple lists for this to work
# lst1name and lst1 allow me to keep the angle trios indexed with the molecule
# lst11 will allow me to create the column names after finding the unique trios
lst1name.append(FileName)
lstt1.append(b1)
lst11.extend(b1)
# Convert lstt1 and lst11 into list of strings
lstt1 = [map(str, i) for i in lstt1]
lst11 = map(str, lst11)
# Join every three entries into space delimited string
lst1 = []
for sublst in lstt1:
temp = [i+' '+j+' '+k for i,j,k in zip(sublst[::3], sublst[1::3], sublst[2::3])]
lst1.append(temp)
lst11 = [i+' '+j+' '+k for i,j,k in zip(lst11[::3], lst11[1::3], lst11[2::3])]
# Return unique strings from lst11
cols1 = set()
for x in lst11:
cols1.add(x)
cols1 = list(cols1)
# Generate data lists to populate frame (1 means the unique angle trio is present in that molecule, 0 means it isn't)
data1 = [[] for i in range(len(lst1))]
for val in cols1:
for ind,item in enumerate(lst1):
if val in item:
data1[ind].append(1)
else:
data1[ind].append(0)
#print(data1)
# Reading in and cleaning up atoms involved in dihedrals
lstt2 = []
lst2name = []
lst22 = []
print("PRINTING DIHEDRAL QUARTETS...")
for FileName in files:
# read in AMBER prmtop
fin = AmberFormat(FileName)
# pull out specified parm data
c1 = fin.parm_data['DIHEDRALS_INC_HYDROGEN']
c2 = fin.parm_data['DIHEDRALS_WITHOUT_HYDROGEN']
# Get rid of the index identifier for the value of the torsions
c1 = drop(c1,4,5)
c2 = drop(c2,4,5)
# Don't need to distinguish between torsions including H or not
c1.extend(c2)
# Return true atom numbers based on AMBER documentation
for i in range(len(c1)):
if c1[i] >= 0:
c1[i] = np.array(c1[i])/3 + 1
else:
c1[i] = -(abs(np.array(c1[i]))/3 + 1)
# Need to create multiple lists for this to work
# lst2name and lst2 allow me to keep the torsion quartets indexed with the molecule
# lst22 will allow me to create the column names after finding the unique quartets
lst2name.append(FileName)
lstt2.append(c1)
lst22.extend(c1)
# Convert lstt2 and lst22 into list of strings
lstt2 = [map(str,i) for i in lstt2]
lst22 = map(str, lst22)
# Join every four entries into space delimited string
lst2 = []
for sublst in lstt2:
temp = [i+' '+j+' '+k+' '+l for i,j,k,l in zip(sublst[::4], sublst[1::4], sublst[2::4], sublst[3::4])]
lst2.append(temp)
lst22 = [i+' '+j+' '+k+' '+l for i,j,k,l in zip(lst22[::4], lst22[1::4], lst22[2::4], lst22[3::4])]
# Return unique strings from lst22
cols2 = set()
for x in lst22:
cols2.add(x)
cols2 = list(cols2)
# Generate data lists to populate frame (1 means val in lst2 was in cols2, 0 means it wasn't)
data2 = [[] for i in range(len(lst2))]
for val in cols2:
for ind,item in enumerate(lst2):
if val in item:
data2[ind].append(1)
else:
data2[ind].append(0)
# Clean up clarity of column headers and molecule names
# build the std-column names from the raw atom strings first, then prefix the
# mean-value columns, so the two prefixes are not stacked on top of each other
cols0temp = ["BondEquilibriumLength_std_" + i for i in cols0]
cols0 = ["BondEquilibriumLength_" + i for i in cols0]
cols0 = cols0 + cols0temp
cols1temp = ["AngleEquilibriumAngle_std_" + i for i in cols1]
cols1 = ["AngleEquilibriumAngle_" + i for i in cols1]
cols1 = cols1 + cols1temp
cols2temp = ["TorsionEquilibriumAngle_std_" + i for i in cols2]
cols2 = ["TorsionEquilibriumAngle_" + i for i in cols2]
cols2 = cols2 + cols2temp
data0 = [i+i for i in data0]
data1 = [i+i for i in data1]
data2 = [i+i for i in data2]
# Construct dataframes
df0 = pd.DataFrame(data = data0, index = lst0name, columns = cols0)
df0['molecule'] = df0.index
df1 = pd.DataFrame(data = data1, index = lst1name, columns = cols1)
df1['molecule'] = df1.index
df2 = pd.DataFrame(data = data2, index = lst2name, columns = cols2)
df2['molecule'] = df2.index
dftemp = pd.merge(df0, df1, how = 'outer', on = 'molecule')
dfjoin = pd.merge(dftemp, df2, how = 'outer', on = 'molecule')
print(dfjoin)
dfjoin.to_csv("check.csv")
| mit | 7,412,857,011,192,627,000 | 27.396396 | 104 | 0.690197 | false |
tomsilver/NAB | tests/integration/scorer_test.py | 1 | 7319 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import datetime
import pandas
import random
import unittest
from nab.scorer import Scorer
from nab.test_helpers import generateTimestamps, generateWindows, generateLabels
class ScorerTest(unittest.TestCase):
def _checkCounts(self, counts, tn, tp, fp, fn):
"""Ensure the metric counts are correct."""
self.assertEqual(counts['tn'], tn, "Incorrect tn count")
self.assertEqual(counts['tp'], tp, "Incorrect tp count")
self.assertEqual(counts['fp'], fp, "Incorrect fp count")
self.assertEqual(counts['fn'], fn, "Incorrect fn count")
def setUp(self):
# Standard application profile
self.costMatrix = {"tpWeight": 1.0,
"fnWeight": 1.0,
"fpWeight": 1.0,
"tnWeight": 1.0}
def testNullCase(self):
"""No windows and no predictions should yield a score of 0.0."""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 10
timestamps = generateTimestamps(start, increment, length)
predictions = pandas.Series([0]*length)
labels = pandas.Series([0]*length)
windows = []
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertEqual(score, 0.0)
self._checkCounts(scorer.counts, 10, 0, 0, 0)
def testFalsePositiveScaling(self):
"""
Test scaling the weight of false positives results in an approximate
balance with the true positives.
The contributions of TP and FP scores should approximately cancel; i.e.
total score =0. With x windows, this total score should on average decrease
x/2 because of x FNs. Thus, the acceptable range for score should be
centered about -x/2.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 1
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
# Scale for 10% = windowSize/length
self.costMatrix["fpWeight"] = 0.11
# Make arbitrary detections, score, repeat
scores = []
for _ in xrange(20):
predictions = pandas.Series([0]*length)
indices = random.sample(range(length), 10)
predictions[indices] = 1
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
scores.append(score)
avgScore = sum(scores)/float(len(scores))
self.assertTrue(-1.5 <= avgScore <= 0.5, "The average score across 20 sets "
"of random detections is %f, which is not within the acceptable range "
"-1.5 to 0.5." % avgScore)
def testRewardLowFalseNegatives(self):
"""
Given false negatives in the set of detections, the score output with the
Reward Low False Negatives application profile will be greater than with
the Standard application profile.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 1
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
costMatrixFN = copy.deepcopy(self.costMatrix)
costMatrixFN["fnWeight"] = 2.0
costMatrixFN["fpWeight"] = 0.055
scorer1 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
scorer2 = Scorer(timestamps, predictions, labels, windows, costMatrixFN,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertEqual(score1, 0.5*score2)
self._checkCounts(scorer1.counts, length-windowSize*numWindows, 0, 0,
windowSize*numWindows)
self._checkCounts(scorer2.counts, length-windowSize*numWindows, 0, 0,
windowSize*numWindows)
def testRewardLowFalsePositives(self):
"""
Given false positives in the set of detections, the score output with the
Reward Low False Positives application profile will be greater than with
the Standard application profile.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 0
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = []
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
costMatrixFP = copy.deepcopy(self.costMatrix)
costMatrixFP["fpWeight"] = 2.0
costMatrixFP["fnWeight"] = 0.5
# FP
predictions[0] = 1
scorer1 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
scorer2 = Scorer(timestamps, predictions, labels, windows, costMatrixFP,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertEqual(score1, 0.5*score2)
self._checkCounts(scorer1.counts, length-windowSize*numWindows-1, 0, 1, 0)
self._checkCounts(scorer2.counts, length-windowSize*numWindows-1, 0, 1, 0)
def testScoringAllMetrics(self):
"""
This tests an example set of detections, where all metrics have counts > 0.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 100
numWindows = 2
windowSize = 5
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
index = timestamps[timestamps == windows[0][0]].index[0]
# TP, add'l TP, and FP
predictions[index] = 1
predictions[index+1] = 1
predictions[index+7] = 1
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertAlmostEquals(score, -0.9540, 4)
self._checkCounts(scorer.counts, length-windowSize*numWindows-1, 2, 1, 8)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,342,285,827,935,987,000 | 33.523585 | 80 | 0.673726 | false |
robcarver17/pysystemtrade | systems/provided/example/rules.py | 1 | 2538 | """
Simple trading rules used in examples
"""
import pandas as pd
from sysquant.estimators.vol import robust_vol_calc
def ewmac_forecast_with_defaults(price, Lfast=32, Lslow=128):
"""
Calculate the ewmac trading rule forecast, given a price and EWMA speeds
Lfast, Lslow and vol_lookback
Assumes that 'price' is daily data
This version recalculates the price volatility, and does not do capping or
scaling
:param price: The price or other series to use (assumed Tx1)
:type price: pd.Series
:param Lfast: Lookback for fast in days
:type Lfast: int
:param Lslow: Lookback for slow in days
:type Lslow: int
    :returns: pd.Series -- unscaled, uncapped forecast
"""
# price: This is the stitched price series
# We can't use the price of the contract we're trading, or the volatility
# will be jumpy
# And we'll miss out on the rolldown. See
# https://qoppac.blogspot.com/2015/05/systems-building-futures-rolling.html
# We don't need to calculate the decay parameter, just use the span
# directly
fast_ewma = price.ewm(span=Lfast).mean()
slow_ewma = price.ewm(span=Lslow).mean()
raw_ewmac = fast_ewma - slow_ewma
vol = robust_vol_calc(price.diff())
return raw_ewmac / vol
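# Illustrative usage sketch (not part of the original module; the synthetic price
# series below is an assumption purely for demonstration):
#
#   import numpy as np
#   import pandas as pd
#
#   price = pd.Series(100 + np.cumsum(np.random.randn(500)),
#                     index=pd.date_range("2020-01-01", periods=500))
#   forecast = ewmac_forecast_with_defaults(price, Lfast=16, Lslow=64)
#   print(forecast.tail())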
def ewmac_forecast_with_defaults_no_vol(price, vol, Lfast=16, Lslow=32):
"""
Calculate the ewmac trading rule forecast, given a price and EWMA speeds
Lfast, Lslow and vol_lookback
Assumes that 'price' is daily data and that the vol is on the same timestamp
This version recalculates the price volatility, and does not do capping or
scaling
:param price: The price or other series to use (assumed Tx1)
:type price: pd.Series
:param vol: The vol of the price
:type vol: pd.Series
:param Lfast: Lookback for fast in days
:type Lfast: int
:param Lslow: Lookback for slow in days
:type Lslow: int
:returns: pd.Series -- unscaled, uncapped forecast
"""
# price: This is the stitched price series
# We can't use the price of the contract we're trading, or the volatility will be jumpy
# And we'll miss out on the rolldown. See
# https://qoppac.blogspot.com/2015/05/systems-building-futures-rolling.html
# We don't need to calculate the decay parameter, just use the span
# directly
fast_ewma = price.ewm(span=Lfast).mean()
slow_ewma = price.ewm(span=Lslow).mean()
raw_ewmac = fast_ewma - slow_ewma
ans = raw_ewmac / vol
return ans
| gpl-3.0 | 8,978,256,097,869,711,000 | 27.840909 | 91 | 0.684397 | false |
saimn/psrecord | psrecord/main.py | 1 | 7618 | # Copyright (c) 2013, Thomas P. Robitaille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (unicode_literals, division, print_function,
absolute_import)
import time
import argparse
def get_percent(process):
try:
return process.cpu_percent()
except AttributeError:
return process.get_cpu_percent()
def get_memory(process):
try:
return process.memory_info()
except AttributeError:
return process.get_memory_info()
def all_children(pr):
processes = []
children = []
try:
children = pr.children()
except AttributeError:
children = pr.get_children()
except Exception: # pragma: no cover
pass
for child in children:
processes.append(child)
processes += all_children(child)
return processes
def main():
parser = argparse.ArgumentParser(
description='Record CPU and memory usage for a process')
parser.add_argument('process_id_or_command', type=str,
help='the process id or command')
parser.add_argument('--log', type=str,
help='output the statistics to a file')
parser.add_argument('--plot', type=str,
help='output the statistics to a plot')
parser.add_argument('--duration', type=float,
help='how long to record for (in seconds). If not '
'specified, the recording is continuous until '
'the job exits.')
parser.add_argument('--interval', type=float,
help='how long to wait between each sample (in '
'seconds). By default the process is sampled '
'as often as possible.')
parser.add_argument('--include-children',
help='include sub-processes in statistics (results '
'in a slower maximum sampling rate).',
action='store_true')
args = parser.parse_args()
# Attach to process
try:
pid = int(args.process_id_or_command)
print("Attaching to process {0}".format(pid))
sprocess = None
except Exception:
import subprocess
command = args.process_id_or_command
print("Starting up command '{0}' and attaching to process"
.format(command))
sprocess = subprocess.Popen(command, shell=True)
pid = sprocess.pid
monitor(pid, logfile=args.log, plot=args.plot, duration=args.duration,
interval=args.interval, include_children=args.include_children)
if sprocess is not None:
sprocess.kill()
def monitor(pid, logfile=None, plot=None, duration=None, interval=None,
include_children=False):
# We import psutil here so that the module can be imported even if psutil
# is not present (for example if accessing the version)
import psutil
pr = psutil.Process(pid)
# Record start time
start_time = time.time()
if logfile:
f = open(logfile, 'w')
f.write("# {0:12s} {1:12s} {2:12s} {3:12s}\n".format(
'Elapsed time'.center(12),
'CPU (%)'.center(12),
'Real (MB)'.center(12),
'Virtual (MB)'.center(12))
)
log = {}
log['times'] = []
log['cpu'] = []
log['mem_real'] = []
log['mem_virtual'] = []
try:
# Start main event loop
while True:
# Find current time
current_time = time.time()
try:
pr_status = pr.status()
except TypeError: # psutil < 2.0
pr_status = pr.status
except psutil.NoSuchProcess: # pragma: no cover
break
# Check if process status indicates we should exit
if pr_status in [psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD]:
print("Process finished ({0:.2f} seconds)"
.format(current_time - start_time))
break
# Check if we have reached the maximum time
if duration is not None and current_time - start_time > duration:
break
# Get current CPU and memory
try:
current_cpu = get_percent(pr)
current_mem = get_memory(pr)
except Exception:
break
current_mem_real = current_mem.rss / 1024. ** 2
current_mem_virtual = current_mem.vms / 1024. ** 2
# Get information for children
if include_children:
for child in all_children(pr):
try:
current_cpu += get_percent(child)
current_mem = get_memory(child)
except Exception:
continue
current_mem_real += current_mem.rss / 1024. ** 2
current_mem_virtual += current_mem.vms / 1024. ** 2
if logfile:
f.write("{0:12.3f} {1:12.3f} {2:12.3f} {3:12.3f}\n".format(
current_time - start_time,
current_cpu,
current_mem_real,
current_mem_virtual))
f.flush()
if interval is not None:
time.sleep(interval)
# If plotting, record the values
if plot:
log['times'].append(current_time - start_time)
log['cpu'].append(current_cpu)
log['mem_real'].append(current_mem_real)
log['mem_virtual'].append(current_mem_virtual)
except KeyboardInterrupt: # pragma: no cover
pass
if logfile:
f.close()
if plot:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(log['times'], log['cpu'], '-', lw=1, color='r')
ax.set_ylabel('CPU (%)', color='r')
ax.set_xlabel('time (s)')
ax.set_ylim(0., max(log['cpu']) * 1.2)
ax2 = ax.twinx()
ax2.plot(log['times'], log['mem_real'], '-', lw=1, color='b')
ax2.set_ylim(0., max(log['mem_real']) * 1.2)
ax2.set_ylabel('Real Memory (MB)', color='b')
ax.grid()
fig.savefig(plot)
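# Illustrative usage sketch (not part of the original module, which is normally
# driven through the command-line entry point above; PID 1234 is an assumption):
#
#   monitor(1234, logfile="activity.txt", plot="activity.png",
#           duration=60, interval=0.5, include_children=True)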
| bsd-2-clause | -2,285,660,879,018,334,200 | 31.836207 | 78 | 0.571804 | false |
dmargala/qusp | examples/save_deltas.py | 1 | 6760 | #!/usr/bin/env python
import argparse
import numpy as np
import numpy.ma as ma
import h5py
import qusp
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import scipy.interpolate
import fitsio
from restframe_work import export_exact_image
from progressbar import ProgressBar, Percentage, Bar
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
## targets to fit
parser.add_argument("--name", type=str, default=None,
help="base name of combined skim file")
parser.add_argument("--subsample-step", type=int, default=1000,
help="step size used for subsampling observations")
parser.add_argument("--dont-save", action="store_true",
help="dont save delta field (just do all the preprocessing)")
args = parser.parse_args()
# import data
skim = h5py.File(args.name+'.hdf5', 'r')
norm = skim['norm'][:][:,np.newaxis]
loglam = skim['loglam'][:]
wave = np.power(10.0, loglam)
quasar_redshifts = skim['z'][:]
linear_continuum = h5py.File(args.name+'-linear-continuum.hdf5', 'r')
params_a = linear_continuum['params_a'].value
params_b = linear_continuum['params_b'].value
continuum = linear_continuum['continuum'].value
continuum_wave = linear_continuum['continuum_wave'].value
continuum_interp = scipy.interpolate.UnivariateSpline(continuum_wave, continuum, s=0, ext=1)
wave_lya = linear_continuum.attrs['wave_lya']
abs_alpha = linear_continuum.attrs['abs_alpha']
abs_beta = linear_continuum.attrs['abs_beta']
forest_wave_ref = linear_continuum.attrs['forest_wave_ref']
print 'Adjusting weights for pipeline variance and LSS variance...'
forest_min_z = linear_continuum.attrs['forest_min_z']
forest_max_z = linear_continuum.attrs['forest_max_z']
forest_dz = 0.1
forest_z_bins = np.arange(forest_min_z, forest_max_z + forest_dz, forest_dz)
var_lss = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.05 + 0.06*(forest_z_bins - 2.0)**2, s=0)
var_pipe_scale = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.7 + 0.2*(forest_z_bins - 2.0)**2, s=0)
forest_pixel_redshifts = wave/wave_lya - 1
abs_coefs = abs_alpha*np.power(1+forest_pixel_redshifts, abs_beta)
forest_wave_refs = forest_wave_ref*(1+quasar_redshifts)
def model_flux(a, b):
return a*np.power(wave/forest_wave_refs[:,np.newaxis], b)*continuum_interp(wave/(1+quasar_redshifts[:,np.newaxis]))*np.exp(-abs_coefs)
mflux = model_flux(params_a[:,np.newaxis],params_b[:,np.newaxis])
# (1.0 + quasar_redshifts[:,np.newaxis])*forest_wave/args.wave_lya - 1.0
print forest_pixel_redshifts.shape
pixel_mask = skim['mask'][:]
print pixel_mask.shape
flux = np.ma.MaskedArray(skim['flux'][:], mask=pixel_mask)
ivar = np.ma.MaskedArray(skim['ivar'][:], mask=pixel_mask)
delta_flux = flux/mflux - 1.0
delta_ivar = ivar*mflux*mflux
delta_weight = delta_ivar*var_pipe_scale(forest_pixel_redshifts)
delta_weight = delta_weight/(1 + delta_weight*var_lss(forest_pixel_redshifts))
redshift_order = np.argsort(quasar_redshifts)
export_exact_image(args.name+'-delta-flux.png', delta_flux[redshift_order][::args.subsample_step], dpi=100,
vmin=-5, vmax=5, cmap=plt.get_cmap('bwr'), origin='lower')
    export_exact_image(args.name+'-delta-weight.png', ma.log10(delta_weight[redshift_order][::args.subsample_step]), dpi=100,
                       vmin=-5, vmax=2, cmap=plt.get_cmap('Purples'), origin='lower')
export_exact_image(args.name+'-delta-mask.png', pixel_mask[redshift_order][::args.subsample_step], dpi=100,
origin='lower')
print 'Computing mean delta...'
mask_params = (params_a > .1) & (params_a < 10) & (params_b > -10) & (params_b < 10)
delta_mean = ma.average(delta_flux[mask_params], axis=0)
delta_mean_weighted = ma.average(delta_flux[mask_params], weights=delta_weight[mask_params], axis=0)
delta_mean_ivar_weighted = ma.average(delta_flux[mask_params], weights=delta_ivar[mask_params], axis=0)
plt.figure(figsize=(12,9))
plt.plot(wave, delta_mean, label='Unweighted Mean')
plt.plot(wave, delta_mean_weighted, label='LSS weighted Mean')
plt.plot(wave, delta_mean_ivar_weighted, label='Ivar weighted Mean')
# plt.ylim(0.06*np.array([-1,1]))
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Delta Mean')
plt.grid()
plt.legend()
plt.savefig(args.name+'-lssweighted-delta-mean.png', dpi=100, bbox_inches='tight')
plt.close()
if args.dont_save:
return -1
outfile = h5py.File(args.name+'-delta.hdf5', 'w')
# copy attributes from input files
for attr_key in skim.attrs:
outfile.attrs[attr_key] = skim.attrs[attr_key]
# it's okay to overwrite the few that were already copied, I added a few attr to the combined
# skim file and dont want to run the whole chain just yet
for attr_key in linear_continuum.attrs:
outfile.attrs[attr_key] = linear_continuum.attrs[attr_key]
# create los group
lines_of_sight = outfile.create_group('lines_of_sight')
outfile.create_dataset('delta_mean', data=delta_mean.data)
# loop over targets
progress_bar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(quasar_redshifts)).start()
for i, z in enumerate(quasar_redshifts):
progress_bar.update(i)
if not mask_params[i]:
# print 'fit param outside nominal range'
continue
z = quasar_redshifts[i]
a = params_a[i]
b = params_b[i]
norm_i = norm[i]
meta = skim['meta'][i]
assert norm_i > 0
ra = float(meta['ra'])
dec = float(meta['dec'])
thing_id = meta['thing_id']
plate = meta['plate']
mjd = meta['mjd']
fiber = meta['fiber']
# save to hdf5 file
los = lines_of_sight.create_group(str(thing_id))
los.attrs['plate'] = plate
los.attrs['mjd'] = mjd
los.attrs['fiber'] = fiber
los.attrs['ra'] = ra
los.attrs['dec'] = dec
los.attrs['z'] = z
los.attrs['p0'] = a
los.attrs['p1'] = b
los.create_dataset('loglam', data=loglam, dtype='f4')
los.create_dataset('delta', data=(delta_flux[i]-delta_mean_weighted), dtype='f8')
los.create_dataset('weight', data=delta_weight[i], dtype='f8')
los.create_dataset('r_comov', data=np.zeros_like(loglam), dtype='f4')
# los.create_dataset('ivar', data=ivar[i]/(norm_i*norm_i), dtype='f4')
los.create_dataset('ivar', data=delta_ivar[i], dtype='f4')
outfile.close()
progress_bar.finish()
if __name__ == '__main__':
main()
| mit | 6,463,585,951,117,323,000 | 36.765363 | 142 | 0.648669 | false |
linsalrob/EdwardsLab | cartopy/crAssphage_cophenetic.py | 1 | 7805 | """
Read a cophenetic matrix to plot the distances. You can make the matrix using ete3 and tree_to_cophenetic_matrix.py
"""
import os
import sys
import argparse
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import math
import cartopy.crs as ccrs
import re
def get_lon_lat(idf, maxtoget=50000):
"""
Get the longitude and latitude of different ids. Note that we have longitude first to work with cartopy
:param idf: the id.map file
:param maxtoget: the maxiumum number of ids to get. This is just for debugging
:return:
"""
lonlat = {}
count = 0
global verbose
with open(idf, 'r') as fin:
for l in fin:
if count > maxtoget:
break
count+=1
s=re.search('latitude=(\S+)\]', l)
if not s:
sys.stderr.write("No latitude in {}".format(l))
continue
lat=s.group(1)
s = re.search('longitude=(\S+)\]', l)
if not s:
sys.stderr.write("No longitude in {}".format(l))
continue
lon = s.group(1)
p=l.split("\t")
lonlat[p[0]] = (float(lon), float(lat))
return lonlat
def latlon2distance(lat1, long1, lat2, long2, miles=False):
"""Convert two coordinates to distance.
This is an approximation since the earth is not spherical, but accuracy is <100m, especially for close points
This code was taken from http://www.johndcook.com/python_longitude_latitude.html
Latitude is measured in degrees north of the equator; southern locations have negative latitude.
Similarly, longitude is measured in degrees east of the Prime Meridian. A location 10deg west of
the Prime Meridian, for example, could be expressed as either 350deg east or as -10deg east.
Arguments: lat1, long1; lat2, long2; miles is a boolean. If you want miles set it to true. Else set it to false
"""
global verbose
if lat1 == lat2 and long1 == long2:
return 0
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi / 180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1) * degrees_to_radians
phi2 = (90.0 - lat2) * degrees_to_radians
# theta = longitude
theta1 = long1 * degrees_to_radians
theta2 = long2 * degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta, phi)
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) + math.cos(phi1) * math.cos(phi2))
try:
arc = math.acos(cos)
except Exception as err:
sys.stderr.write("There was an err: {} trying to take the acos of ({})\n".format(err, cos))
arc=0
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
#
# To convert to miles multiple arc by 3960
# To convert to kilometers multiply arc by 6373
if miles:
arc *= 3960
else:
arc *= 6373
return arc
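# Quick sanity check (illustrative, not part of the original script): one degree of
# longitude along the equator corresponds to roughly 111 km, so
#   latlon2distance(0.0, 0.0, 0.0, 1.0)             -> ~111.2  (kilometres)
#   latlon2distance(0.0, 0.0, 0.0, 1.0, miles=True) -> ~69.1   (miles)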
def closest_dna_dist(matrixfile):
"""
Read the matrix file and get the id of the point with the closest distance that is not ourself
:param treefile: The cophenetic matrix file to read
:return: a dict of a node and its closest leaf
"""
global verbose
if verbose:
sys.stderr.write("Getting closest distances\n")
distances = {}
with open(matrixfile, 'r') as f:
l = f.readline()
ids = l.rstrip().split("\t")
for i,name in enumerate(ids):
if i == 0:
continue
distances[name] = {}
for l in f:
data = l.rstrip().split("\t")
for i,dist in enumerate(data):
if i == 0:
continue
distances[data[0]][ids[i]] = float(dist)
distances[ids[i]][data[0]] = float(dist)
closest = {}
for d in distances:
closest[d] = {}
for k in sorted(distances[d], key=distances[d].get):
if k == d:
continue
closest[d][k] = distances[d][k]
break
if verbose:
sys.stderr.write("Done\n")
return closest
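# Expected input layout for the cophenetic matrix (illustrative, tab-separated,
# leaf ids in the first row and first column, distances in the body):
#
#   \tA\tB\tC
#   A\t0.0\t0.1\t0.4
#   B\t0.1\t0.0\t0.3
#   C\t0.4\t0.3\t0.0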
def plotmap(ll, dd, outputfile, maxdist=1, maxlinewidth=3):
"""
Plot the map of the dna distances and lat longs
:param ll: The lon-lats
:param dd: The distances to use
:param outputfile: The file name to write the image to
:param maxdist: The maximum distance that we will scale to be maxlinewidth
:return:
"""
global verbose
ax = plt.axes(projection=ccrs.Robinson())
# make the map global rather than have it zoom in to
# the extents of any plotted data
ax.set_global()
ax.stock_img()
ax.coastlines()
## color the lines based on the maximum distance value
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=maxdist)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
# Using contourf to provide my colorbar info, then clearing the figure
Z = [[0, 0], [0, 0]]
levels = range(0, int(100 * maxdist) + 10, 10)
CS3 = plt.contourf(Z, levels, cmap=jet)
# plt.clf()
# NOTE: longitude before latitude!!
# plt.plot([sdlon, brislon], [sdlat, brislat], color='blue', linewidth=2, transform=ccrs.Geodetic())
# plot the circles for each sample site
# markerfacecolor="None",
for lonlat in ll.values():
plt.plot(lonlat[0], lonlat[1], 'o', color='Black', alpha=0.25, markersize=4, transform=ccrs.PlateCarree())
for idx1 in dd:
for idx2 in dd[idx1]:
# this should only happen when we do best DNA distances
if idx1 not in ll:
sys.stderr.write("NO Lat/Lon for {}\n".format(idx1))
continue
if idx2 not in ll:
sys.stderr.write("NO Lat/Lon for {}\n".format(idx2))
continue
if verbose:
sys.stderr.write("Distance between {} and {}: {}\n".format(idx1, idx2, latlon2distance(ll[idx1][1], ll[idx1][0], ll[idx2][1], ll[idx2][0])))
linewidth = dd[idx1][idx2]
linewidth = linewidth/maxdist * maxlinewidth
#colorVal = scalarMap.to_rgba(dd[idx1][idx2])
plt.plot([ll[idx1][0], ll[idx2][0]], [ll[idx1][1], ll[idx2][1]], color='Red', linewidth=linewidth, alpha=0.1, transform=ccrs.Geodetic())
if latlon2distance(ll[idx1][1], ll[idx1][0], ll[idx2][1], ll[idx2][0]) < 100:
if verbose:
sys.stderr.write("Adding a circle for {} and {}\n".format(ll[idx1][0], ll[idx1][1]))
plt.plot(ll[idx1][0], ll[idx1][1], 'o', color='Red', alpha=0.1, markersize=2,
transform=ccrs.PlateCarree())
# plt.colorbar(CS3)
#plt.show()
plt.savefig(outputfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot a map using ete and lat lon')
parser.add_argument('-i', help='id.map file with lat/lon information', required=True)
parser.add_argument('-m', help='cophenetic map file with same ids as id.map', required=True)
parser.add_argument('-o', help='output file name', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
global verbose
verbose = False
if args.v:
verbose = True
lonlat = get_lon_lat(args.i)
# dist = best_dna_dist(get_dna_distance(args.t))
dist = closest_dna_dist(args.m)
plotmap(lonlat, dist, args.o) | mit | 1,121,322,911,500,271,900 | 31.798319 | 156 | 0.601666 | false |
w-k-jones/brownian | brownian_system.py | 1 | 13860 | """
Created on Wed Jan 25 09:50:23 2017
@author: wj
History:
16/02/2017, LM: Small adjustments so can run properly with animation file
07/03/2017, WJ: General tidying
"""
#
import numpy as np
import matplotlib.pyplot as plt
#Define system class to hold wall,ball classes and system procedures
class system:
def __init__(self,wall_in,ball_in):
print '>>>Initialising System'
print 'Inputing wall properties'
self.wall = wall_in
print 'Inputing particle properties'
self.ball = ball_in
print 'Setting system time'
self.t = 0
self.t_step = 0.1
self.t_max = 100
#Incident and reflected velocity tracker
self.vel = np.full(2, np.nan).reshape([2,-1])
self.b = 0
self.c = 0
self.w = 0
self.type = 0
def step(self):
t_2col = self.ball.t2col()
t_2wall = self.wall.t2wall(self.ball)
t_2corn = self.wall.t2corn(self.ball)
self.min_t = np.nanmin([t_2col,t_2wall,t_2corn])
if self.min_t <= self.t_step:
self.t +=self.min_t
self.ball.p += self.ball.v*self.min_t
if t_2col == self.min_t:
self.ball.dv_col()
self.b +=1
self.type = 1
if t_2wall == self.min_t:
self.wall.dv_wall(self.ball)
self.w +=1
self.type = 2
self.vel = np.concatenate((self.vel,self.wall.velw), axis=1)
if t_2corn == self.min_t:
self.wall.dv_corn(self.ball)
self.c +=1
self.type = 3
self.vel = np.concatenate((self.vel,self.wall.velc), axis=1)
else:
self.t +=self.t_step
self.ball.p += self.ball.v*self.t_step
self.type = 0
self.ball.get_T()
return self.ball.p, self.t, self.ball.T
def plt_sys(self):
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(self.wall.xlim[0]-0.1,self.wall.xlim[1]+0.1),
ylim=(self.wall.ylim[0]-0.1,self.wall.ylim[1]+0.1))
wal, = ax.plot(self.wall.co_plt[:,0],self.wall.co_plt[:,1])
bal, = ax.plot(self.ball.p[:,0],self.ball.p[:,1],'bo',
ms=fig.dpi
*fig.get_figwidth()/(ax.get_xlim()[1]-ax.get_xlim()[0])
*2*self.ball.r[0]
)
fig.show
def run(self,n_step):
print 'Running system for ',n_step,' steps'
#Initialise arrays for recording system properties
# elpased time
self.t_el = np.full(n_step+1,0)
# ball index
self.i_b = np.full([n_step+1,2],-1)
#wall index
self.i_w = np.full([n_step+1,1],-1)
#collision type
self.typ = np.full(n_step+1,0)
# collision velocities
self.dv = np.full([n_step+1,2],0)
# mean velocity
self.v_av = np.full([n_step+1,2],0)
self.v_av[0] = self.ball.get_v_av()
# total momentum
self.mv = np.full([n_step+1,2],0)
self.mv[0] = self.ball.get_mv_tot()
# system temperature
self.T = np.full(n_step+1,0)
self.T[0] = self.ball.get_T()
# total energy
self.E = np.full(n_step+1,0)
self.E[0] = self.ball.get_E_tot()
# kinetic energy
self.KE = np.full(n_step+1,0)
self.KE[0] = self.ball.get_E_KE()
# heat
self.Q = np.full(n_step+1,0)
self.Q[0] = self.ball.get_E_Q()
# angular momentum
self.mv_ang = np.full(n_step+1,0)
self.mv_ang[0] = self.ball.get_mv_ang()
for i in range(n_step):
# print progress
if i > 0 and i % (n_step/10) == 0:
print i*100./n_step,' % complete'
# step forwards
self.step()
# record system properties
self.t_el[i+1] = self.t
self.typ[i+1] = self.type
if self.type == 1:
self.i_b[i+1,0] = self.ball.i_ball
self.i_b[i+1,1] = self.ball.j_ball
self.dv[i+1] = [self.ball.dv1, self.ball.dv2]
elif self.type == 2:
self.i_w[i+1] = self.wall.i_wall
self.i_b[i+1,0] = self.wall.i_ball
self.dv[i+1] = [self.wall.dv_in, self.wall.dv_out]
elif self.type == 3:
self.i_w[i+1] = self.wall.i_corn
self.i_b[i+1,0] = self.wall.i_ball_c
self.dv[i+1] = [self.wall.dv_in, self.wall.dv_out]
#update particle properties
self.v_av[i+1] = self.ball.get_v_av()
self.mv[i+1] = self.ball.get_mv_tot()
self.T[i+1] = self.ball.get_T()
self.E[i+1] = self.ball.get_E_tot()
self.KE[i+1] = self.ball.get_E_KE()
self.Q[i+1] = self.ball.get_E_Q()
self.mv_ang[i+1] = self.ball.get_mv_ang()
#print np.shape(np.sum(self.ball.v**2,axis=1)**0.5)
#print self.v_av[i+1]
# collision histogram code
#self.vel = np.delete(self.vel,0,1)
#plt.hist(self.vel[0],bins=30)
#plt.show()
#plt.hist(self.vel[1],bins=30)
#plt.show()
print 'System time elapsed: ',self.t
print 'Particle collisions: ',self.b
print 'Wall collisions: ',self.w
print 'Corner collisions: ',self.c
v_av_all = np.mean(self.v_av)
print 'Average particle velocity = ', v_av_all
v_err_all = np.std(self.v_av)/n_step**0.5
print 'v error = ', v_err_all
#Calculate mean free path and collision time
t_col_tot = 0
t_col_n = 0
i_b_col = self.i_b[self.typ == 1]
t_col = self.t_el[self.typ == 1]
for j in np.arange(self.ball.n):
n_t = np.sum(i_b_col == j)
if n_t >= 2:
i = np.where(i_b_col == j)[0]
i = np.sort(i)
t_i = t_col[i]
t_col_tot += t_i[-1]-t_i[0]
t_col_n += (n_t-1)
self.t_col = t_col_tot/t_col_n
self.t_col_err = self.t_col/t_col_n**0.5
print 'Average collision time = ',self.t_col
print 'Error = ',self.t_col_err
self.d_col = self.t_col*v_av_all
self.d_col_err = self.d_col/t_col_n**0.5
print 'Mean free path = ', self.d_col
print 'Error = ',self.d_col_err
#print 'Wall collision velocities = ', self.vel_distrib
return self.t_el, \
self.t_col, \
self.t_col_err, \
self.d_col, \
self.d_col_err, \
self.v_av, \
self.mv, \
self.T, \
self.E, \
self.KE, \
self.Q, \
self.mv_ang
"""
Old run and plot code, use .run and .plt_... functions instead
"""
def run_plt(self,n_step):
print 'Running system for ',n_step,' steps'
print 'Recording momentum, angular momentum and temperature'
self.T = np.full([n_step+1],np.nan)
self.T[0] = np.mean(np.std(self.ball.v,axis=0)**2)
self.mv_tot = np.full([n_step+1,self.ball.nd],np.nan)
self.mv_tot[0] = np.sum(self.ball.v
*np.reshape(self.ball.m,[self.ball.n,1])
,axis=0)
v_tot = np.sum(self.ball.v,axis=0)
#print v_tot
self.KE = np.full([n_step+1,self.ball.nd],np.nan)
self.KE[0] = np.sum(self.ball.m)*np.sum(v_tot**2)/2
#print self.KE[0]
self.Q = np.full([n_step+1,self.ball.nd],np.nan)
self.Q[0] = np.sum(np.reshape(self.ball.m,[self.ball.n,1])
*(self.ball.v -np.reshape(v_tot,[1,self.ball.nd]))**2)/2
#print self.Q[0]
self.mv_ang = np.full([n_step+1],np.nan)
self.mv_ang[0] = np.sum(self.ball.m
*(self.ball.v[:,0]*(self.ball.p[:,1]-0.5)
-self.ball.v[:,1]*(self.ball.p[:,0]-0.5)))
self.P = np.copy(self.wall.P)
self.t_el = np.full(n_step+1,0)
self.dv = np.full([n_step+1],np.nan)
self.dv[0] = 0
self.dv_i = np.full([n_step+1],self.wall.n+1)
self.i_b = np.full([n_step+1,2],-1)
for i in range(n_step):
self.step()
self.t_el[i+1] = self.t
self.ball.get_T()
self.T[i+1] = self.ball.T
self.ball.get_mv_tot()
self.mv_tot[i+1] = self.ball.mv_tot
self.mv_ang[i+1] = np.sum(self.ball.m
*(self.ball.v[:,0]*(self.ball.p[:,1]-0.5)
-self.ball.v[:,1]*(self.ball.p[:,0]-0.5)))
self.P = (self.wall.P/self.wall.vlen)/self.t
"""
v_tot = np.sum(self.ball.v,axis=0)
self.KE[i+1] = np.sum(self.ball.m)*np.sum(v_tot**2)/2
self.Q[i+1] = np.sum(np.reshape(self.ball.m,[self.ball.n,1])
*(self.ball.v -np.reshape(v_tot,[1,self.ball.nd]))**2)/2
"""
if self.type == 2:
self.dv[i+1] = abs(self.wall.dv)
self.dv_i[i+1] = self.wall.i_wall
if self.type == 1:
self.i_b[i+1,0] = self.ball.i_ball
self.i_b[i+1,1] = self.ball.j_ball
if i % 2000 == 0:
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(self.wall.xlim[0]-0.1,self.wall.xlim[1]+0.1),
ylim=(self.wall.ylim[0]-0.1,self.wall.ylim[1]+0.1))
wal, = ax.plot(self.wall.co_plt[:,0],self.wall.co_plt[:,1])
bal, = ax.plot(self.ball.p[:,0],self.ball.p[:,1],'bo',
ms=fig.dpi
*fig.get_figwidth()
/(ax.get_xlim()[1]-ax.get_xlim()[0])
*2*self.ball.r[0]
)
fig.show
print 'Particle collisions: ',self.b
print 'Wall collisions: ',self.w
print 'Corner collisions: ',self.c
print 'Plotting temperature and momentum'
fig2 = plt.figure(figsize=(6,6))
ax2 = fig2.add_subplot(111)
ax2.plot(self.t_el,self.T*120)
ax2.set_title('System Temperature')
ax2.set_xlabel('Elapsed time /ps')
ax2.set_ylabel('Temperature /K')
fig3 = plt.figure(figsize=(6,6))
ax3 = fig3.add_subplot(111)
ax3.plot(self.t_el,self.mv_tot)
ax3.set_title('Linear Momentum')
ax3.set_xlabel('Elapsed time /ps')
ax3.set_ylabel('Momentum /AMU.m/s')
ax3.legend(['x component','y component'])
fig4 = plt.figure(figsize=(6,6))
ax4 = fig4.add_subplot(111)
ax4.plot(self.t_el,self.mv_ang)
ax4.set_xlabel('Elapsed time /ps')
ax4.set_ylabel('Angular Momentum /AMU/s')
"""
ax5 = fig3.add_subplot(224)
ax5.plot(self.t_el,self.P)
"""
fig2.show
fig3.show
fig4.show
"""
fig5 = plt.figure(figsize=(6,6))
ax5 = fig5.add_subplot(111)
plot_dv = self.dv[self.dv_i < 8]
#print plot_dv
plot_dv_i = self.dv_i[self.dv_i < 8]
plot_dv_i = plot_dv_i.astype(int)
#print plot_dv_i
plot_dv = plot_dv * self.wall.norm[plot_dv_i,0]
#print plot_dv
ax5.hist(plot_dv, 50, normed=1, facecolor='green', alpha=0.75)
fig5.show
"""
t_col_tot = 0
n_tot = 0
for j in np.arange(self.ball.n):
n_col = np.sum(self.i_b == j)
if n_col >= 2:
i = np.where(self.i_b == j)[0]
i = np.sort(i)
t_i = self.t_el[i]
t_col_tot += np.mean(t_i[-1]-t_i[0])
n_tot += (n_col-1)
self.t_col = t_col_tot/n_tot
print self.t_col
"""
fig4 = plt.figure(figsize=(6,6))
ax6 = fig4.add_subplot(111)
ax6.plot(self.t_el,self.KE)
ax6.plot(self.t_el,self.Q)
fig3.show
"""
print 'System time elapsed: ',self.t
"""
def ani_init(self):
def init():
outline = wall.co
particles.set_data([], [])
if len(radii) > 1:
particles1.set_data([], [])
if len(radii) > 2:
particles2.set_data([], [])
time_text.set_text('')
energy_text.set_text('')
pressure_text.set_text('')
avg_press_text.set_text('')
if len(radii) == 1:
return particles, time_text, energy_text, pressure_text, avg_press_text, rect
elif len(radii) == 2:
return particles, particles1, time_text, energy_text, pressure_text, avg_press_text, rect
else:
return particles, particles1, particles2, time_text, energy_text, pressure_text, avg_press_text, rect
in_co3=np.array([[0,0],[0.2,0],[0.4,0.2],[0.4,0],[0.6,0.2],[0.6,0],[0.8,0.2],[0.8,0],[1,0],[1,1],[0.8,1],[0.6,0.8],[0.6,1],[0.4,0.8],[0.4,1],[0.2,0.8],[0.2,1],[0,1]])
"""
| mit | 7,813,621,357,036,184,000 | 35 | 174 | 0.461328 | false |
sylvan5/PRML | ch3/multi_linear_regression.py | 2 | 2946 | #coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
"""
Parameter estimation for a multivariate linear regression model
(multiple regression) using gradient descent
"""
def featureNormalize(X):
X_norm = X
mu = np.mean(X, axis=0)
sigma = np.std(X, axis=0)
for i in range(len(mu)):
X_norm[:, i] = (X_norm[:, i] - mu[i]) / sigma[i]
return X_norm, mu, sigma
def computeCost(X, y, theta):
    m = len(y)  # number of training examples
tmp = np.dot(X, theta) - y
J = 1.0 / (2 * m) * np.dot(tmp.T, tmp)
return J
def gradientDescent(X, y, theta, alpha, iterations):
    m = len(y)  # number of training examples
    J_history = []  # cost recorded at each update
for iter in range(iterations):
theta = theta - alpha * (1.0 / m) * np.dot(X.T, np.dot(X, theta) - y)
J_history.append(computeCost(X, y, theta))
return theta, J_history
def normalEqn(X, y):
theta = np.linalg.inv(np.dot(X.T, X))
theta = np.dot(theta, X.T)
theta = np.dot(theta, y)
return theta
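# For reference (comments added for clarity, not in the original script), the two
# estimators above solve the same least-squares problem:
#   gradient descent update:  theta := theta - (alpha / m) * X.T (X theta - y)
#   normal equation:          theta  = inv(X.T X) X.T y
# both minimising the cost J(theta) = 1 / (2 m) * ||X theta - y||^2 from computeCost().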
if __name__ == "__main__":
    # Load the training data
    # column 0: house size (sq-ft)
    # column 1: number of bedrooms
    # column 2: house price
    # Task: estimate the house price from its size and number of bedrooms
data = np.genfromtxt("ex1data2.txt", delimiter=",")
X = data[:, (0,1)]
y = data[:, 2]
    m = len(y)  # number of training examples
    # House size and number of bedrooms are on very different scales,
    # so normalize the features first
X, mu, sigma = featureNormalize(X)
    # Prepend a column of ones (intercept term) to the training data
X = np.hstack((np.ones((m, 1)), X))
    # Initialize the parameters to zero
    # y and theta do not need to be reshaped into column vectors:
    # np.dot() treats 1-D arrays as column vectors automatically
theta = np.zeros(3)
iterations = 1500
alpha = 0.01
    # Compute the cost for the initial parameters
initialCost = computeCost(X, y, theta)
print "initial cost:", initialCost
    # Estimate the parameters with gradient descent
thetaGD, J_history = gradientDescent(X, y, theta, alpha, iterations)
print "theta (gradient descent):", thetaGD
print "final cost:", J_history[-1]
    # Closed-form solution via the normal equation
    # (gives almost the same parameters as gradient descent)
thetaNE = normalEqn(X, y)
print "theta (normal equation):", thetaNE
    # Predict the price of a 1650 sq-ft house with 3 bedrooms
xs = np.array([1, (1650 - mu[0]) / sigma[0], (3 - mu[1]) / sigma[1]])
predict1 = np.dot(xs, thetaGD)
predict2 = np.dot(xs, thetaNE)
print "1650 sq-ft, 3 bedrooms => %f (gradient descent)" % predict1
print "1650 sq-ft, 3 bedrooms => %f (normal equation)" % predict2
    # Plot the cost history
plt.figure(2)
plt.plot(J_history)
plt.xlabel("iteration")
plt.ylabel("J(theta)")
plt.show()
| mit | -1,640,898,940,971,660,800 | 25.222222 | 77 | 0.602542 | false |
timtroendle/urban-occupants-paper | scripts/tus/seed.py | 1 | 2667 | from itertools import chain
import click
import pandas as pd
import pytus2000
from urbanoccupants import PeopleFeature, HouseholdFeature
from urbanoccupants.types import HouseholdType
from urbanoccupants.tus import filter_features_and_drop_nan
HOUSEHOLD_TYPE_FEATURE_NAME = str(HouseholdFeature.HOUSEHOLD_TYPE)
@click.command()
@click.argument('path_to_input')
@click.argument('path_to_output')
def read_seed(path_to_input, path_to_output):
"""Reads, transforms, and filters the individual data from the TUS data set.
The raw data is mapped to people and household features of this study, all other
features are discarded. Furthermore the data set is filtered by correct households,
e.g. a couple with children household must have at least 3 individuals, otherwise
it is discarded as well.
Output is written in plain pickle format.
"""
individual_data = _read_raw_data(path_to_input)
print("Read {} individuals.".format(individual_data.shape[0]))
seed = _map_to_internal_types(individual_data)
seed = _filter_invalid_households(seed)
print("Write {} individuals.".format(seed.shape[0]))
seed.to_pickle(path_to_output)
def _read_raw_data(path_to_input):
return pytus2000.read_individual_file(path_to_input)
def _map_to_internal_types(individual_data):
age = individual_data.IAGE
seed = pd.DataFrame(index=individual_data.index)
for feature in chain(PeopleFeature, HouseholdFeature):
seed[str(feature)] = feature.tus_value_to_uo_value(
individual_data[feature.tus_variable_name],
age
)
return seed
def _filter_invalid_households(seed):
hh_groups = seed.groupby((seed.index.get_level_values('SN1'),
seed.index.get_level_values('SN2')))
households = hh_groups[HOUSEHOLD_TYPE_FEATURE_NAME].agg(['first', 'count'])
households.rename(columns={'first': 'type'}, inplace=True)
    invalids = households[
        ((households['type'] == HouseholdType.COUPLE_WITH_DEPENDENT_CHILDREN) &
         (households['count'] <= 2)) |
        ((households['type'] == HouseholdType.COUPLE_WITHOUT_DEPENDENT_CHILDREN) &
         (households['count'] != 2)) |
        ((households['type'] == HouseholdType.LONE_PARENT_WITH_DEPENDENT_CHILDREN) &
         (households['count'] < 2)) |
        ((households['type'] == HouseholdType.MULTI_PERSON_HOUSEHOLD) &
         (households['count'] < 2))
    ]
print("{} households are invalid and were removed.".format(len(invalids.index)))
return seed.drop(labels=invalids.index, level=None)
if __name__ == '__main__':
read_seed()
| mit | 4,898,239,188,910,586,000 | 37.1 | 91 | 0.685039 | false |
KaitoKid/InvokerPractice | analytics/Invoker.py | 1 | 4299 | import json
import collections
import numpy
import matplotlib.pyplot as plt
from itertools import islice
class Event:
def __init__(self, data, timestamp):
self.timestamp = int(timestamp)
self.event_type = data['state']
if self.event_type == 'key down':
self.target = data['data']['target queue']
#self.current = data['data']['current queue']
self.success = data['data']['success state']
self.key = data['data']['key event']
class Invoker:
def __init__(self, filename):
self.od = collections.OrderedDict(sorted(json.load(open(filename)).items()))
self.bio = self.od.items()[0][1]['data']
self.sections = [e['data']['task mode'] for e in [v for k, v in self.od.iteritems() if v['state'] == 'initialize']]
self.section_start_times = [int([k for k, v in self.od.iteritems() if v['state'] == 'initialize' and v['data']['task mode'] == section][0]) for section in self.sections]
self.section_start_times.append(int(max(self.od.keys())))
self.section_start_end_times = zip(self.section_start_times[:-1], self.section_start_times[1:])
self.section_times = dict(zip(self.sections, [collections.OrderedDict(sorted({str(e): self.od[str(e)] for e in range(*pair) if str(e) in self.od}.items())) for pair in self.section_start_end_times]))
self.section_data = dict(zip(self.sections, [[Event(self.od[d], d) for d in self.section_times[s]] for s in self.sections]))
self.section_times_to_combo = [self.time_to_combo(section) for section in self.sections]
self.section_time_to_start = [self.time_to_start_press(section) for section in self.sections]
_, self.section_ikis, self.section_time_to_correct = zip(*[self.IKI(section) for section in self.sections])
def time_to_combo(self, section):
''' Time in milliseconds to successful combo '''
times = []
for event in self.section_data[section]:
if event.event_type == 'initialize':
last_time = event.timestamp
if (event.event_type == 'key down' and event.success == 'matched'):
times.append(event.timestamp - last_time)
last_time = event.timestamp
return times
def time_to_start_press(self, section):
''' Time in milliseconds to first key press '''
times = []
flag = 'found'
for event in self.section_data[section]:
if flag == 'found' and (event.event_type == 'initialize' or (event.event_type == 'key down' and event.success == 'matched')):
last_time = event.timestamp
flag = 'unfound'
continue
if (event.event_type == 'key down' and flag == 'unfound'):
times.append(event.timestamp - last_time)
flag = 'found'
return times
def IKI(self, section):
''' IKIs and time to correct combo start '''
times = []
ikis = []
times_since = []
current = [0,] * 6
iki = [0,] * 6
time_since = [0,] * 6
last = self.section_data[section][0].timestamp
combo_start = last
for ind, event in enumerate(self.section_data[section][1:]):
if event.event_type == 'key down':
if event.key in [49, 81, 87, 69, 82, 68]:
current.pop(0)
current.append(event.key)
iki.pop(0)
iki.append(event.timestamp - last)
time_since.pop(0)
time_since.append(event.timestamp - combo_start)
#print 'iki(' + str(ind) + '): ' + str(iki) + ', ' + str(last)
last = event.timestamp
if event.target.upper() == ''.join([chr(e) for e in current]):
ikis.append(iki[1:])
times.append(iki[0])
times_since.append(time_since[0])
combo_start = last
time_since = [0,] * 6
current = [0,] * 6
iki = [0,] * 6
#print 'ikis: '
#print ikis
return times, ikis, times_since
| apache-2.0 | -5,124,483,033,910,992,000 | 45.728261 | 207 | 0.540823 | false |
perrette/pydraw | pydraw.py | 1 | 2621 | """ pydraw module: draw lines that can be modified via mouse click
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import LineString, Point
def draw_background(fig=None):
""" background for drawing
"""
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111)
return ax
class InteractiveLine(object):
""" class to plot a matplotlib 2-D line
"""
def __init__(self, line):
"""
"""
self.line = line
self.line.set_picker(5) # 5 points tolerance
#self.line.set_pickable(True) # line is pickable by mouse
self.line.figure.canvas.mpl_connect('pick_event', self.on_pick)
def on_pick(self, event):
""" mouse pick of the line ==> modify the points
"""
#i, xp, yp = find_closest_point(self.line.get_data(), \
#(event.xdata, event.ydata))
# line coordinates
xl, yl = self.line.get_data()
xp, yp = event.mouseevent.xdata, event.mouseevent.ydata
# distance between points on the line and clicked point
dist = (xp- np.asarray(xl))**2 + (yp - np.asarray(yl))**2
# find minimum distance
i = np.argmin(dist)
return self.move_vertex(i)
def move_vertex(self, i):
""" move ith vertex of the line
just offer the user to move the point and update the line
"""
xl, yl = self.line.get_data()
# display picked point
pt, = self.line.axes.plot(xl[i], yl[i], 'ko')
# draw it
self.draw()
# user-input new location
xy = self.line.figure.ginput(n=1, timeout=0)
x, y = xy[0]
# update line
xl[i] = x
yl[i] = y
self.line.set_data(xl, yl)
# remove former point
pt.remove()
# draw
self.draw()
@classmethod
def plot(cls, x, y, ax=None, **lineprops):
""" plot a line
"""
if ax is None:
ax = plt.gca()
line, = ax.plot(x, y, **lineprops)
iline = cls(line)
iline.draw()
return iline
@classmethod
def ginput(cls, ax=None):
""" draw a line
"""
if ax is None:
ax = plt.gca()
# use input line
print "draw a line"
line = ax.figure.ginput(n=0, timeout=0)
x, y = zip(*line)
return cls.plot(x, y)
def draw(self):
self.line.figure.canvas.draw()
def main():
ax = draw_background()
line = InteractiveLine.ginput(ax=ax)
return line
if __name__ == '__main__':
line = main()
| bsd-2-clause | 8,617,618,007,358,381,000 | 23.495327 | 71 | 0.537963 | false |
compas-dev/compas | src/compas_plotters/_plotter.py | 1 | 19096 | import os
import shutil
import subprocess
from contextlib import contextmanager
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.patches import FancyArrowPatch
from matplotlib.patches import ArrowStyle
from compas_plotters.core.drawing import create_axes_xy
from compas_plotters.core.drawing import draw_xpoints_xy
from compas_plotters.core.drawing import draw_xlines_xy
from compas_plotters.core.drawing import draw_xpolylines_xy
from compas_plotters.core.drawing import draw_xpolygons_xy
from compas_plotters.core.drawing import draw_xarrows_xy
__all__ = [
'BasePlotter',
'valuedict'
]
def valuedict(keys, value, default):
"""
Build value dictionary from a list of keys and a value.
Parameters
----------
keys: list
The list of keys
value: {dict, int, float, str, None}
A value or the already formed dictionary
default: {int, float, str}
A default value to set if no value
Returns
-------
dict
A dictionary
Notes
-----
This standalone and generic function is only required by plotters.
"""
if isinstance(value, dict):
return {key: value.get(key, default) for key in keys}
else:
return dict.fromkeys(keys, value or default)
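# Illustrative examples (not part of the original module):
#   valuedict([0, 1, 2], '#ff0000', '#000000')      -> {0: '#ff0000', 1: '#ff0000', 2: '#ff0000'}
#   valuedict([0, 1, 2], {1: '#ff0000'}, '#000000') -> {0: '#000000', 1: '#ff0000', 2: '#000000'}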
class BasePlotter:
"""Definition of a plotter object based on matplotlib.
Parameters
----------
figsize : tuple, optional
The size of the plot in inches (width, length). Default is ``(16.0, 12.0)``.
Other Parameters
----------------
dpi : float, optional
The resolution of the plot in "dots per inch".
Default is ``100.0``.
tight : bool, optional
Produce a plot with limited padding between the plot and the edge of the figure.
Default is ``True``.
fontsize : int, optional
The size of the font used in labels. Default is ``10``.
axes : matplotlib.axes.Axes, optional
An instance of ``matplotlib`` ``Axes``.
For example to share the axes of a figure between different plotters.
Default is ``None`` in which case the plotter will make its own axes.
Attributes
----------
defaults : dict
Dictionary containing default attributes for vertices and edges.
Notes
-----
For more info, see [1]_.
References
----------
.. [1] Hunter, J. D., 2007. *Matplotlib: A 2D graphics environment*.
Computing In Science & Engineering (9) 3, p.90-95.
Available at: http://ieeexplore.ieee.org/document/4160265/citations.
"""
def __init__(self, figsize=(16.0, 12.0), dpi=100.0, tight=True, axes=None, fontsize=10, **kwargs):
"""Initializes a plotter object"""
self._axes = None
self.axes = axes
self.tight = tight
# use descriptors for these
# to help the user set these attributes in the right format
# figure attributes
self.figure_size = figsize
self.figure_dpi = dpi
self.figure_bgcolor = '#ffffff'
# axes attributes
self.axes_xlabel = None
self.axes_ylabel = None
# drawing defaults
# z-order
# color
# size/thickness
self.defaults = {
'point.radius': 0.1,
'point.facecolor': '#ffffff',
'point.edgecolor': '#000000',
'point.edgewidth': 0.5,
'point.textcolor': '#000000',
'point.fontsize': fontsize,
'line.width': 1.0,
'line.color': '#000000',
'line.textcolor': '#000000',
'line.fontsize': fontsize,
'polygon.facecolor': '#ffffff',
'polygon.edgecolor': '#000000',
'polygon.edgewidth': 0.1,
'polygon.textcolor': '#000000',
'polygon.fontsize': fontsize,
}
@property
def axes(self):
"""Returns the axes subplot matplotlib object.
Returns
-------
Axes
The matplotlib axes object.
Notes
-----
For more info, see the documentation of the Axes class ([1]_) and the
axis and tick API ([2]_).
References
----------
.. [1] https://matplotlib.org/api/axes_api.html
.. [2] https://matplotlib.org/api/axis_api.html
"""
if self._axes is None:
self._axes = create_axes_xy(
figsize=self.figure_size,
dpi=self.figure_dpi,
xlabel=self.axes_xlabel,
ylabel=self.axes_ylabel
)
return self._axes
@axes.setter
def axes(self, axes):
self._axes = axes
@property
def figure(self):
"""Returns the matplotlib figure instance.
Returns
-------
Figure
The matplotlib figure instance.
Notes
-----
For more info, see the figure API ([1]_).
References
----------
.. [1] https://matplotlib.org/2.0.2/api/figure_api.html
"""
return self.axes.get_figure()
@property
def canvas(self):
"""Returns the canvas of the figure instance.
"""
return self.figure.canvas
@property
def bgcolor(self):
"""Returns the background color.
Returns
-------
str
The color as a string (hex colors).
"""
return self.figure.get_facecolor()
@bgcolor.setter
def bgcolor(self, value):
"""Sets the background color.
Parameters
----------
value : str, tuple
The color specification for the figure background.
Colors should be specified in the form of a string (hex colors) or
as a tuple of normalized RGB components.
"""
self.figure.set_facecolor(value)
@property
def title(self):
"""Returns the title of the plot.
Returns
-------
str
The title of the plot.
"""
return self.figure.canvas.get_window_title()
@title.setter
def title(self, value):
"""Sets the title of the plot.
Parameters
----------
value : str
The title of the plot.
"""
self.figure.canvas.set_window_title(value)
def register_listener(self, listener):
"""Register a listener for pick events.
Parameters
----------
listener : callable
The handler for pick events.
Returns
-------
None
Notes
-----
For more information, see the docs of ``mpl_connect`` ([1]_), and on event
handling and picking ([2]_).
References
----------
.. [1] https://matplotlib.org/api/backend_bases_api.html#matplotlib.backend_bases.FigureCanvasBase.mpl_connect
.. [2] https://matplotlib.org/users/event_handling.html
Examples
--------
.. code-block:: python
#
"""
self.figure.canvas.mpl_connect('pick_event', listener)
def clear_collection(self, collection):
"""Clears a matplotlib collection object.
Parameters
----------
collection : object
The matplotlib collection object.
Notes
-----
For more info, see [1]_ and [2]_.
References
----------
.. [1] https://matplotlib.org/2.0.2/api/collections_api.html
.. [2] https://matplotlib.org/2.0.2/api/collections_api.html#matplotlib.collections.Collection.remove
"""
collection.remove()
def show(self, autoscale=True):
"""Displays the plot.
"""
if autoscale:
self.axes.autoscale()
if self.tight:
plt.tight_layout()
plt.show()
def top(self):
"""Bring the plotting window to the top.
Warnings
--------
This seems to work only for some back-ends.
Notes
-----
For more info, see this SO post [1]_.
References
----------
.. [1] https://stackoverflow.com/questions/20025077/how-do-i-display-a-matplotlib-figure-window-on-top-of-all-other-windows-in-spyde
"""
self.figure.canvas.manager.show()
def save(self, filepath, **kwargs):
"""Saves the plot to a file.
Parameters
----------
filepath : str
Full path of the file.
Notes
-----
For an overview of all configuration options, see [1]_.
References
----------
.. [1] https://matplotlib.org/2.0.2/api/pyplot_api.html#matplotlib.pyplot.savefig
"""
self.axes.autoscale()
plt.savefig(filepath, **kwargs)
@contextmanager
def gifified(self, func, tempfolder, outfile, pattern='image_{}.png'):
"""Create a context for making animated gifs using a callback for updating the plot.
Parameters
----------
func : callable
The callback function used to update the plot.
tempfolder : str
The path to a folder for storing temporary image frames.
outfile : str
            Path to the file where the result should be saved.
pattern : str, optional
Pattern for the filename of the intermediate frames.
The pattern should contain a replacement placeholder for the number
of the frame. Default is ``'image_{}.png'``.
"""
images = []
def gifify(f):
def wrapper(*args, **kwargs):
f(*args, **kwargs)
image = os.path.join(tempfolder, pattern.format(len(images)))
images.append(image)
self.save(image)
return wrapper
if not os.path.exists(tempfolder) or not os.path.isdir(tempfolder):
os.makedirs(tempfolder)
for file in os.listdir(tempfolder):
filepath = os.path.join(tempfolder, file)
try:
if os.path.isfile(filepath):
os.remove(filepath)
except Exception as e:
print(e)
image = os.path.join(tempfolder, pattern.format(len(images)))
images.append(image)
self.save(image)
#
yield gifify(func)
#
self.save_gif(outfile, images)
shutil.rmtree(tempfolder)
print('done gififying!')
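    # A hedged usage sketch for ``gifified`` (``update_plot``, the temp folder
    # and the output path are placeholders): each call to the yielded function
    # redraws the plot via the callback and stores one frame; the frames are
    # assembled into a gif when the context exits.
    #
    #     with plotter.gifified(update_plot, '/tmp/frames', 'out.gif') as frame:
    #         for step in range(10):
    #             frame(step)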
def save_gif(self, filepath, images, delay=10, loop=0):
"""Save a series of images as an animated gif.
Parameters
----------
filepath : str
The full path to the output file.
images : list
A list of paths to input files.
delay : int, optional
            The delay between frames, in ImageMagick ``-delay`` ticks (1/100 of a second). Default is ``10``.
loop : int, optional
The number of loops. Default is ``0``.
Returns
-------
None
Warnings
--------
        This function assumes ImageMagick is installed on your system and that
        *convert* is available on your system path.
"""
command = ['convert', '-delay', '{}'.format(delay), '-loop', '{}'.format(loop), '-layers', 'optimize']
subprocess.call(command + images + [filepath])
def draw_points(self, points):
"""Draws points on a 2D plot.
Parameters
----------
points : list of dict
List of dictionaries containing the point properties.
Each point is represented by a circle with a given radius.
The following properties of the circle can be specified in the point dict.
* pos (list): XY(Z) coordinates
* radius (float, optional): the radius of the circle. Default is 0.1.
* text (str, optional): the text of the label. Default is None.
* facecolor (rgb or hex color, optional): The color of the face of the circle. Default is white.
            * edgecolor (rgb or hex color, optional): The color of the edge of the circle. Default is black.
* edgewidth (float, optional): The width of the edge of the circle. Default is 1.0.
* textcolor (rgb or hex color, optional): Color of the text label. Default is black.
* fontsize (int, optional): Font size of the text label. Default is ``12``.
Returns
-------
object
The matplotlib point collection object.
"""
return draw_xpoints_xy(points, self.axes)
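    # An illustrative input for ``draw_points`` (coordinates and colors are
    # invented; ``plotter`` stands for an instance of this class):
    #
    #     points = [
    #         {'pos': [0.0, 0.0], 'radius': 0.2, 'facecolor': '#ff0000'},
    #         {'pos': [1.0, 1.0], 'text': 'a', 'edgewidth': 0.5},
    #     ]
    #     collection = plotter.draw_points(points)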
def draw_lines(self, lines):
"""Draws lines on a 2D plot.
Parameters
----------
lines : list of dict
List of dictionaries containing the line properties.
The following properties of a line can be specified in the dict.
* start (list): XY(Z) coordinates of the start point.
            * end (list): XY(Z) coordinates of the end point.
* width (float, optional): The width of the line. Default is ``1.0``.
* color (rgb tuple or hex string, optional): The color of the line. Default is black.
* text (str, optional): The text of the label. Default is ``None``.
* textcolor (rgb tuple or hex string, optional): Color of the label text. Default is black.
* fontsize (int, optional): The size of the font of the label text. Default is ``12``.
Returns
-------
object
The matplotlib line collection object.
"""
return draw_xlines_xy(lines, self.axes)
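    # An illustrative input for ``draw_lines`` (values are invented):
    #
    #     lines = [
    #         {'start': [0.0, 0.0], 'end': [1.0, 0.0], 'width': 2.0},
    #         {'start': [0.0, 0.0], 'end': [0.0, 1.0], 'color': '#0000ff'},
    #     ]
    #     collection = plotter.draw_lines(lines)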
def draw_polylines(self, polylines):
"""Draw polylines on a 2D plot.
Parameters
----------
polylines : list of dict
A list of dictionaries containing the polyline properties.
The following properties are supported:
* points (list): XY(Z) coordinates of the polygon vertices.
* text (str, optional): The text of the label. Default is ``None``.
* textcolor (rgb tuple or hex string, optional): Color of the label text. Default is black.
* fontsize (int, optional): The size of the font of the label text. Default is ``12``.
* facecolor (rgb tuple or hex string, optional): Color of the polygon face. Default is white.
* edgecolor (rgb tuple or hex string, optional): Color of the edge of the polygon. Default is black.
* edgewidth (float): Width of the polygon edge. Default is ``1.0``.
Returns
-------
object
The matplotlib polyline collection object.
"""
return draw_xpolylines_xy(polylines, self.axes)
def draw_polygons(self, polygons):
"""Draws polygons on a 2D plot.
Parameters
----------
polygons : list of dict
List of dictionaries containing the polygon properties.
The following properties can be specified in the dict.
* points (list): XY(Z) coordinates of the polygon vertices.
* text (str, optional): The text of the label. Default is ``None``.
* textcolor (rgb tuple or hex string, optional): Color of the label text. Default is black.
* fontsize (int, optional): The size of the font of the label text. Default is ``12``.
* facecolor (rgb tuple or hex string, optional): Color of the polygon face. Default is white.
* edgecolor (rgb tuple or hex string, optional): Color of the edge of the polygon. Default is black.
* edgewidth (float): Width of the polygon edge. Default is ``1.0``.
Returns
-------
object
The matplotlib polygon collection object.
"""
return draw_xpolygons_xy(polygons, self.axes)
def draw_arrows(self, arrows):
"""Draws arrows on a 2D plot.
Parameters
----------
arrows : list of dict
List of dictionaries containing the arrow properties.
The following properties of an arrow can be specified in the dict.
* start (list): XY(Z) coordinates of the starting point.
* end (list): XY(Z) coordinates of the end point.
* text (str, optional): The text of the label. Default is ``None``.
* textcolor (rgb tuple or hex string, optional): Color of the label text. Default is black.
* fontsize (int, optional): The size of the font of the label text. Default is ``6``.
* color (rgb tuple or hex string, optional): Color of the arrow. Default is black.
* width (float): Width of the arrow. Default is ``1.0``.
Returns
-------
object
The matplotlib arrow collection object.
"""
return draw_xarrows_xy(arrows, self.axes)
    def draw_arrows2(self, arrows):
        """Draws arrows on a 2D plot using ``FancyArrowPatch`` patches.
        Accepts the same dictionaries as ``draw_arrows``; only the ``start``,
        ``end`` and (optional) ``color`` entries are used here.
        """
for data in arrows:
a = data['start'][:2]
b = data['end'][:2]
color = data.get('color', (0.0, 0.0, 0.0))
style = ArrowStyle("Simple, head_length=.1, head_width=.1, tail_width=.02")
arrow = FancyArrowPatch(a, b,
arrowstyle=style,
edgecolor=color,
facecolor=color,
zorder=2000,
mutation_scale=100)
self.axes.add_patch(arrow)
def update(self, pause=0.0001):
"""Updates and pauses the plot.
Parameters
----------
pause : float
Amount of time to pause the plot in seconds.
"""
self.axes.autoscale()
if self.tight:
plt.tight_layout()
plt.pause(pause)
def update_pointcollection(self, collection, centers, radius=1.0):
"""Updates the location and radii of a point collection.
Parameters
----------
collection : object
The point collection to update.
centers : list
List of tuples or lists with XY(Z) location for the points in the collection.
radius : float or list, optional
            The radii of the points. If a float is given it will be used for all points.
"""
try:
len(radius)
except Exception:
radius = [radius] * len(centers)
data = zip(centers, radius)
circles = [Circle(c[0:2], r) for c, r in data]
collection.set_paths(circles)
def update_linecollection(self, collection, segments):
"""Updates a line collection.
Parameters
----------
collection : object
The line collection to update.
segments : list
List of tuples or lists with XY(Z) location for the start and end
points in each line in the collection.
"""
collection.set_segments([(start[0:2], end[0:2]) for start, end in segments])
def update_polygoncollection(self, collection, polygons):
raise NotImplementedError
| mit | -6,496,157,172,936,023,000 | 30.356322 | 140 | 0.560432 | false |
menloparkwizard/Aesop-Stats | aesop.py | 1 | 2321 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Standard Python Imports
import os
import sys
import codecs
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Local Imports
from text import Text
# set default output encoding so we can pipe to files
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
text_files = glob.glob("texts/*.txt")
print text_files
# , u"ж" u"ш",
wf_patterns = [u"п", u"пь", u"б", u"бь", u"ф", u"фь", u"в", u"вь", u"т", u"ть", u"д", u"дь", u"с", u"сь", u"з", u"зь", u"шь?", u"жь?", u"к", u"г"]
wf_ipa_equiv = [u"p", u"pʲ", u"b", u"bʲ", u"f", u"fʲ", u"v", u"vʲ", u"t", u"tʲ", u"d", u"dʲ", u"s", u"sʲ", u"z", u"zʲ", u"ʂ", u"ʐ", u"k", u"ɡ"]
print "Checking: " + str(len(wf_patterns)) + " patterns"
# Parse all the text files
texts = []
for f in text_files:
T = Text(f)
T.parse_file()
texts.append(T)
wf_match = {}
# wf_match_by_text = {}
for p in wf_patterns:
print "Searching for: " + p
for t in texts:
if p in wf_match:
wf_match[p].extend(t.find_word_final(p))
else:
wf_match[p] = t.find_word_final(p)
print "Found: " + str(len(t.find_word_final(p))) + " matches in " + t.file()
# file = str(os.path.basename(t.file()))[:2]
# if file in wf_match_by_text:
# wf_match_by_text[file].append({ p : len(t.find_word_final(p))})
# else:
# wf_match_by_text[file] = [{ p : len(t.find_word_final(p))}]
wf_match_by_text = pd.DataFrame(columns=wf_ipa_equiv)
for t in texts:
counts = []
for p in wf_patterns:
counts.append(len(t.find_word_final(p)))
cs = pd.Series(counts, index=wf_ipa_equiv, name=str(os.path.basename(t.file()))[:2])
wf_match_by_text = wf_match_by_text.append(cs)
#print wf_match_by_text
word_final_counts = []
for p in wf_match:
word_final_counts.append(len(wf_match[p]))
s = pd.Series(word_final_counts, index=wf_ipa_equiv)
#print "Word-Final Occurances:"
#print s.to_string()
plt.figure()
s.plot(kind='bar')
#print s
#plt.bar(X, counts, facecolor='#9999ff', edgecolor='white')
#plt.hist(counts, len(wf_patterns))
plt.show()
plt.savefig("output/bar.png", dpi=800)
print wf_match_by_text.to_string()
plt.figure()
wf_match_by_text.plot.box()
plt.savefig("output/boxplot.png", dpi=800)
#for i in wf_match[u"ф"]:
# print i
| gpl-3.0 | -182,276,451,217,711,100 | 26.433735 | 148 | 0.621432 | false |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/pandas/core/reshape.py | 2 | 37292 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import numpy as np
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.categorical import Categorical
from pandas.core.common import (notnull, _ensure_platform_int, _maybe_promote,
isnull)
from pandas.core.groupby import get_group_index, _compress_group_index
import pandas.core.common as com
import pandas.algos as algos
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 2
b 3 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = com.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = _compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = com.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
values = [ Categorical.from_array(values[:,i],
categories=self.is_categorical.categories,
ordered=True)
for i in range(values.shape[-1]) ]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
# if our mask is all True, then we can use our existing dtype
if self.mask.all():
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
# is there a simpler / faster way of doing this?
for i in range(values.shape[1]):
chunk = new_values[:, i * width: (i + 1) * width]
mask_chunk = new_mask[:, i * width: (i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels,
labels=result_labels,
names=self.new_index_names,
verify_integrity=False)
def _unstack_multiple(data, clocs):
from pandas.core.groupby import decons_obs_group_ids
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = _compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids,
obs_ids, shape, clabels, xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = Series(data.values, index=dummy_index)
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [val if i > val else val - 1 for val in clocs]
return result
dummy = DataFrame(data.values, index=dummy_index,
columns=data.columns)
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
indexed = self.set_index([index, columns])
return indexed.unstack(columns)
else:
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([self[index],
self[columns]]))
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sortlevel(0)
return series.unstack()
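# A small, assumed example of ``pivot_simple`` (arrays invented for
# illustration; output formatting is approximate):
#
#     >>> import numpy as np
#     >>> index = np.array(['one', 'one', 'two', 'two'])
#     >>> columns = np.array(['a', 'b', 'a', 'b'])
#     >>> values = np.array([1., 2., 3., 4.])
#     >>> pivot_simple(index, columns, values)
#            a    b
#     one  1.0  2.0
#     two  3.0  4.0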
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level)
return unstacker.get_result()
def _unstack_frame(obj, level):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.ix[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns)
return unstacker.get_result()
def get_compressed_ids(labels, sizes):
from pandas.core.groupby import get_group_index
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return _compress_group_index(ids, sort=True)
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_levels.append(frame.columns)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
new_labels.append(np.tile(np.arange(K), N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
ilabels = np.arange(N).repeat(K)
clabels = np.tile(np.arange(K), N).ravel()
new_index = MultiIndex(levels=[frame.index, frame.columns],
labels=[ilabels, clabels],
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notnull(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
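# A small, assumed illustration of ``stack`` (frame invented for clarity):
# a 2x2 DataFrame becomes a Series indexed by (row label, column label).
#
#     >>> df = DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
#     >>> stack(df)
#     x  a    1
#        b    3
#     y  a    2
#        b    4
#     dtype: int64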
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level numbers, "
"not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something
we can safely pass to swaplevel:
We generally want to convert the level number into
a level name, except when columns do not have names,
in which case we must leave as a level number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sortlevel(level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[
lev.take(lab) for lev, lab in
zip(this.columns.levels[:-1], this.columns.labels[:-1])
]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
slice_len = loc.stop - loc.start
# can make more efficient?
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.ix[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.ix[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns - drop_cols
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(frame.columns.levels[level_num])
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def melt(frame, id_vars=None, value_vars=None,
var_name=None, value_name='value', col_level=None):
"""
"Unpivots" a DataFrame from wide format to long format, optionally leaving
identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> pd.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
# TODO: what about the existing index?
if id_vars is not None:
if not isinstance(id_vars, (tuple, list, np.ndarray)):
id_vars = [id_vars]
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not isinstance(value_vars, (tuple, list, np.ndarray)):
value_vars = [value_vars]
frame = frame.ix[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i for i in
range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns.get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
"""
    Reshape wide-format data to long. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2008], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team hr year
0 Red Sox 514 2007
1 Yankees 573 2007
2 Red Sox 545 2008
3 Yankees 526 2008
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
mdata[target] = com._concat_compat([data[col].values for col in names])
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notnull(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j):
"""
Wide panel to long format. Less flexible but more user-friendly than melt.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : list
A list of stub names. The wide format variables are assumed to
start with the stub names.
i : str
The name of the id variable.
j : str
The name of the subobservation variable.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable as well as
variables for i and j.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> wide_to_long(df, ["A", "B"], i="id", j="year")
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
Notes
-----
All extra variables are treated as extra id variables. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, regex):
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j):
varnames = get_var_names(df, "^" + stub)
newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub,
var_name=j)
newdf_j = newdf[j].str.replace(stub, "")
try:
newdf_j = newdf_j.astype(int)
except ValueError:
pass
newdf[j] = newdf_j
return newdf
id_vars = get_var_names(df, "^(?!%s)" % "|".join(stubnames))
if i not in id_vars:
id_vars += [i]
newdf = melt_stub(df, stubnames[0], id_vars, j)
for stub in stubnames[1:]:
new = melt_stub(df, stub, id_vars, j)
newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
return newdf.set_index([i, j])
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
Returns
-------
dummies : DataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
    >>> df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
    ...                 'C': [1, 2, 3]})
    >>> get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
See also ``Series.str.get_dummies``.
"""
from pandas.tools.merge import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(include=['object',
'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did "
"not match the length of the columns "
"being encoded ({2}).")
if com.is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
result = data.drop(columns_to_encode, axis=1)
with_dummies = [result]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre,
prefix_sep=sep, dummy_na=dummy_na)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False):
# Series avoids inconsistent NaN handling
cat = Categorical.from_array(Series(data), ordered=True)
levels = cat.categories
# if all NaN
if not dummy_na and len(levels) == 0:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
return DataFrame(index=index)
number_of_cols = len(levels)
if dummy_na:
number_of_cols += 1
dummy_mat = np.eye(number_of_cols).take(cat.codes, axis=0)
if dummy_na:
levels = np.append(cat.categories, np.nan)
else:
# reset NaN GH4446
dummy_mat[cat.codes == -1] = 0
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v)
for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {
'major': 0,
'minor': 1
}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
cat = Categorical.from_array(mapped_items.take(labels), ordered=True)
labels = cat.codes
items = cat.categories
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
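# An assumed illustration of ``make_axis_dummies`` (index invented; output
# formatting approximate): for a frame indexed by (major, minor), dummies on
# the 'minor' axis yield one indicator column per minor label.
#
#     >>> idx = MultiIndex.from_tuples([('d1', 'A'), ('d1', 'B'), ('d2', 'A')])
#     >>> frame = DataFrame({'x': [1., 2., 3.]}, index=idx)
#     >>> make_axis_dummies(frame, axis='minor')
#             A    B
#     d1 A  1.0  0.0
#        B  0.0  1.0
#     d2 A  1.0  0.0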
| gpl-2.0 | 113,699,651,385,219,840 | 32.060284 | 88 | 0.56484 | false |
sfu-discourse-lab/SFU_Comment_Extractor | Source_Code/data_cleanup/random_sample.py | 1 | 4075 | import os, random
import pandas as pd
import re
import sys
import codecs
from shutil import copyfile
from datetime import datetime
def clean_text(text):
text = text.replace("<p>", "").replace("</p>", "\n")
return re.sub('\.+', ".", text)
def filecount(dir):
return len([f for f in os.listdir(dir)])
def main(gnm_articles, article_input_dir, sample_output_dir, articles_file, comments_output_dir, comments_file):
    '''For writing article text with at least one comment to txt files'''
articles_df = pd.read_csv(gnm_articles)
#
comments_df = pd.read_csv(comments_file)
articles_with_comm = list(comments_df['article_id'].unique())
articles_df = articles_df[articles_df['article_id'].isin(articles_with_comm)]
for idx, article in articles_df.iterrows():
date = datetime.strptime(article['published_date'].split()[0], '%Y-%m-%d')
folder_name = article_input_dir + str(date.year)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
file_name = folder_name + "/" + str(article['article_id']) + ".txt"
text_file = codecs.open(file_name, "w", "utf-8")
cleaned_text = clean_text(article['article_text'])
text_file.write(cleaned_text)
text_file.close()
print("Articles with atleast one comment written to files.")
'''Getting sample articles'''
files = set()
count = pd.DataFrame.from_csv(articles_file, index_col=None, header=None)
dir = article_input_dir
output_dir = sample_output_dir
for idx, row in count.iterrows():
folder_name = dir + str(row[0])
num_of_articles = row[1]
num_of_files = filecount(folder_name)
for i in range(num_of_articles):
if num_of_files == len(files):
break
file_name = random.choice(os.listdir(folder_name))
while file_name in files:
file_name = random.choice(os.listdir(folder_name))
files.add(file_name)
year_folder = output_dir + str(row[0])
if not os.path.exists(year_folder):
os.makedirs(year_folder)
source = folder_name + "/" + file_name
copyfile(source, year_folder + "/" + file_name)
'''Getting comments from sampled articles'''
article_ids = pd.DataFrame(columns=['article_id', 'year'])
ids_list = []
    for root, directories, filenames in os.walk(sample_output_dir):
for f_name in filenames:
path = os.path.join(root, f_name)
year = re.search('_articles/(.+?)/', path)
if year:
article = {}
article['year'] = year.group(1)
article['article_id'] = f_name.split('.')[0]
ids_list.append(f_name.split('.')[0])
article_ids = article_ids.append(article, ignore_index=True)
article_ids['article_id'] = article_ids['article_id'].str.strip()
sampled_comments = comments_df[comments_df['article_id'].isin(ids_list)]
article_ids['article_id'] = article_ids['article_id'].astype('str').astype('int')
sampled_comments = pd.DataFrame.merge(sampled_comments, article_ids, on="article_id")
for idx, articles in sampled_comments.groupby('article_id'):
folder_name = comments_output_dir + str(articles['year'].iloc[0])
file_name = folder_name + "/" + str(idx) + "_comments.txt"
if not os.path.exists(folder_name):
os.makedirs(folder_name)
articles.to_csv(file_name, header=None, index=None, columns=['comment_text'])
if __name__ == "__main__":
gnm_articles = sys.argv[1] # ./CSVs/gnm_articles.csv
article_input_dir = sys.argv[2] # ./articles_w_comm/
sample_output_dir = sys.argv[3] # ./sampled_articles/
articles_file = sys.argv[4] # articles_per_year.csv
comments_output_dir = sys.argv[5] # ./sampled_comments/
comments_file = sys.argv[6] # ./CSVs/gnm_comments.csv
main(gnm_articles, article_input_dir, sample_output_dir, articles_file, comments_output_dir, comments_file)
| mit | -2,819,352,886,322,472,400 | 36.385321 | 112 | 0.61227 | false |
pierre-chaville/automlk | tests/test_transformer.py | 1 | 2090 | from abc import ABCMeta, abstractmethod
import pandas as pd
class HyperProcess(object):
__metaclass__ = ABCMeta
# abstract class for model preprocessing in hyper optimisation
@abstractmethod
def __init__(self, **params):
print(params)
self.set_params(**params)
self.transformer = None
self.__feature_names = []
@abstractmethod
def get_params(self, deep=True):
return self.params
@abstractmethod
def set_params(self, **params):
self.params = params
if 'context' in params.keys():
self.context = params['context']
else:
self.context = {}
self.t_params = {key:params[key] for key in params.keys() if key not in ['context']}
@abstractmethod
def get_feature_names(self):
return self.__feature_names
@abstractmethod
def fit(self, X, y):
# fit the transformer with the data
if isinstance(X, pd.DataFrame):
self.__feature_names = X.columns
@abstractmethod
def transform(self, X):
# fit and transform
Xt = self.transformer.transform(X)
if not isinstance(Xt, pd.DataFrame):
Xt = pd.DataFrame(Xt)
Xt.columns = self.__feature_names
return Xt
@abstractmethod
def fit_transform(self, X, y):
# fit and transform
self.fit(X, y)
return self.transform(X)
class HyperProcessLabel(HyperProcess):
# class for process categorical encoding - label encoder
def __init__(self, **params):
super().__init__(**params)
self.transformer = []
def fit(self, X, y):
self.transformer = []
for col in self.context.cat_cols:
encoder = {x: i for i, x in enumerate(X[col].unique())}
self.transformer.append((col, encoder))
def transform(self, X):
# transform X
for col, encoder in self.transformer:
X[col] = X[col].map(lambda x: encoder[x] if x in encoder else -1)
return X
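# A hedged usage sketch (the ``Ctx`` helper and the toy data are invented):
# HyperProcessLabel appears to expect a ``context`` object exposing ``cat_cols``
# with the categorical column names to encode.
#
#     class Ctx:
#         cat_cols = ['color']
#
#     enc = HyperProcessLabel(context=Ctx())
#     out = enc.fit_transform(pd.DataFrame({'color': ['red', 'blue', 'red']}), None)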
p1 = HyperProcess(a=234)
p2 = HyperProcessLabel(a=567) | mit | -1,179,461,848,617,328,400 | 26.513158 | 92 | 0.595215 | false |
phobson/seaborn | seaborn/tests/test_rcmod.py | 3 | 7875 | import numpy as np
import matplotlib as mpl
from distutils.version import LooseVersion
import nose
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from .. import rcmod, palettes, utils
class RCParamTester(object):
def flatten_list(self, orig_list):
iter_list = map(np.atleast_1d, orig_list)
flat_list = [item for sublist in iter_list for item in sublist]
return flat_list
def assert_rc_params(self, params):
for k, v in params.items():
if isinstance(v, np.ndarray):
npt.assert_array_equal(mpl.rcParams[k], v)
else:
nt.assert_equal((k, mpl.rcParams[k]), (k, v))
class TestAxesStyle(RCParamTester):
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
def test_default_return(self):
current = rcmod.axes_style()
self.assert_rc_params(current)
def test_key_usage(self):
_style_keys = set(rcmod._style_keys)
for style in self.styles:
nt.assert_true(not set(rcmod.axes_style(style)) ^ _style_keys)
def test_bad_style(self):
with nt.assert_raises(ValueError):
rcmod.axes_style("i_am_not_a_style")
def test_rc_override(self):
rc = {"axes.facecolor": "blue", "foo.notaparam": "bar"}
out = rcmod.axes_style("darkgrid", rc)
nt.assert_equal(out["axes.facecolor"], "blue")
nt.assert_not_in("foo.notaparam", out)
def test_set_style(self):
for style in self.styles:
style_dict = rcmod.axes_style(style)
rcmod.set_style(style)
self.assert_rc_params(style_dict)
def test_style_context_manager(self):
rcmod.set_style("darkgrid")
orig_params = rcmod.axes_style()
context_params = rcmod.axes_style("whitegrid")
with rcmod.axes_style("whitegrid"):
self.assert_rc_params(context_params)
self.assert_rc_params(orig_params)
@rcmod.axes_style("whitegrid")
def func():
self.assert_rc_params(context_params)
func()
self.assert_rc_params(orig_params)
def test_style_context_independence(self):
nt.assert_true(set(rcmod._style_keys) ^ set(rcmod._context_keys))
def test_set_rc(self):
rcmod.set(rc={"lines.linewidth": 4})
nt.assert_equal(mpl.rcParams["lines.linewidth"], 4)
rcmod.set()
def test_reset_defaults(self):
# Changes to the rc parameters make this test hard to manage
# on older versions of matplotlib, so we'll skip it
if LooseVersion(mpl.__version__) < LooseVersion("1.3"):
raise nose.SkipTest
rcmod.reset_defaults()
self.assert_rc_params(mpl.rcParamsDefault)
rcmod.set()
def test_reset_orig(self):
# Changes to the rc parameters make this test hard to manage
# on older versions of matplotlib, so we'll skip it
if LooseVersion(mpl.__version__) < LooseVersion("1.3"):
raise nose.SkipTest
rcmod.reset_orig()
self.assert_rc_params(mpl.rcParamsOrig)
rcmod.set()
class TestPlottingContext(RCParamTester):
contexts = ["paper", "notebook", "talk", "poster"]
def test_default_return(self):
current = rcmod.plotting_context()
self.assert_rc_params(current)
def test_key_usage(self):
_context_keys = set(rcmod._context_keys)
for context in self.contexts:
missing = set(rcmod.plotting_context(context)) ^ _context_keys
nt.assert_true(not missing)
def test_bad_context(self):
with nt.assert_raises(ValueError):
rcmod.plotting_context("i_am_not_a_context")
def test_font_scale(self):
notebook_ref = rcmod.plotting_context("notebook")
notebook_big = rcmod.plotting_context("notebook", 2)
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
for k in font_keys:
nt.assert_equal(notebook_ref[k] * 2, notebook_big[k])
def test_rc_override(self):
key, val = "grid.linewidth", 5
rc = {key: val, "foo": "bar"}
out = rcmod.plotting_context("talk", rc=rc)
nt.assert_equal(out[key], val)
nt.assert_not_in("foo", out)
def test_set_context(self):
for context in self.contexts:
context_dict = rcmod.plotting_context(context)
rcmod.set_context(context)
self.assert_rc_params(context_dict)
def test_context_context_manager(self):
rcmod.set_context("notebook")
orig_params = rcmod.plotting_context()
context_params = rcmod.plotting_context("paper")
with rcmod.plotting_context("paper"):
self.assert_rc_params(context_params)
self.assert_rc_params(orig_params)
@rcmod.plotting_context("paper")
def func():
self.assert_rc_params(context_params)
func()
self.assert_rc_params(orig_params)
class TestPalette(object):
def test_set_palette(self):
rcmod.set_palette("deep")
assert utils.get_color_cycle() == palettes.color_palette("deep", 10)
rcmod.set_palette("pastel6")
assert utils.get_color_cycle() == palettes.color_palette("pastel6", 6)
rcmod.set_palette("dark", 4)
assert utils.get_color_cycle() == palettes.color_palette("dark", 4)
rcmod.set_palette("Set2", color_codes=True)
assert utils.get_color_cycle() == palettes.color_palette("Set2", 8)
class TestFonts(object):
def test_set_font(self):
rcmod.set(font="Verdana")
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except AssertionError:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set()
def test_set_serif_font(self):
rcmod.set(font="serif")
_, ax = plt.subplots()
ax.set_xlabel("foo")
nt.assert_in(ax.xaxis.label.get_fontname(),
mpl.rcParams["font.serif"])
rcmod.set()
def test_different_sans_serif(self):
if LooseVersion(mpl.__version__) < LooseVersion("1.4"):
raise nose.SkipTest
rcmod.set()
rcmod.set_style(rc={"font.sans-serif": ["Verdana"]})
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except AssertionError:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set()
def has_verdana():
"""Helper to verify if Verdana font is present"""
# This import is relatively lengthy, so to prevent its import for
# testing other tests in this module not requiring this knowledge,
# import font_manager here
import matplotlib.font_manager as mplfm
try:
verdana_font = mplfm.findfont('Verdana', fallback_to_default=False)
except: # noqa
# if https://github.com/matplotlib/matplotlib/pull/3435
# gets accepted
return False
# otherwise check if not matching the logic for a 'default' one
try:
unlikely_font = mplfm.findfont("very_unlikely_to_exist1234",
fallback_to_default=False)
except: # noqa
# if matched verdana but not unlikely, Verdana must exist
return True
# otherwise -- if they match, must be the same default
return verdana_font != unlikely_font
| bsd-3-clause | 6,179,887,483,956,207,000 | 28.166667 | 78 | 0.596317 | false |
peterfpeterson/mantid | Framework/PythonInterface/test/python/mantid/plots/compatabilityTest.py | 3 | 8145 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
# std imports
import unittest
# third party imports
import matplotlib
matplotlib.use('AGG') # noqa
# local imports
from unittest.mock import patch
from mantid.plots._compatability import plotSpectrum, plotBin
from mantid.plots.utility import MantidAxType
class compatabilityTest(unittest.TestCase):
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_a_string(self, plot_mock):
ws_name = 'test_plotSpectrum_calls_plot_with_a_string-1'
wksp_indices = 0
plotSpectrum(ws_name,wksp_indices)
plot_mock.assert_called_once_with([ws_name],wksp_indices=[wksp_indices],errors=False, spectrum_nums=None, waterfall=None,
fig=None, overplot=False, plot_kwargs={})
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_a_list(self, plot_mock):
ws_name = ['test_plotSpectrum_calls_plot_with_a_list-1','test_plotSpectrum_calls_plot_with_a_list']
wksp_indices = list(range(10))
plotSpectrum(ws_name,wksp_indices)
plot_mock.assert_called_once_with(ws_name,wksp_indices=wksp_indices,errors=False, spectrum_nums=None, waterfall=None,
fig=None, overplot=False, plot_kwargs={})
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_errors(self, plot_mock):
ws_name = 'test_plotSpectrum_calls_plot_with_a_string-1'
wksp_indices = 0
plotSpectrum(ws_name,wksp_indices,error_bars=True)
plot_mock.assert_called_once_with([ws_name],wksp_indices=[wksp_indices],errors=True, spectrum_nums=None, waterfall=None,
fig=None, overplot=False, plot_kwargs={})
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_spectrum_numbers(self, plot_mock):
ws_name = 'test_plotSpectrum_calls_plot_with_spectrum_numbers-1'
wksp_indices = 0
plotSpectrum(ws_name,spectrum_nums=wksp_indices)
plot_mock.assert_called_once_with([ws_name],spectrum_nums=[wksp_indices],errors=False, wksp_indices=None, waterfall=None,
fig=None, overplot=False, plot_kwargs={})
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_no_line(self, plot_mock):
ws_name = 'test_plotSpectrum_calls_plot_with_no_line-1'
wksp_indices = list(range(10))
plotSpectrum(ws_name,wksp_indices,type=1)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, spectrum_nums=None, waterfall=None,
fig=None, overplot=False, plot_kwargs={'linestyle': 'None', 'marker': '.'})
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_waterfall(self, plot_mock):
ws_name = 'test_plotSpectrum_calls_plot_with_waterfall-1'
wksp_indices = list(range(10))
plotSpectrum(ws_name,wksp_indices,waterfall=True)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, spectrum_nums=None, waterfall=True,
fig=None, overplot=False, plot_kwargs={})
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_clear_window(self, plot_mock):
ws_name = 'test_plotSpectrum_calls_plot_with_clear_window-1'
wksp_indices = list(range(10))
fake_window = "this is a string representing a fake plotting window"
plotSpectrum(ws_name,wksp_indices,window=fake_window,clearWindow=True)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, spectrum_nums=None, waterfall=None,
fig=fake_window, overplot=False, plot_kwargs={})
@patch('mantid.plots._compatability.plot')
def test_plotSpectrum_calls_plot_with_overplot(self, plot_mock):
ws_name = 'test_plotSpectrum_calls_plot_with_overplot-1'
wksp_indices = list(range(10))
fake_window = "this is a string representing a fake plotting window"
plotSpectrum(ws_name,wksp_indices,window=fake_window)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, spectrum_nums=None, waterfall=None,
fig=fake_window, overplot=True, plot_kwargs={})
@patch('mantid.plots._compatability.plot')
def test_plotBin_calls_plot_with_a_string(self, plot_mock):
ws_name = 'test_plotBin_calls_plot_with_a_string-1'
wksp_indices = 0
plotBin(ws_name,wksp_indices)
plot_mock.assert_called_once_with([ws_name],wksp_indices=[wksp_indices],errors=False, waterfall=None,
fig=None, overplot=False, plot_kwargs={'axis': MantidAxType.BIN})
@patch('mantid.plots._compatability.plot')
def test_plotBin_calls_plot_with_a_list(self, plot_mock):
ws_name = ['test_plotBin_calls_plot_with_a_list-1','test_plotBin_calls_plot_with_a_list']
wksp_indices = list(range(10))
plotBin(ws_name,wksp_indices)
plot_mock.assert_called_once_with(ws_name,wksp_indices=wksp_indices,errors=False, waterfall=None,
fig=None, overplot=False, plot_kwargs={'axis': MantidAxType.BIN})
@patch('mantid.plots._compatability.plot')
def test_plotBin_calls_plot_with_errors(self, plot_mock):
ws_name = 'test_plotBin_calls_plot_with_a_string-1'
wksp_indices = 0
plotBin(ws_name,wksp_indices,error_bars=True)
plot_mock.assert_called_once_with([ws_name],wksp_indices=[wksp_indices],errors=True, waterfall=None,
fig=None, overplot=False, plot_kwargs={'axis': MantidAxType.BIN})
@patch('mantid.plots._compatability.plot')
def test_plotBin_calls_plot_with_no_line(self, plot_mock):
ws_name = 'test_plotBin_calls_plot_with_no_line-1'
wksp_indices = list(range(10))
plotBin(ws_name,wksp_indices,type=1)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, waterfall=None,
fig=None, overplot=False,
plot_kwargs={'axis': MantidAxType.BIN, 'linestyle': 'None', 'marker': '.'})
@patch('mantid.plots._compatability.plot')
def test_plotBin_calls_plot_with_waterfall(self, plot_mock):
ws_name = 'test_plotBin_calls_plot_with_waterfall-1'
wksp_indices = list(range(10))
plotBin(ws_name,wksp_indices,waterfall=True)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, waterfall=True,
fig=None, overplot=False, plot_kwargs={'axis': MantidAxType.BIN})
@patch('mantid.plots._compatability.plot')
def test_plotBin_calls_plot_with_clear_window(self, plot_mock):
ws_name = 'test_plotBin_calls_plot_with_clear_window-1'
wksp_indices = list(range(10))
fake_window = "this is a string representing a fake plotting window"
plotBin(ws_name,wksp_indices,window=fake_window,clearWindow=True)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, waterfall=None,
fig=fake_window, overplot=False, plot_kwargs={'axis': MantidAxType.BIN})
@patch('mantid.plots._compatability.plot')
def test_plotBin_calls_plot_with_overplot(self, plot_mock):
ws_name = 'test_plotBin_calls_plot_with_overplot-1'
wksp_indices = list(range(10))
fake_window = "this is a string representing a fake plotting window"
plotBin(ws_name,wksp_indices,window=fake_window)
plot_mock.assert_called_once_with([ws_name],wksp_indices=wksp_indices,errors=False, waterfall=None,
fig=fake_window, overplot=True, plot_kwargs={'axis': MantidAxType.BIN})
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,971,175,596,392,354,000 | 48.066265 | 129 | 0.675384 | false |
bmazin/ARCONS-pipeline | photonlist/RADecImage2.py | 1 | 28302 | '''
Author: Julian van Eyken Date: May 15 2013
Package/class for handling of images created from photon lists that are derotated and
mapped to sky coordinates.
**Temporary version 2 of RADecImage, attempting to reduce memory useage a bit.
This is currently not the main version to use!**
'''
import time
import numpy as np
import tables
import matplotlib.pyplot as mpl
import hotpix.hotPixels as hp
from util import utils
import photonlist.photlist as pl
from photonlist import boxer
from astrometry import CalculateRaDec as crd
from headers import pipelineFlags
class RADecImage(object):
'''
Class to hold derotated, integrated and possibly stacked images, in the sky coordinate
frame.
'''
def __init__(self,photList=None,nPixRA=None,nPixDec=None,cenRA=None,cenDec=None,
vPlateScale=0.1, detPlateScale=None, firstSec=0, integrationTime=-1):
#expWeightTimeStep=1.0):
'''
Initialise a (possibly empty) RA-dec coordinate frame image.
INPUTS:
photList: optionally provide a PhotList object from which to create an
image (see photonlist.photlist)
nPixRA, nPixDec: integers, Number of pixels in RA and Dec directions
for the virtual image.
cenRA, cenDec: floats, location of center of virtual image in RA and
dec (both in radians)
vPlateScale: float, plate scale for virtual image (arcseconds per
virtual image pixel). Note that the attribute, self.vPlateScale
is stored in *radians* per pixel, as is self.detPlateScale (plate
scale for the detector pixels).
detPlateScale: override the assumed detector plate scale (arcseconds
per detector pixel)
firstSec: float, time from beginning of photon-list file at which to
begin integration
integrationTime: float, length of time to integrate for in seconds. If
-1, integrate to end of photon list.
#### DEPRECATED #####
expWeightTimeStep: float, time step to use when calculating exposure
time weights for the virtual pixels (seconds).
#####################
'''
self.nPixRA = nPixRA #No. of virtual pixels in RA direction
self.nPixDec = nPixDec #No. of virtual pixels in dec. direction
self.cenRA = cenRA #RA location of center of field (radians)
self.cenDec = cenDec #Dec location of center of field (rad)
self.vPlateScale = vPlateScale*2*np.pi/1296000 #No. of radians on sky per virtual pixel.
self.imageIsLoaded = False #Flag to indicate whether an actual image has been loaded yet.
if detPlateScale is None:
self.detPlateScale = crd.CalculateRaDec.platescale*2*np.pi/1296000 #Radians per detector pixel. ******For now - but this really needs reading in from the photon list file.
else:
self.detPlateScale = detPlateScale
if nPixRA is not None and nPixDec is not None:
self.image = np.empty((self.nPixDec,self.nPixRA),dtype=float) #To take a (possibly stacked) image in virtual
self.image.fill(np.nan)
self.effIntTimes = np.empty_like(self.image) #Effective integration times for each pixel, in seconds.
self.effIntTimes.fill(np.nan)
self.expTimeWeights = np.empty((self.nPixDec,self.nPixRA),dtype=float) #Weights for each pixel in the virtual image to account for effective integration time on each pixel.
self.expTimeWeights.fill(np.nan)
self.gridRA = np.empty((self.nPixRA),dtype=float) #Virtual pixel boundaries in the RA direction
self.gridRA.fill(np.nan)
self.gridDec = np.empty((self.nPixDec),dtype=float) #Virtual pixel boundaries in the dec. direction.
self.gridDec.fill(np.nan)
self.totExpTime = np.nan #Total exposure time included in current image
#self.expWeightTimeStep = expWeightTimeStep
else:
self.image = None
self.effIntTimes = None
self.expTimeWeights = None
self.gridRA = None
self.gridDec = None
self.totExpTime = None
#self.expWeightTimeStep = expWeightTimeStep
if (cenRA is not None and cenDec is not None and vPlateScale is not None
and nPixRA is not None and nPixDec is not None):
self.setCoordGrid()
if photList is not None:
self.loadImage(photList,firstSec=firstSec,integrationTime=integrationTime)
def setCoordGrid(self):
'''
Establish RA and dec coordinates for pixel boundaries in the virtual pixel grid,
given the number of pixels in each direction (self.nPixRA and self.nPixDec), the
location of the centre of the array (self.cenRA, self.cenDec), and the plate scale
(self.vPlateScale).
'''
#self.gridRA = np.empty((self.nPixDec,self.nPixRA),dtype=float)
#self.gridRA.fill(np.nan)
#self.gridDec = np.empty((self.nPixDec,self.nPixRA),dtype=float)
#self.gridDec.fill(np.nan)
#Note - +1's are because these are pixel *boundaries*, not pixel centers:
self.gridRA = self.cenRA + (self.vPlateScale*(np.arange(self.nPixRA+1) - ((self.nPixRA+1)//2)))
self.gridDec = self.cenDec + (self.vPlateScale*(np.arange(self.nPixDec+1) - ((self.nPixDec+1)//2)))
def loadImage(self,photList,firstSec=0,integrationTime=-1,wvlMin=None,wvlMax=None,
doStack=False, #expWeightTimeStep=None,
savePreStackImage=None, doWeighted=True): #savePreStackImage is temporary for test purposes
'''
Build a de-rotated stacked image from a photon list (PhotList) object.
If the RADecImage instance already contains an image, the new image is added to it.
INPUTS:
photList - a PhotList object from which to construct the image.
firstSec - time from start of exposure to start the 'integration' for the image (seconds)
integrationTime - duration of integration time to include in the image (in seconds; -1 or NaN => to end of exposure)
wvlMin, wvlMax - min and max wavelengths of photons to include in the image (Angstroms).
doStack - boolean; if True, then stack the image to be loaded on top of any image data already present.
#### DEPRECATED - NOW GETS TIME STEPS STRAIGHT FROM CENTROID LIST FILES #####
expWeightTimeStep - see __init__. If set here, overrides any value already set in the RADecImage object.
If the new image is being stacked on top of a current image, a new value can be
supplied that is different from the current image's value; but only the last value used
(i.e. the one supplied) will be stored in the class attribute.
################################
wvlMin, wvlMax - set min and max wavelength cutoffs for photons to be loaded in.
savePreStackImage - temporary fudge, set to a file-name to save the image out to a file prior to stacking.
doWeighted - if True, includes flat and flux weighting (i.e. flatfielding and spectral response)factors from photons,
and rejects photons from pixels where the flatfield is bad at any wavelength within the requested
wavelength range (all if wvlMin/wvl Max not specified).
****NOTE - FLUX WEIGHTING NOT FULLY TESTED -- but looks probably okay.****
'''
#posErr = 0.8 #Approx. position error in arcsec (just a fixed estimate for now, will improve later)
#posErr *= 2*np.pi/(60.*60.*360.) #Convert to radians
imLoadTic = time.clock()
photTable = photList.file.root.photons.photons #Shortcut to table
#if expWeightTimeStep is not None:
# self.expWeightTimeStep=expWeightTimeStep
if wvlMin is not None and wvlMax is None: wvlMax = np.inf
if wvlMin is None and wvlMax is not None: wvlMin = 0.0
#Figure out last second of integration
obsFileExpTime = photList.header.cols.exptime[0]
if integrationTime==-1 or firstSec+integrationTime > obsFileExpTime:
lastSec = obsFileExpTime
else:
lastSec = firstSec+integrationTime
#If virtual coordinate grid is not yet defined, figure it out.
if self.gridRA is None or self.gridDec is None:
#Find RA/dec range needed, taking advantage of the fact that the ra/dec columns are (or should be) indexed....
print 'Finding RA/dec ranges'
self.raMin = photTable.cols.ra[photTable.colindexes['ra'][0]]
self.raMax = photTable.cols.ra[photTable.colindexes['ra'][-1]]
self.decMin = photTable.cols.dec[photTable.colindexes['dec'][0]]
self.decMax = photTable.cols.dec[photTable.colindexes['dec'][-1]]
self.cenRA = (self.raMin+self.raMax)/2.0
self.cenDec = (self.decMin+self.decMax)/2.0
#Set size of virtual grid to accommodate.
if self.nPixRA is None:
#+1 for round up; +1 because coordinates are the boundaries of the virtual pixels, not the centers.
self.nPixRA = int((self.raMax-self.raMin)//self.vPlateScale + 2)
if self.nPixDec is None:
self.nPixDec = int((self.decMax-self.decMin)//self.vPlateScale + 2)
self.setCoordGrid()
#Short-hand notations for no. of detector and virtual pixels, just for clarity:
nDPixRow,nDPixCol = photList.nRow,photList.nCol
nVPixRA,nVPixDec = self.nPixRA,self.nPixDec
#Calculate ratio of virtual pixel area to detector pixel area
vdPixAreaRatio = (self.vPlateScale/self.detPlateScale)**2
#Make a boolean mask of dead (non functioning for whatever reason) pixels
#True (1) = good; False (0) = dead
#First on the basis of the wavelength cals:
wvlCalFlagImage = photList.getBadWvlCalFlags()
deadPixMask = np.where(wvlCalFlagImage == pipelineFlags.waveCal['good'], 1, 0) #1.0 where flag is good; 0.0 otherwise. (Straight boolean mask would work, but not guaranteed for Python 4....)
#Next on the basis of the flat cals (or all ones if weighting not requested)
if doWeighted:
flatCalFlagArray = photList.file.root.flatcal.flags.read() # 3D array - nRow * nCol * nWavelength Bins.
flatWvlBinEdges = photList.file.root.flatcal.wavelengthBins.read() # 1D array of wavelength bin edges for the flat cal.
lowerEdges = flatWvlBinEdges[0:-1]
upperEdges = flatWvlBinEdges[1:]
if wvlMin is None and wvlMax is None:
inRange = np.ones(len(lowerEdges),dtype=bool) # (all bins in range implies all True)
else:
inRange = ((lowerEdges >= wvlMin) & (lowerEdges < wvlMax) |
(upperEdges >= wvlMin) & (lowerEdges < wvlMax))
flatCalMask = np.where(np.all(flatCalFlagArray[:,:,inRange]==False, axis=2), 1, 0) # Should be zero where any pixel has a bad flag at any wavelength within the requested range; one otherwise. Spot checked, seems to work.
else:
flatCalMask = np.ones((nDPixRow,nDPixCol))
#If hot pixels time-mask data not already parsed in, then parse it.
if photList.hotPixTimeMask is None:
photList.parseHotPixTimeMask() #Loads time mask dictionary into photList.hotPixTimeMask
#First find start/end times of each timestep ('frame') for calculating effective exp. times
#and for subdividing the image data (the latter is only needed for the purposes of
#splitting the data into small chunks so it'll fit in memory easily).
#Use the same timesteps as used in calculating the astrometry.
tStartFramesAll = np.array(photList.file.root.centroidList.times.read()) #Convert to array, since it's saved as a list.
tEndFramesAll = np.append(tStartFramesAll[1:], np.inf) #Last frame goes on forever as far as we know at the moment
withinIntegration = ((tStartFramesAll < lastSec) & (tEndFramesAll > firstSec))
tStartFrames = tStartFramesAll[withinIntegration].clip(min=firstSec) #Now clip so that everything is within the requested integration time.
tEndFrames = tEndFramesAll[withinIntegration].clip(max=lastSec)
nFrames = len(tStartFrames)
assert nFrames > 0 #Otherwise we have a problem....
assert np.all(tStartFrames <= lastSec) and np.all(tEndFrames >= firstSec)
#Get x,y locations of detector pixel corners (2D array of each x,y value, in detector space)
dPixXmin = np.indices((nDPixRow,nDPixCol))[1] - 0.5
dPixXmax = np.indices((nDPixRow,nDPixCol))[1] + 0.5
dPixYmin = np.indices((nDPixRow,nDPixCol))[0] - 0.5
dPixYmax = np.indices((nDPixRow,nDPixCol))[0] + 0.5
dPixXminFlat = dPixXmin.flatten() #Flattened versions of the same since getRaDec() only works on flat arrays.
dPixXmaxFlat = dPixXmax.flatten()
dPixYminFlat = dPixYmin.flatten()
dPixYmaxFlat = dPixYmax.flatten()
#Create (1D) arrays for normalised center locations of virtual pixel grid (=index numbers, representing location of unit squares)
vPixRANormCen = np.arange(nVPixRA) #np.indices(nVPixDec,nVPixRA)[1]
vPixDecNormCen = np.arange(nVPixDec) #np.indices(nVPixDec,nVPixRA)[0]
#Create 1D arrays marking edges of virtual pixels (in 'normalised' space...)
vPixRANormMin = np.arange(nVPixRA)-0.5
vPixRANormMax = np.arange(nVPixRA)+0.5
vPixDecNormMin = np.arange(nVPixDec)-0.5
vPixDecNormMax = np.arange(nVPixDec)+0.5
#Find origin of virtual array (center of virtual pixel 0,0) in RA/dec space.
vPixOriginRA = np.mean(self.gridRA[0:2])
vPixOriginDec = np.mean(self.gridDec[0:2])
vPixSize = self.vPlateScale #Short hand, Length of side of virtual pixel in radians (assume square pixels)
#Make arrays to take the total exposure times and image data for each virtual pixel at each time step
vExpTimesStack = np.zeros((nVPixDec,nVPixRA,nFrames))
imageStack = np.zeros((nVPixDec,nVPixRA,nFrames))
#And one for the total exposure time at each pixel summed over all time steps
vExpTimes = np.zeros((nVPixDec,nVPixRA))
#Array to hold list of (equal) timestamps for each pixel at each timestep
#(just for calculating the RA/dec coordinates of the pixel corners)
frameTimeFlat = np.zeros((nDPixRow*nDPixCol)) #Also flat array for the purposes of getRaDec()
frameTimeFlat.fill(np.nan)
#Initialise RA/dec calculations of pixel locations for exposure time weighting
raDecCalcObject = crd.CalculateRaDec(photList.file.root.centroidList)
#------------ Loop through the time steps ----------
for iFrame in range(nFrames):
print 'Time slice: ',iFrame+1, '/', nFrames
#-------------Make image for this time step-----------
#Get the photons
print 'Getting photon coords'
print 'wvlMin, wvlMax: ',wvlMin,wvlMax
if wvlMin is None:
assert wvlMin is None and wvlMax is None
print '(getting all wavelengths)'
tic = time.clock()
strt, fin = tStartFrames[iFrame], tEndFrames[iFrame] #Just because Numexpr can't handle indexing, it seems
photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<fin)')
#photons = np.array([row.fetch_all_fields() for row in photTable.where('(arrivalTime>=strt) & (arrivalTime<=fin)')])
#photIndices = photTable.getWhereList('(arrivalTime>=strt) & (arrivalTime<=fin)')
print 'Time taken (s): ',time.clock()-tic
            else:
                assert wvlMin is not None and wvlMax is not None
                print '(trimming wavelength range) '
                strt, fin = tStartFrames[iFrame], tEndFrames[iFrame]   #Needed in this branch too, since Numexpr can't handle indexing
                photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<fin) & (wavelength>=wvlMin) & (wavelength<=wvlMax)')
#Filter out photons to be masked out on the basis of detector pixel
print 'Finding bad detector pixels...'
            detPixMask = deadPixMask * flatCalMask     #Combine wave cal pixel mask and flat cal mask (should be the same in an ideal world, but not necessarily identical in practice).
whereBad = np.where(detPixMask == 0)
badXY = pl.xyPack(whereBad[0],whereBad[1]) #Array of packed x-y values for bad pixels (CHECK X,Y THE RIGHT WAY ROUND!)
allPhotXY = photons['xyPix'] #Array of packed x-y values for all photons
#Get a boolean array indicating photons whose packed x-y coordinate value is in the 'bad' list.
toReject = np.where(np.in1d(allPhotXY,badXY))[0] #Zero to take index array out of the returned 1-element tuple.
#Chuck out the bad photons
print 'Rejecting photons from bad pixels...'
photons = np.delete(photons,toReject)
#Pull out needed information
print 'Pulling out relevant columns'
photRAs = photons['ra'] #Read all photon coords into an RA and a dec array.
photDecs = photons['dec']
photHAs = photons['ha'] #Along with hour angles...
photWeights = photons['flatWeight'] * photons['fluxWeight'] #********EXPERIMENTING WITH ADDING FLUX WEIGHT - NOT FULLY TESTED, BUT SEEMS OKAY....********
print 'INCLUDING FLUX WEIGHTS!'
            photWavelengths = photons['wavelength']
            if savePreStackImage is not None:
                photXpix = photons['xPix']    #Keep detector coords for the diagnostic image, since 'photons' is deleted next
                photYpix = photons['yPix']
            del(photons)    #Not needed till next iteration, and it takes up a lot of memory....
if wvlMin is not None or wvlMax is not None:
assert all(photWavelengths>=wvlMin) and all(photWavelengths<=wvlMax)
print 'Min, max photon wavelengths found: ', np.min(photWavelengths), np.max(photWavelengths)
nPhot = len(photRAs)
#Add uniform random dither to each photon, distributed over a square
#area of the same size and orientation as the originating pixel at
#the time of observation.
xRand = np.random.rand(nPhot)*self.detPlateScale-self.detPlateScale/2.0
yRand = np.random.rand(nPhot)*self.detPlateScale-self.detPlateScale/2.0 #Not the same array!
ditherRAs = xRand*np.cos(photHAs) - yRand*np.sin(photHAs)
ditherDecs = yRand*np.cos(photHAs) + xRand*np.sin(photHAs)
photRAs=photRAs+ditherRAs
photDecs=photDecs+ditherDecs
#Make the image for this time slice
if doWeighted:
print 'Making weighted image'
imageStack[:,:,iFrame],thisGridDec,thisGridRA = np.histogram2d(photDecs,photRAs,[self.gridDec,self.gridRA],
weights=photWeights)
else:
print 'Making unweighted image'
imageStack[:,:,iFrame],thisGridDec,thisGridRA = np.histogram2d(photDecs,photRAs,[self.gridDec,self.gridRA])
            if savePreStackImage is not None:
                saveName = 'det'+str(tStartFrames[iFrame])+'-'+str(tEndFrames[iFrame])+'s-'+savePreStackImage
                print 'Making det-frame image for diagnostics: '+saveName
                detImSlice = np.histogram2d(photYpix,photXpix)[0]   #Take just the counts array returned by histogram2d
                mpl.imsave(fname=saveName,arr=detImSlice,origin='lower',
                           cmap=mpl.cm.gray,vmin=np.percentile(detImSlice, 0.5), vmax=np.percentile(detImSlice,99.5))
#----------Now start figuring out effective exposure times for each virtual pixel----------------
#And start figuring out the exposure time weights....
print 'Calculating effective exposure times'
#Calculate detector pixel corner locations in RA/dec space (needs to be clockwise in RA/dec space! (checked, gives +ve answers).
frameTimeFlat.fill(tStartFrames[iFrame])
dPixRA1,dPixDec1,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXminFlat,dPixYminFlat) #dPix* should all be flat
dPixRA2,dPixDec2,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXminFlat,dPixYmaxFlat)
dPixRA3,dPixDec3,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXmaxFlat,dPixYmaxFlat)
dPixRA4,dPixDec4,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXmaxFlat,dPixYminFlat)
#Normalise to scale where virtual pixel size=1 and origin is the origin of the virtual pixel grid
dPixNormRA1 = (dPixRA1 - vPixOriginRA)/vPixSize #dPixNorm* should all be flat.
dPixNormRA2 = (dPixRA2 - vPixOriginRA)/vPixSize
dPixNormRA3 = (dPixRA3 - vPixOriginRA)/vPixSize
dPixNormRA4 = (dPixRA4 - vPixOriginRA)/vPixSize
dPixNormDec1 = (dPixDec1 - vPixOriginDec)/vPixSize
dPixNormDec2 = (dPixDec2 - vPixOriginDec)/vPixSize
dPixNormDec3 = (dPixDec3 - vPixOriginDec)/vPixSize
dPixNormDec4 = (dPixDec4 - vPixOriginDec)/vPixSize
#Get min and max RA/decs for each of the detector pixels
dPixCornersRA = np.array([dPixNormRA1,dPixNormRA2,dPixNormRA3,dPixNormRA4]) #2D array, 4 by nRow*nCol - should be clockwise, I think!
dPixCornersDec = np.array([dPixNormDec1,dPixNormDec2,dPixNormDec3,dPixNormDec4])
#dPixCornersRA = np.array([dPixNormRA4,dPixNormRA3,dPixNormRA2,dPixNormRA1]) #2D array, 4 by nRow*nCol - reversed, but gives -ve results, so prob. anti-clockwise....
#dPixCornersDec = np.array([dPixNormDec4,dPixNormDec3,dPixNormDec2,dPixNormDec1])
dPixRANormMin = dPixCornersRA.min(axis=0) #Flat 1D array, nRow * nCol
dPixRANormMax = dPixCornersRA.max(axis=0)
dPixDecNormMin = dPixCornersDec.min(axis=0)
dPixDecNormMax = dPixCornersDec.max(axis=0)
#Get array of effective exposure times for each detector pixel based on the hot pixel time mask
#Multiply by the bad pixel mask and the flatcal mask so that non-functioning pixels have zero exposure time.
#Flatten the array in the same way as the previous arrays (1D array, nRow*nCol elements).
detExpTimes = (hp.getEffIntTimeImage(photList.hotPixTimeMask, integrationTime=tEndFrames[iFrame]-tStartFrames[iFrame],
firstSec=tStartFrames[iFrame]) * detPixMask).flatten()
#Loop over the detector pixels and accumulate the exposure time that falls in each
#tic = time.clock()
for iDPix in np.arange(nDPixRow * nDPixCol):
#Find the pixels which are likely to be overlapping (note - could do this as a sorted search to make things faster)
maybeOverlappingRA = np.where((dPixRANormMax[iDPix] > vPixRANormMin) & (dPixRANormMin[iDPix] < vPixRANormMax))[0]
maybeOverlappingDec = np.where((dPixDecNormMax[iDPix] > vPixDecNormMin) & (dPixDecNormMin[iDPix] < vPixDecNormMax))[0]
for overlapLocRA in maybeOverlappingRA:
for overlapLocDec in maybeOverlappingDec:
overlapFrac = boxer.boxer(overlapLocDec,overlapLocRA,dPixCornersDec[:,iDPix],dPixCornersRA[:,iDPix])
expTimeToAdd = overlapFrac*detExpTimes[iDPix]
vExpTimesStack[overlapLocDec,overlapLocRA,iFrame] += expTimeToAdd
#print 'Time taken (s): ',time.clock()-tic
#------------ End loop through time steps ----------
#Sum up the exposure times from each frame:
vExpTimes = np.sum(vExpTimesStack,axis=2)
thisImage = np.sum(imageStack,axis=2)
#Check that wherever the exposure time is zero, there are no photons that have not been rejected
#assert np.all(thisImage[vExpTimes==0] == 0)
#assert 1==0
#Temporary for testing-------------
if savePreStackImage is not None:
print 'Saving pre-stacked image to '+savePreStackImage
mpl.imsave(fname=savePreStackImage,arr=thisImage,origin='lower',cmap=mpl.cm.gray,
vmin=np.percentile(thisImage, 0.5), vmax=np.percentile(thisImage,99.5))
#---------------------------------
if self.imageIsLoaded is False or doStack is False:
self.image = thisImage #For now, let's keep it this way.... Since weighting does odd things.
self.effIntTimes = vExpTimes
self.totExpTime = lastSec-firstSec
self.expTimeWeights = self.totExpTime/self.effIntTimes
self.vExpTimesStack = vExpTimesStack #TEMPORARY FOR DEBUGGING PURPOSES
self.imageIsLoaded = True
else:
assert self.imageIsLoaded == True
print 'Stacking'
self.image += thisImage
self.effIntTimes += vExpTimes
self.totExpTime += lastSec-firstSec
self.expTimeWeights = self.totExpTime/self.effIntTimes
print 'Image load done. Time taken (s): ', time.clock()-imLoadTic
def display(self,normMin=None,normMax=None,expWeight=True,pclip=None,colormap=mpl.cm.gnuplot2,
image=None, logScale=False):
'''
Display the current image. Currently just a short-cut to utils.plotArray,
but needs updating to mark RA and Dec on the axes.
'''
if expWeight:
toDisplay = np.copy(self.image*self.expTimeWeights)
else:
toDisplay = np.copy(self.image)
if logScale is True: toDisplay = np.log10(toDisplay)
if image is not None: toDisplay = image
if pclip:
normMin = np.percentile(toDisplay[np.isfinite(toDisplay)],q=pclip)
normMax = np.percentile(toDisplay[np.isfinite(toDisplay)],q=100.0-pclip)
#Display NaNs as zeros so it looks better
toDisplay[np.isnan(toDisplay)] = 0
#Find the coordinates of the centers of the virtual pixels in degrees
#raMin = (self.gridRA[0:-1] + self.gridRA[1:])/2.0 / np.pi * 180.
#dec = (self.gridDec[0:-1] + self.gridDec[1:])/2.0 / np.pi * 180.
utils.plotArray(toDisplay,cbar=True,normMin=normMin,normMax=normMax,colormap=colormap)
#mpl.imshow(toDisplay,vmin=normMin,vmax=normMax, extent=(180./np.pi)*
# np.array([self.gridRA[0],self.gridRA[-1],self.gridDec[0],self.gridDec[-1]])
def test(photListFileName='/Users/vaneyken/Data/UCSB/ARCONS/Palomar2012/corot18/testPhotonList-blosc.h5',
vPlateScale=0.1, integrationTime=-1,firstSec=0):
photList = tables.openFile(photListFileName,mode='r')
try:
im = RADecImage(photList,vPlateScale=vPlateScale,firstSec=firstSec,
integrationTime=integrationTime)
finally:
print 'Closing phot. list file.'
photList.close()
im.display()
return im
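def _virtualGridSketch():
    '''
    Editor's sketch (not part of the original module): a standalone
    illustration of the virtual-pixel boundary grid that
    RADecImage.setCoordGrid() builds, for assumed values of nPixRA=4
    virtual pixels, a centre RA of 1.0 rad and a 0.1 arcsec plate scale.
    '''
    nPixRA, cenRA = 4, 1.0
    vPlateScale = 0.1*2*np.pi/1296000   #Radians on sky per virtual pixel
    #nPixRA+1 values, because these are pixel *boundaries*, not centers:
    return cenRA + (vPlateScale*(np.arange(nPixRA+1) - ((nPixRA+1)//2)))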
| gpl-2.0 | -5,484,837,501,403,567,000 | 58.209205 | 232 | 0.618578 | false |
MG-RAST/kmerspectrumanalyzer | scripts/plotkmerspectrum.py | 1 | 7381 | #!/usr/bin/env python
'''Tool to generate graphs of kmer spectra'''
import sys
import os
import argparse
import numpy as np
import matplotlib as mpl
from ksatools.ksatools import getmgrkmerspectrum, printstats, loadfile, makegraphs
def main(filename, opt=6, label=None, n=0, opts=None, colorlist=[],
stylelist=None):
'''loads file and invokes makegraphs and printstats.
Appends graphics from each file onto the figure.
opt is a symbol for the graph type;
n is the serial number of successful traces.'''
logfh = open(opts.logfile, "a")
if opts.filetype.upper() == "MGM":
spectrum = getmgrkmerspectrum(filename, mgrkey=MGRKEY)
elif opts.filetype == "file":
spectrum = loadfile(filename)
else:
raise ValueError(
"%s is invalid type (valid types are mgm and file)"
% opts.filetype)
if spectrum == []: # Abort this trace--but try to graph the others
return n
if label is None:
label = filename
if spectrum.shape[1] > 0:
spectrum = spectrum[np.lexsort((spectrum[:, 1], spectrum[:, 0]))]
sys.stderr.write("Making graphs for %s\n" % filename)
try:
makegraphs(
spectrum, filename, option=opt, label=label, n=n,
dump=opts.dump, opts=opts, colorlist=colorlist,
stylelist=stylelist)
# sys.stderr.write("Printing stats in logfile %s %d\n" %
# (opts.logfile, n))
printstats(spectrum, filename, filehandle=logfh, n=n)
n += 1
except ValueError as err: # This catches no data or defective data
sys.stderr.write("Error printing stats for %s\n" % filename)
else:
sys.stderr.write("Error with dataset %s\n" % filename)
return n
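# Editor's note: hedged usage reminder, not part of the original script.
# Typical invocations (file names are illustrative only):
#   plotkmerspectrum.py -g 6 sample1.histo sample2.histo
#   plotkmerspectrum.py -l targets.txt -g 5 -w png
# where targets.txt is tab-separated: <path> <label> [<color> [<linestyle>]],
# matching the parsing loop in the __main__ block below.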
if __name__ == '__main__':
usage = '''usage: plotkmerspectrum.py [options] <datafile> [<datafile2> <datafile3>...]
plotkmerspectrum.py [options] -l <file containing targets, labels> '''
GRAPHNUMBERDESCRIPTION = '''-3: No graphs, produce stratify one-line summary
-2: No graphs, but produce stratify table
-1: no graphs, only append summary to kmers.log
0 : number of kmers vs. kmer abundance (basic spectrum)
1 : kmers observed vs. kmer abundance (scaled spectrum)
2 : kmer abundance vs. basepairs observed
3 : kmer abundance vs. fraction of observed data
4 : kmer abundance vs. fraction of distinct kmers
5 : fraction of observed vs. kmer rank (kmer k-dominance curve)
6 : kmer abundance vs. kmer rank (kmer rank-abundance)
24: band-colored variant
25: band-colored variant of kmer k-dominance curve
26: band-colored variant of kmer rank-abundance curve
30: Renyi entropy (transformation, function of lambda)
'''
parser = argparse.ArgumentParser(description=usage)
parser.add_argument(
"files", type=str, nargs='*', help="List of files")
parser.add_argument(
"-d", "--dump", dest="dump", action="store_true",
default=False, help="dump table with outputs")
parser.add_argument(
"-v", "--verbose", dest="verbose", action="store_true",
default=False, help="verbose")
parser.add_argument(
"-o", "--outfile", dest="outfile", action="store",
default=None, help="dump table with outputs ")
parser.add_argument(
"-g", "--graph", dest="option", action="store", type=int,
default="6", help=GRAPHNUMBERDESCRIPTION)
parser.add_argument(
"-i", "--interactive", dest="interactive", action="store_true",
default=False, help="interactive mode--draw window")
parser.add_argument(
"-l", "--list", dest="filelist",
default=None, help="file containing list of targets and labels")
parser.add_argument(
"-t", "--type", dest="filetype",
default="file", help="type for file list (file,mgm)")
parser.add_argument(
"-w", "--writetype", dest="writetype",
default="pdf", help="file type for output (pdf,png)")
parser.add_argument(
"-a", "--appendlogfile", dest="logfile",
default="kmers.log", help="logfile for summary statistics")
parser.add_argument(
"-s", "--suppresslegend", dest="suppress", action="store_true",
        default=False, help="suppress display of legend")
parser.add_argument(
"-n", "--name", dest="name",
default=None, help="Name for graph, graph title")
parser.add_argument(
"-x", "--xlabel", dest="xlabel",
default=None, help="X label override")
parser.add_argument(
"-y", "--ylabel", dest="ylabel",
default=None, help="Y label override")
opts = parser.parse_args()
graphtype = opts.option
writetype = opts.writetype
if len(opts.files) == 0 and not opts.filelist:
sys.exit("Missing input file argument!\n" + usage)
assert writetype == "png" or writetype == "pdf" or writetype == "eps"
if opts.outfile:
imagefilename = "%s.%d.%s" % (opts.outfile, graphtype, writetype)
elif opts.filelist:
imagefilename = "%s.%d.%s" % (opts.filelist, graphtype, writetype)
else:
imagefilename = "%s.%d.%s" % (opts.files[0], graphtype, writetype)
sys.stderr.write("Warning, using default filename %s\n" %
(imagefilename,))
# only invoke interactive backend if requested with -i
# this stabilizes behavior on non-interactive terminals
if not opts.interactive:
mpl.use("Agg")
else:
mpl.use('TkAgg')
import matplotlib.pyplot as plt
fig = plt.figure()
fig.patch.set_facecolor('white')
if opts.filetype == "mgm":
try:
MGRKEY = os.environ["MGRKEY"]
except KeyError:
MGRKEY = ""
graphcount = 0
# Loop over input identifiers, and skip if main()
# fails to produce some traces
colorlist = []
stylelist = []
if opts.filelist is not None:
assert os.path.isfile(
opts.filelist), "File %s does not exist" % opts.filelist
IN_FILE = open(opts.filelist, "r")
for line in IN_FILE:
if line[0] != "#":
a = line.strip().split("\t")
if len(a[0]) > 0:
if len(a) == 1:
a.append(a[0]) # use filename as label if nothing else
if len(a) >= 3: # if three columns, last column is color
colorlist.append((a[2]))
if len(a) >= 4: # if three columns, last column is color
stylelist.append((a[3]))
sys.stderr.write("%s\t%s\n" % (a[0], a[1]))
graphcount = main(
a[0], graphtype, label=a[1], n=graphcount,
opts=opts, colorlist=colorlist, stylelist=stylelist)
else:
for f in opts.files:
filen = f
graphcount = main(filen, graphtype, n=graphcount, opts=opts)
# don't continue if all inputs fail
assert graphcount > 0, "ERROR: unable to find any data to graph!"
if graphtype >= 0:
sys.stderr.write("Writing graph into file %s\n" % (imagefilename))
plt.savefig(imagefilename)
if opts.interactive:
plt.show()
else:
sys.stderr.write("Use -i to open widow with graph\n")
if graphcount == 0:
sys.stderr.write("ERROR: no data found!\n")
| bsd-2-clause | -57,584,907,572,838,130 | 40.234637 | 91 | 0.601815 | false |
anmolgarg/plotbox | plotbox/tables.py | 1 | 4520 | '''Helpers for rendering pandas DataFrames as markdown tables and as styled HTML tables.
'''
# -*- coding: utf-8 -*-
import pandas as pd
def df_to_markdown_table(df, return_str=True):
'''Creates markdown tables of pandas dataframes
Parameters
----------
df : pandas DataFrame
return_str : boolean (True)
If True, return string, else, return Markdown object
Returns
-------
md : str or IPython.core.display.Markdown object
'''
from IPython.display import Markdown
fmt = ['---' for i in range(len(df.columns))]
df_fmt = pd.DataFrame([fmt], columns=df.columns)
df_formatted = pd.concat([df_fmt, df])
md = Markdown(df_formatted.to_csv(sep="|", index=False))
if return_str:
return str(md.data)
else:
return md
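# Editor's note: hedged usage sketch, not part of the original module.
# Assumes pandas and IPython are installed (both are imported above / inside
# the function); the DataFrame contents are invented purely for illustration.
def _example_df_to_markdown_table():
    df = pd.DataFrame({'signal': ['rpm', 'temp'], 'mean': [1200, 85.2]})
    md_str = df_to_markdown_table(df, return_str=True)
    # md_str is a pipe-delimited table with a '---' separator row, ready to
    # paste into a markdown document or notebook cell
    return md_str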
def get_style(h2 = None, p = None, hover_color = '8fbcbc'):
'''Returns HTML head string to add style to a html table.
Parameters
----------
h2 : str
Title text for head
p : str
Paragraph text for head
hover_color : str (8fbcbc)
Color for hover over highlight
Returns
-------
style : str
Notes
-----
    Adds bootstrap `table`, `table-hover`, and `table-striped` classes to the table
'''
style = '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Logged Signals</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
<style>
td {
font-size: 13px;
}
.table-hover tbody tr:hover td, .table-hover tbody tr:hover th {
background-color: #%s;
}
</style>
</head>
<body>
<div class="container">
<h2>%s</h2>
<p>%s</p>
<br>
<table class="table table-hover table-striped">
'''%(str(hover_color), str(h2), str(p))
return style
def df_to_html_str(df):
'''Creates HTML string of pandas DataFrame with no formatting.
Parameters
----------
df : pandas DataFrame
Returns
-------
df_html : str
'''
df_html = df.to_html(na_rep='', )
return df_html
def df_to_html_table(df, save_as, style=None, **kwargs):
'''Writes HTML table of pandas DataFrame with style to save_as.
Parameters
----------
df : pandas DataFrame
save_as : str
Full path to save html file
style : str (optional)
Style string outputted from `get_style`
kwargs
Args for `get_style` (`h2`, `p`, and `hover_color`)
Returns
-------
None
'''
df = df_to_html_str(df)
df = df.split('<table border="1" class="dataframe">')[1]
if style is None:
style = get_style(**kwargs)
f = open(save_as+'.html', 'w')
f.write(style)
f.write(df)
f.write('\n</body>')
f.close()
return
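# Editor's note: hedged usage sketch, not part of the original module.
# Writes 'signals.html' (hypothetical name) to the working directory. Note
# that df_to_html_table() splits on '<table border="1" class="dataframe">',
# so this assumes a pandas version that still emits that markup.
def _example_df_to_html_table():
    df = pd.DataFrame({'signal': ['rpm', 'temp'], 'mean': [1200, 85.2]})
    df_to_html_table(df, save_as='signals', h2='Logged Signals',
                     p='Example table', hover_color='8fbcbc')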
def foo(var1, var2, long_var_name='hi') :
'''A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
References
----------
Cite the relevant literature
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
'''
pass | mit | 6,594,797,687,149,475,000 | 21.718593 | 101 | 0.589159 | false |
pinga-lab/magnetic-ellipsoid | code/plot_functions.py | 1 | 6917 | from __future__ import division
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import mesher
def savefig(fname):
"""
Save a matplotlib figure in 'manuscript/figures'
Uses the "os" module to specify the path in a cross-platform way.
Calls plt.savefig in the background.
Parameters:
* fname : str
The file name of the figure **without** the folder path.
Ex: "my_figure.pdf", not "../manuscript/figures/my_figure.pdf".
"""
fig_file = os.path.join(os.path.pardir, 'manuscript', 'figures', fname)
plt.savefig(fig_file, facecolor='w', bbox_inches='tight')
def draw_main_system(ax, length_axes=1, label_size=22, elev=200,
azim=-20):
'''
Plot the axes forming the main coordinate system.
Parameters:
* ax: axes of a matplotlib figure.
* length_axes: float
Length of the axes (in meters). Default is 1.
* label_size: float
Size of the label font. Default is 22.
* elev and azim: floats
Parameters controlling the view of the figure.
Default is 200 and -20, respectively.
'''
# x-axis
ax.quiver(length_axes, 0, 0, length_axes, 0, 0,
length=length_axes, color='k', linewidth=2, linestyle='-',
arrow_length_ratio=0.1)
ax.text(1.05*length_axes, 0, 0, '$x$', color='k', fontsize=label_size)
# y-axis
ax.quiver(0, length_axes, 0, 0, length_axes, 0,
length=length_axes, color='k', linewidth=2, linestyle='-',
arrow_length_ratio=0.1)
ax.text(0, 1.05*length_axes, 0, '$y$', color='k', fontsize=label_size)
# z-axis
ax.quiver(0, 0, length_axes, 0, 0, length_axes,
length=length_axes, color='k', linewidth=2, linestyle='-',
arrow_length_ratio=0.1)
ax.text(0, 0, 1.05*length_axes, '$z$', color='k', fontsize=label_size)
ax.axis('off')
ax.view_init(elev=elev, azim=azim)
def get_parameters(ellipsoid):
'''
Get the coordinate transformation matrix and
the semi-axes of a given ellipsoid.
Parameters:
* ellipsoid : element of :class:`mesher.TriaxialEllipsoid`,
`mesher.ProlateEllipsoid` or `mesher.OblateEllipsoid`.
Returns:
* V: numpy array 2D
Coordinate transformation matrix from the main coordinate system
to the local coordinate system. The local coordinate system
has the origin at the centre of the ellipsoid and the axes
aligned with the semi-axes of the ellipsoid.
* a, b, c: floats
Semi-axes of the ellipsoid.
* xc, yc, zc: floats
        Coordinates of the ellipsoid centre referred to the
main coordinate system.
'''
# Coordinate transformation matrix
V = ellipsoid.transf_matrix
# Ellipsoid centre
xc = ellipsoid.x
yc = ellipsoid.y
zc = ellipsoid.z
# Ellipsoid semi-axes
if ellipsoid.__class__ is mesher.TriaxialEllipsoid:
a = ellipsoid.large_axis
b = ellipsoid.intermediate_axis
c = ellipsoid.small_axis
if ellipsoid.__class__ is mesher.ProlateEllipsoid:
a = ellipsoid.large_axis
b = ellipsoid.small_axis
c = ellipsoid.small_axis
if ellipsoid.__class__ is mesher.OblateEllipsoid:
a = ellipsoid.small_axis
b = ellipsoid.large_axis
c = ellipsoid.large_axis
return V, a, b, c, xc, yc, zc
def draw_ellipsoid(ax, ellipsoid, body_color, body_alpha, npoints=100):
'''
Plot the surface of an ellipsoid.
Parameters:
* ax: axes of a matplotlib figure.
* ellipsoid : element of :class:`mesher.TriaxialEllipsoid`,
`mesher.ProlateEllipsoid` or `mesher.OblateEllipsoid`.
* body_color: RGB matplotlib tuple
Color of the body.
* body_alpha: float
Transparency of the body.
* npoints: int
Number of points used to interpolate the surface
of the ellipsoid.
'''
V, a, b, c, xc, yc, zc = get_parameters(ellipsoid)
# Spherical angles (in radians) for plotting the ellipsoidal surface.
u = np.linspace(0, 2 * np.pi, 2*npoints)
v = np.linspace(0, np.pi, npoints)
# Cartesian coordinates referred to the body system
# (https://en.wikipedia.org/wiki/Ellipsoid)
x1 = a * np.outer(np.cos(u), np.sin(v))
x2 = b * np.outer(np.sin(u), np.sin(v))
x3 = c * np.outer(np.ones_like(u), np.cos(v))
# Cartesian coordinates referred to the main system
x = V[0, 0]*x1 + V[0, 1]*x2 + V[0, 2]*x3 + xc
y = V[1, 0]*x1 + V[1, 1]*x2 + V[1, 2]*x3 + yc
z = V[2, 0]*x1 + V[2, 1]*x2 + V[2, 2]*x3 + zc
# Plot:
ax.plot_surface(x, y, z, linewidth=0., color=body_color, alpha=body_alpha)
def draw_axes(ax, ellipsoid, axes_color=(0, 0, 0),
label_axes=True, label_size=16):
'''
Plot three orthogonal axes.
Parameters:
* ax: axes of a matplotlib figure.
* ellipsoid : element of :class:`mesher.TriaxialEllipsoid`,
`mesher.ProlateEllipsoid` or `mesher.OblateEllipsoid`.
* label_axes : boolean
If True, plot the label of all axes.
* label_size : int
Define the size of the label of all axes.
'''
V, a, b, c, xc, yc, zc = get_parameters(ellipsoid)
ax.quiver(xc+V[0, 0]*a, yc+V[1, 0]*a, zc+V[2, 0]*a,
V[0, 0], V[1, 0], V[2, 0],
length=a, color=axes_color, linewidth=3.0, linestyle='-',
arrow_length_ratio=0.1)
ax.quiver(xc+V[0, 1]*b, yc+V[1, 1]*b, zc+V[2, 1]*b,
V[0, 1], V[1, 1], V[2, 1],
length=b, color=axes_color, linewidth=3.0, linestyle='-',
arrow_length_ratio=0.1)
ax.quiver(xc+V[0, 2]*c, yc+V[1, 2]*c, zc+V[2, 2]*c,
V[0, 2], V[1, 2], V[2, 2],
length=c, color=axes_color, linewidth=3.0, linestyle='-',
arrow_length_ratio=0.1)
if label_axes is True:
ax.text(xc+V[0, 0]*a*1.05, yc+V[1, 0]*a*1.05, zc+V[2, 0]*a*1.05,
'$a \hat{\mathbf{v}}_{1}$', color=axes_color,
fontsize=label_size)
ax.text(xc+V[0, 1]*b*1.05, yc+V[1, 1]*b*1.05, zc+V[2, 1]*b*1.05,
'$b \hat{\mathbf{v}}_{2}$', color=axes_color,
fontsize=label_size)
ax.text(xc+V[0, 2]*c*1.05, yc+V[1, 2]*c*1.05, zc+V[2, 2]*c*1.05,
'$c \hat{\mathbf{v}}_{3}$', color=axes_color,
fontsize=label_size)
def limits(ax, xmin, xmax, ymin, ymax, zmin, zmax):
'''
Set the limits of the 3D plot.
Parameters:
* ax: axes of a matplotlib figure.
* xmin, xmax, ymin, ymax, zmin, zmax: floats
Lower and upper limites along the x-, y- and z- axes.
'''
x = [xmin, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymin]
z = [zmin, zmin, zmin, zmax]
ax.scatter(x, y, z, s=0)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_zlim(zmin, zmax)
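def _example_figure(ellipsoid, fname='ellipsoid_example.pdf'):
    """
    Editor's sketch (not part of the original module) showing how the helpers
    above are meant to be combined. `ellipsoid` must be one of the mesher
    ellipsoid classes used throughout this module; its construction is left
    to the caller because the constructor signature is not shown here. The
    file name is hypothetical and is passed to savefig(), which writes into
    ../manuscript/figures.
    """
    fig = plt.figure(figsize=(6., 6.))
    ax = fig.add_subplot(111, projection='3d')
    draw_main_system(ax, length_axes=1000)
    draw_ellipsoid(ax, ellipsoid, body_color=(0, 0.5, 1), body_alpha=0.3)
    draw_axes(ax, ellipsoid)
    limits(ax, -1000, 1000, -1000, 1000, -1000, 1000)
    savefig(fname)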
| bsd-3-clause | 3,975,124,816,492,423 | 30.298643 | 78 | 0.589273 | false |
GkAntonius/feynman | examples/Particle_Physics/plot_VBF_tautau.py | 2 | 1420 | """
VBF tau-tau
===========
Vector Boson Fusion.
"""
import matplotlib.pyplot as plt
from feynman import Diagram
fig = plt.figure(figsize=(10.,10.))
ax = fig.add_axes([0,0,1,1], frameon=False)
diagram = Diagram(ax)
#diagram.text(.5,0.9,r"Vector Boson Fusion (VBF) Higgs $\rightarrow\tau\tau$",fontsize=40)
in1 = diagram.vertex(xy=(.1,.8), marker='')
in2= diagram.vertex(xy=(.1,.2), marker='')
v1 = diagram.vertex(xy=(.3,.7))
v2 = diagram.vertex(xy=(.3,.3))
v3 = diagram.vertex(xy=(.5,.5))
out1 = diagram.vertex(xy=(.9,.8), marker='')
out2 = diagram.vertex(xy=(.9,.2), marker='')
higgsf = diagram.vertex(xy=(.7,.5))
tau1 = diagram.vertex(xy=(.9,.7), marker='')
tau2 = diagram.vertex(xy=(.9,.3), marker='')
q1 = diagram.line(in1, v1, arrow=False)
q2 = diagram.line(in2, v2, arrow=False)
wz1 = diagram.line(v1, v3, style='wiggly')
wz2 = diagram.line(v2, v3, style='wiggly')
higgs = diagram.line(v3, higgsf, style='dashed', arrow=False)
q3 = diagram.line(v1, out1, arrow=False)
q4 = diagram.line(v2, out2, arrow=False)
t1 = diagram.line(higgsf, tau1)
t2 = diagram.line(tau2, higgsf)
q1.text("$q_1$",fontsize=30)
q2.text("$q_2$",fontsize=30)
diagram.text(v3.xy[0], v3.xy[1]+0.11, "$Z/W^\pm$",fontsize=30)
wz2.text("$Z/W^\pm$",fontsize=30)
q3.text("$q_3$",fontsize=30)
q4.text("$q_4$",fontsize=30)
higgs.text("$H$",fontsize=30)
t1.text(r"$\tau^-$",fontsize=35)
t2.text(r"$\tau^+$",fontsize=35)
diagram.plot()
plt.show()
| gpl-3.0 | 2,183,326,868,890,777,000 | 29.212766 | 90 | 0.649296 | false |
lpryszcz/bin | fpkm2expression_plot.py | 1 | 6309 | #!/usr/bin/env python
desc="""Generate expression plot similar to cummeRbund from .sf expression values.
CHANGELOG:
v1.3
- calculate FPKM for salmon output
"""
epilog="""Author: l.p.pryszcz+git@gmail.com
Mizerow, 8/05/2015
"""
import os, sys, time
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
#colors = ['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
colors = ('b','c','r','g','y', 'm','b','c','r','g','y','m', 'b','c','r','g','y','m')
def load_gene2transcripts(transcripts):
"""Return gene2transcripts.
Supports ensembl fasta files.
"""
# ENSDART00000007748 ensembl:known chromosome:Zv9:21:822304:832471:-1 gene:ENSDARG00000016476
gene2transcripts = {}
for line in transcripts:
if line.startswith(">"):
lData = line[1:].split()
tid = lData[0]
gid = filter(lambda x: x.startswith("gene:"), lData)[0].split(':')[1]
if gid not in gene2transcripts:
gene2transcripts[gid] = [tid]
else:
gene2transcripts[gid].append(tid)
return gene2transcripts
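# Editor's note: hedged illustration, not part of the original script.
# load_gene2transcripts only inspects ">" header lines, so a minimal
# Ensembl-style header list is enough to exercise it (sequence lines are
# ignored). The first ID is copied from the docstring above; the second
# transcript ID is invented for illustration.
def _example_gene2transcripts():
    fasta = [">ENSDART00000007748 ensembl:known chromosome:Zv9:21:822304:832471:-1 gene:ENSDARG00000016476\n",
             "ACGT\n",
             ">ENSDART00000012345 ensembl:known chromosome:Zv9:21:1:100:1 gene:ENSDARG00000016476\n"]
    # -> {'ENSDARG00000016476': ['ENSDART00000007748', 'ENSDART00000012345']}
    return load_gene2transcripts(fasta)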
def parse_sf(handles, conditions, transcripts, genes=[]):
"""Parse multiple .sf handles and return expression info
    for all transcripts of each gene."""
# get gene2transcripts
gene2transcripts = load_gene2transcripts(transcripts)
# get tid2gid
tid2gid = {} #tid: gid for
for gid, tids in gene2transcripts.iteritems():
for tid in tids:
tid2gid[tid] = gid
# prepare
gene2fpkms = {gid: [[0]*len(tids) for i in range(len(conditions))] \
for gid, tids in gene2transcripts.iteritems()}
# parse handles
for i, handle in enumerate(handles):
for line in handle:
if line.startswith(('#','Name\t')):
continue
# unload line
lData = line[:-1].split('\t')
if len(lData)==5:
tid, length, tpm, fpkm, reads = lData
# salmon v4.1+ https://github.com/COMBINE-lab/salmon/releases
else:
tid, length, tpm, reads = lData
# get gid
gid = tid2gid[tid]
if genes and gid not in genes:
continue
# get fpkm
length, tpm = float(length), float(tpm)
fpkm = tpm*1000.0 / length
# store fpkm for given condition and transcript
gene2fpkms[gid][i][gene2transcripts[gid].index(tid)] = float(fpkm)
# yield data
for gid, fpkms in gene2fpkms.iteritems():
if genes and gid not in genes:
continue
yield gid, gene2transcripts[gid], fpkms
def fpkm2expression_plot(genes, handle, out, transcripts, FPKMfrac, verbose):
"""Parse expression and generate expression plots."""
if not os.path.isdir(out):
os.makedirs(out)
# get conditions
## salmon/RZE024/quant.sf -> RZE024
conditions = [h.name.split("/")[-2].split('_')[-1] for h in handle]
# get parser
parser = parse_sf(handle, conditions, transcripts, genes)
# the x locations for the groups
ind = np.arange(len(conditions))+.25
# the width of the bars: can also be len(x) sequence
width = 0.75
for i, (gid, transcripts, fpkms) in enumerate(parser, 1):
if verbose:
sys.stderr.write(" %s %s %s\n"%(i, gid, len(transcripts)))
#start figure
fig = plt.figure()
ax = fig.add_subplot(111)
# prepare data
fpkms = np.array(fpkms).transpose()
cumFPKM = sum(sum(fpkms))
bottom = np.zeros(len(conditions))
ji = 0
for j, _fpkms in enumerate(fpkms):
if sum(_fpkms) < FPKMfrac*cumFPKM:
sys.stderr.write(" Skipped %s with %.2f FPKM\n"%(transcripts[j], sum(_fpkms)))
continue
if ji >= len(colors):
sys.stderr.write("[WARNING] Too many transcripts to plot!\n")
break
ax.bar(ind, _fpkms, width, bottom=bottom, label=transcripts[j], color=colors[ji])
plt.xticks(ind+width/2., conditions, rotation=90, fontsize=9)
#plt.xlabel("bound unbound total")
ax.set_ylabel("FPKM")
# update handles
bottom += _fpkms
ji += 1
ax.legend(loc="best", fontsize=8)
ax.set_title(gid)
#plt.show()
fig.savefig(os.path.join(out,"%s.svg"%gid), papertype="A3")
def main():
import argparse
usage = "%(prog)s -v" #usage=usage,
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='1.3')
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="verbose")
parser.add_argument("-i", "--fpkm", default=sys.stdin, type=file, nargs="+",
help="isoforms.fpkm_tracking or .sf file(s) [stdin]")
parser.add_argument("-g", "--genes", nargs="+",
help="genes to plot")
parser.add_argument("-o", "--output", default="expression_plot",
help="output stream [%(default)s]")
parser.add_argument("-t", "--transcripts", default="", type=file,
help="transcripts file; needed to get gene2transcripts for .sf input [%(default)s]")
parser.add_argument("-f", "--frac", default=0.05, type=float,
help="ignore transcripts with expression below [%(default)s] of gene expression")
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
fpkm2expression_plot(o.genes, o.fpkm, o.output, o.transcripts, o.frac, o.verbose)
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
except IOError as e:
sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n"%dt)
| gpl-3.0 | -8,574,801,773,714,000,000 | 39.442308 | 172 | 0.570138 | false |
anthonytw/neat-deform | src/gui/EvolveThread.py | 1 | 6204 | from PyQt5.QtCore import *
from random import randint
import matplotlib.pyplot as plt
# A thread to handle image evolution.
class EvolveThread(QThread):
# Define signals.
finished_job = pyqtSignal()
update_progress = pyqtSignal(int, int) # update_progress( current, max )
# Thread initialization.
def __init__( self, parent = None ):
print "EvolveThread::init"
# Call QThread constructor.
QThread.__init__( self, parent )
# Create a mutex for job parameters.
self.job_mutex = QMutex()
# Initialize job parameters.
self.job = 0
self.selection = []
self.cross_correlate = False
# Run the thread.
def run( self ):
# Wait on jobs.
while True:
# Short wait (one second).
self.sleep( 1 )
# Check job queue.
exiting = False
next_job = self.start_next_job()
if next_job:
# If negative, exit thread.
if next_job < 0:
exiting = True
# If positive, run through generations.
elif next_job > 0:
print "EvolveThread::job - iters[%d] initial_selection: " % next_job,
print self.selection
for iter in xrange(next_job):
self.selection = self.execute_iteration( iter, next_job, self.selection )
self.selection = []
self.finish_job()
# Emit the finished job signal if there was a job.
if next_job:
self.finished_job.emit()
# Finished?
if exiting:
print "EvolveThread::exiting"
break
# Execute an iteration.
def execute_iteration(
self,
iteration_id,
max_iteration,
selection ):
print " - EvolveThread:exec: iter[%d] selection: " % iteration_id,
print selection
# Assign reward to selection (or random reward if none are selected).
if len(selection) > 0:
for index in selection:
self.parent().population.getIndividual(index).reward( randint(80,120) )
else:
for index in xrange(self.parent().population.getIndividualCount()):
self.parent().population.getIndividual(index).reward( randint(10,40) )
# Finish evaluations.
self.parent().experiment.finishEvaluations()
# Get next generation.
self.parent().experiment.produceNextGeneration()
self.parent().experiment.preprocessPopulation()
self.parent().population = self.parent().experiment.pythonEvaluationSet()
# Update population model.
if self.parent().population.getIndividualCount() != self.parent().population_model.rowCount():
print "WARNING: Discrepency between population size and model size! Things might blow up. Wear a hardhat."
# Reset image cache every generation.
image_cache = []
rows = self.parent().population_model.rowCount()
update_index = iteration_id * rows + 1
max_update_index = max_iteration * rows
selection = []
for i in xrange(rows):
print " - Updating network %2d with new network..." % i,
index = self.parent().population_list.model().index(i, 0)
# If performing cross-correlation or on the last iteration, update network.
if self.cross_correlate or ((iteration_id + 1) == max_iteration):
individual = self.parent().population.getIndividual(i)
network = individual.spawnFastPhenotypeStack()
self.parent().population_model.update_item(i, network)
# Perform cross correlation with cached images. Do not perform on the last
# iteration because the selection is not being updated.
if self.cross_correlate and ((iteration_id + 1) < max_iteration):
entropy = self.parent().population_model.image_entropy(i)
similar_image_found = False
for [ref_entropy, ref_autocorr] in image_cache:
crosscorr = self.parent().population_model.correlate_image(entropy, ref_entropy)
simrange = (0.6*ref_autocorr, 1.2*ref_autocorr)
print " - - ccor: %.4f; acor: %.4f simrange: " % (crosscorr, ref_autocorr),
print simrange
if (crosscorr >= simrange[0]) and (crosscorr <= simrange[1]):
similar_image_found = True
break
# No similar image found?
if not similar_image_found:
autocorr = self.parent().population_model.correlate_image(entropy, entropy)
image_cache.append( [entropy, autocorr] )
selection.append( i )
print "- Found unique"
else:
print "- Found similar"
print "Done"
# Update element status.
self.update_progress.emit( update_index, max_update_index )
update_index += 1
return selection
# Add job to workflow.
def add_job( self, iterations, selection, cross_correlate = False ):
# Handle mutex lock / unlock.
job_locker = QMutexLocker( self.job_mutex )
# Add job.
self.job = iterations
self.selection = []
self.cross_correlate = cross_correlate
if len(selection) > 0:
# If a selection is sent in, for only one iteration.
self.job = 1
# Extract row numbers.
for sel in selection:
self.selection.append( sel.row() )
# Get next job from the workflow queue.
def start_next_job( self ):
# Lock the mutex here. Unlock in run function.
self.job_mutex.lock()
# Return most recent job after mutex has been locked.
next_job = self.job
self.job = 0
return next_job
# Finish the job.
def finish_job( self ):
# Unlock mutex.
self.job_mutex.unlock()
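# Editor's note: hedged wiring sketch, not part of the original module.
# EvolveThread expects its parent widget to expose .population,
# .population_model, .population_list and .experiment (see the usages above).
# Typical hookup from such a parent (slot names are assumptions):
#   self.evolve_thread = EvolveThread(self)
#   self.evolve_thread.update_progress.connect(self.on_progress)      # (current, max)
#   self.evolve_thread.finished_job.connect(self.on_generation_done)
#   self.evolve_thread.start()
#   self.evolve_thread.add_job(iterations=5, selection=[], cross_correlate=True)
#   ...
#   self.evolve_thread.add_job(-1, [])   # a negative job count makes run() exit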
| gpl-3.0 | 505,967,782,694,013,250 | 35.710059 | 118 | 0.561896 | false |
huazhisong/graduate_text | src/rnn/sample.py | 1 | 5454 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import time
import csv
import argparse
import cPickle as pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from utils import TextLoader
from model import Model
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='save',
help='model directory to store checkpointed models')
parser.add_argument('--how', type=str, default='sample',
help='''sample / predict / accuracy:
test one sample / predict some samples / compute accuracy of dataset''')
parser.add_argument('--sample_text', type=str, default=' ',
help='sample text, necessary when how is sample')
parser.add_argument('--data_path', type=str, default='data/test.csv',
help='data to predict or compute accuracy, necessary when how is predict or accuracy')
parser.add_argument('--result_path', type=str, default='data/result.csv',
help='result of prediction, necessary when how is predict')
args = parser.parse_args()
if args.how == 'sample':
sample(args)
elif args.how == 'predict':
predict(args)
elif args.how == 'accuracy':
accuracy(args)
else:
        raise Exception('incorrect argument, input "sample", "predict" or "accuracy" after "--how"')
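# Editor's note: hedged usage reminder, not part of the original script.
# Typical invocations (paths are illustrative):
#   python sample.py --how sample   --save_dir save --sample_text "some text"
#   python sample.py --how predict  --save_dir save --data_path data/test.csv --result_path data/result.csv
#   python sample.py --how accuracy --save_dir save --data_path data/test.csv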
def transform(text, seq_length, vocab):
x = map(vocab.get, text)
x = map(lambda i: i if i else 0, x)
if len(x) >= seq_length:
x = x[:seq_length]
else:
x = x + [0] * (seq_length - len(x))
return x
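def _transform_example():
    """Editor's sketch (not in the original script): transform() maps each
    character to its vocabulary id (unknown characters become 0), then pads
    with 0 or truncates so every sample is exactly seq_length long."""
    vocab = {u'a': 1, u'b': 2, u'c': 3}
    assert transform(u'abca', 6, vocab) == [1, 2, 3, 1, 0, 0]   # padded
    assert transform(u'abcabc', 4, vocab) == [1, 2, 3, 1]       # truncated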
def sample(args):
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = pickle.load(f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
chars, vocab = pickle.load(f)
with open(os.path.join(args.save_dir, 'labels.pkl'), 'rb') as f:
labels = pickle.load(f)
model = Model(saved_args, deterministic=True)
x = transform(args.sample_text.decode('utf8'), saved_args.seq_length, vocab)
with tf.Session() as sess:
saver =tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print model.predict_label(sess, labels, [x])
def predict(args):
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = pickle.load(f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
chars, vocab = pickle.load(f)
with open(os.path.join(args.save_dir, 'labels.pkl'), 'rb') as f:
labels = pickle.load(f)
model = Model(saved_args, deterministic=True)
with open(args.data_path, 'r') as f:
reader = csv.reader(f)
texts = list(reader)
texts = map(lambda i: i[0], texts)
x = map(lambda i: transform(i.strip().decode('utf8'), saved_args.seq_length, vocab), texts)
with tf.Session() as sess:
saver =tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
start = time.time()
results = model.predict_label(sess, labels, x)
end = time.time()
print 'prediction costs time: ', end - start
with open(args.result_path, 'w') as f:
writer = csv.writer(f)
writer.writerows(zip(texts, results))
def accuracy(args):
with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
saved_args = pickle.load(f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
chars, vocab = pickle.load(f)
with open(os.path.join(args.save_dir, 'labels.pkl'), 'rb') as f:
labels = pickle.load(f)
data_loader = TextLoader(None, args.data_path, saved_args.batch_size, saved_args.seq_length, vocab, labels)
model = Model(saved_args, deterministic=True)
with tf.Session() as sess:
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
data = data_loader.tensor.copy()
n_chunks = len(data) / saved_args.batch_size
if len(data) % saved_args.batch_size:
n_chunks += 1
data_list = np.array_split(data, n_chunks, axis=0)
correct_total = 0.0
num_total = 0.0
for m in range(n_chunks):
start = time.time()
x = data_list[m][:, :-1]
y = data_list[m][:, -1]
results = model.predict_class(sess, x)
correct_num = np.sum(results==y)
end = time.time()
print 'batch {}/{} cost time {:.3f}, sub_accuracy = {:.6f}'.format(m+1, n_chunks, end-start, correct_num*1.0/len(x))
correct_total += correct_num
num_total += len(x)
accuracy_total = correct_total / num_total
print 'total_num = {}, total_accuracy = {:.6f}'.format(int(num_total), accuracy_total)
if __name__ == '__main__':
main()
| agpl-3.0 | -4,485,595,765,210,151,000 | 33.647059 | 128 | 0.581958 | false |
garibaldu/radioblobs | code/code_2d/score_DirMult_2d.py | 1 | 19713 | import numpy as np
import numpy.random as rng
import pylab as pl
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mp
from matplotlib.patches import Ellipse
import copy, sys
import math
import optparse
import scipy.signal
import scipy.special.basic as sp
import scipy.optimize as sop
import scipy
from numpy.core.umath_tests import inner1d
INCLUDE_BACKGROUND_COMPENSATION = False
source_to_background_ratio = np.log(0.1/0.9)
def sq(x): # Am using this just to make the code more readable.
return np.power(x,2.0)
def make_dirichlet_bins(data,num_bins,strategy,num_dirs=50,alpha=10.,stretch_factor=None,total_alpha=None,safety_gap=np.inf):
z = copy.copy(data)
z = np.ravel(z)
z.sort()
z = np.delete(z, np.where(np.isnan(z)))
top, bottom = z[-1], z[0]
alphas = [alpha]*num_bins #can only do eqocc and width for now
dirs = rng.dirichlet(alphas,num_dirs)
mybins = np.zeros((num_dirs,num_bins+1))
mybins[:,0] = bottom
mybins[:,-1] = top
if strategy == 'eqocc': #(roughly) equal occupancies
num_datapts = z.size
for d in range(dirs.shape[0]):
props = (np.cumsum(dirs[d])*num_datapts)[:-1]
for p in range(len(props)):
mybins[d,p+1] = (z[props[p]] + z[props[p]+1])/2
elif strategy == 'width': #(roughly) equal width
datarange = top - bottom
for d in range(dirs.shape[0]):
props = np.cumsum(dirs[d])[:-1]
for p in range(len(props)):
        mybins[d,p+1] = bottom + props[p] * datarange # borders measured from the bottom of the data range
else:
sys.exit('Not a valid binning strategy')
#safety gap
mybins[:,0] -= safety_gap
mybins[:,-1] += safety_gap
return mybins #return the bin borders
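# Hedged usage sketch (the argument values are made up for illustration):
#   BINS = make_dirichlet_bins(Z, 8, 'eqocc')   # BINS.shape == (50, 9): num_dirs rows of jittered borders
# Each row is one random draw of bin borders; the outer borders are widened by safety_gap
# (+/- inf by default) and make_alphaBG below averages the histograms taken over all rows.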
def make_bin_borders(data,num_bins,strategy='eqocc',safety_gap=np.inf,fname=None):
z = copy.copy(data)
z = np.ravel(z)
z.sort()
z = np.delete(z, np.where(np.isnan(z)))
top, bottom = z[-1], z[0]
mybins = []
if strategy == 'eqocc': #Equal occupancies
step = len(z)/num_bins
for i in range(0,len(z)-step+1,step):
mybins.append(z[i])
mybins.append(z[-1]) # ie. these are really bin BORDERS.
elif strategy == 'width': #Equal width
step = (top-bottom)/(num_bins+0.1)
mybins = [bottom + x*step for x in range(0, num_bins)]
mybins.append(z[-1]) # the last one.
elif strategy == 'fromfile':
if fname == None:
sys.exit('Please supply a file name')
else:
mybins = np.genfromtxt(fname)
else:
sys.exit('Not a valid binning strategy')
# Now ensure the borders are big enough to catch new data that's out-of-range.
mybins[-1] += safety_gap
mybins[0] -= safety_gap
return mybins
def make_alphaBG(BINS,Nx,Ny,Z,dirichlet):
if dirichlet:
alpha_BGs = np.zeros((BINS.shape[0],BINS.shape[1]-1))
K = BINS.shape[1]-1
Cxk = np.zeros((Ny,Nx,K))
for b in range(BINS.shape[0]):
alpha_BGs[b] = np.histogram(np.ravel(Z),bins=BINS[b])[0]
#for row in range(Ny):
# for col in range(Nx):
# Cxk[row,col] += np.histogram(Z[row,col],bins=BINS[b])[0]
for i in range(K-1):
Cxk[:,:,i]+=np.asarray((Z>=BINS[b,i])&(Z<BINS[b,i+1]),dtype=int)
Cxk[:,:,K-1]+=np.asarray((Z>=BINS[b,K-1])&(Z<=BINS[b,K]),dtype=int)
alpha_BG = np.mean(alpha_BGs,axis=0) + 1.0
Cxk /= float(BINS.shape[0])
else:
alpha_BG = np.histogram(np.ravel(Z),bins=BINS)[0] + 1.0
K = len(BINS)-1
Cxk = np.zeros((Ny,Nx,K))
#for row in range(Ny):
# for col in range(Nx):
# Cxk[row,col]=np.histogram(Z[row,col],bins=BINS)[0]
for i in range(K-1):
Cxk[:,:,i]=np.asarray((Z>=BINS[i])&(Z<BINS[i+1]),dtype=int)
Cxk[:,:,K-1]=np.asarray((Z>=BINS[K-1])&(Z<=BINS[K]),dtype=int)
return Cxk, alpha_BG
# A useful method, used for making fake blobs, and for specifying a
# window profile for giving pixels different weights, when calculating
# the counts "in a window".
def make_gaussian_blob(X,Y,midx,midy,spreadx,spready,rotn=0.):
xDist = (X-midx) # these are entire matrices
yDist = (Y-midy)
#if spreadx<spready:
# temp=spreadx; spreadx = spready; spready = temp;
a = sq(math.cos(rotn))/(2.*sq(spreadx)) + sq(math.sin(rotn))/(2.*sq(spready))
b = (-math.sin(2.*rotn)/(4*sq(spreadx))) + (math.sin(2*rotn)/(4*sq(spready)))
c = sq(math.sin(rotn))/(2*sq(spreadx)) + sq(math.cos(rotn))/(2*sq(spready))
W = np.exp(-1.0* (a*sq(xDist) + 2.*b*xDist*yDist + c*sq(yDist)))
return W
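# The a, b, c coefficients above are the standard parameterization of a rotated 2-D Gaussian:
#   W(x,y) = exp(-(a*dx^2 + 2*b*dx*dy + c*dy^2)),  with dx = x - midx, dy = y - midy,
# which is equivalent to exp(-0.5 * d^T Sigma^(-1) d) for an elliptical covariance whose principal
# standard deviations are (spreadx, spready), rotated by `rotn`.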
############################ SCORE METHODS ############################
def calc_weighted_counts(theta, args):
# WAS 'calc_params' but it didn't calc any params.
# Passing it just theta and args now as simpler.
mdx,mdy,sigmax,sigmay,phi = theta
Cxk, X,Y,alpha_SRC,alpha_BG = args
wgts = make_gaussian_blob(X,Y,mdx,mdy,sigmax,sigmay,phi)
N0, N1 = wgts.shape
nkVec = (Cxk * wgts.reshape(N0,N1,1) ).sum(0).sum(0)
return wgts, nkVec
def calc_logL(n, alphas):
""" Calculate the log likelihood under DirMult distribution with alphas=avec, given data counts of nvec."""
lg_sum_alphas = math.lgamma(alphas.sum())
sum_lg_alphas = np.sum(scipy.special.gammaln(alphas))
lg_sum_alphas_n = math.lgamma(alphas.sum() + n.sum())
sum_lg_alphas_n = np.sum(scipy.special.gammaln(n+alphas))
return lg_sum_alphas - sum_lg_alphas - lg_sum_alphas_n + sum_lg_alphas_n
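# calc_logL is the log marginal likelihood of counts n under a Dirichlet-multinomial model
# (up to the multinomial coefficient, which cancels whenever two alpha vectors are compared):
#   log L(n | alpha) = lgamma(A) - sum_k lgamma(alpha_k) - lgamma(A + N) + sum_k lgamma(n_k + alpha_k)
# where A = sum_k alpha_k and N = sum_k n_k.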
def calc_score(theta, args):
""" Calculate and return the score (WAS score_wrapper) """
wgts, nkVec = calc_weighted_counts(theta, args)
mdx,mdy,sigmax,sigmay,phi = theta
Cxk, X,Y,alpha_SRC,alpha_BG = args
logL_SRC = calc_logL(nkVec, alpha_SRC)
logL_BG = calc_logL(nkVec, alpha_BG)
score = logL_SRC - logL_BG #+ source_to_background_ratio
if INCLUDE_BACKGROUND_COMPENSATION:
nkVec_n = (alpha_BG/alpha_BG.sum()) * nkVec.sum()
logL_compensation_SRC = calc_logL(nkVec_n, alpha_SRC)
logL_compensation_BG = calc_logL(nkVec_n, alpha_BG)
compensation_score = logL_compensation_SRC - logL_compensation_BG
score = score - compensation_score
return -score # minus, since being fed to a minimizer not a maximizer
############################ GRADIENT METHODS ############################
def calc_df_dtheta(X,Y,midx,midy,spreadx,spready,rotation):
""" Calculate gradients for given ellipse parameters
Returns five window-sized arrays."""
#W = make_gaussian_blob(X,Y,midx,midy,spreadx,spready,rotation)
#if spreadx<spready:
# temp=spreadx; spreadx = spready; spready = temp;
# These are just shorthand definitions. I've renamed while checking.
# r is for rotation (as t/theta is taken).
xDist, yDist = (X-midx), (Y-midy)
xDist2, yDist2 = sq(xDist), sq(yDist)
sigx2, sigy2 = sq(spreadx), sq(spready)
cosr,sinr = math.cos(rotation), math.sin(rotation)
cosr2,sinr2 = sq(cosr), sq(sinr)
sin_2r,cos_2r = math.sin(2.*rotation), math.cos(2.*rotation)
# These a,b,c seem okay...............
a = cosr2/(2.*sigx2) + sinr2/(2.*sigy2)
b = -(sin_2r/(4.*sigx2)) + (sin_2r/(4.*sigy2)) # nb. this could be simplified.
c = sinr2/(2.*sigx2) + cosr2/(2.*sigy2)
df_dmx = (2.*a*xDist + 2.*b*yDist)
df_dmy = (2.*b*xDist + 2.*c*yDist)
df_dsx = (xDist2*cosr2 - xDist*yDist*sin_2r + yDist2*sinr2)/(np.power(spreadx,3))
df_dsy = (xDist2*sinr2 + xDist*yDist*sin_2r + yDist2*cosr2)/(np.power(spready,3))
df_dr = (((sinr*cosr)/(sigx2)-(sinr*cosr)/(sigy2))*xDist2 - 2.*((-cos_2r)/(2.*sigx2)+(cos_2r)/(2.*sigy2))*xDist*yDist - ((sinr*cosr)/(sigx2)-(sinr*cosr)/(sigy2))*yDist2)
return np.asarray((df_dmx, df_dmy, df_dsx, df_dsy, df_dr))
def calc_Qdiff(nVec, alphaS, alphaB):
""" Calculate Q_k - Q_base, for each bin k. Returns k-length vector."""
N = nVec.sum()
Qdiff = sp.psi(nVec + alphaS) - sp.psi(nVec+alphaB) - sp.psi(N+alphaS.sum()) + sp.psi(N+alphaB.sum())
return Qdiff
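# Qdiff is the derivative of (logL_SRC - logL_BG) with respect to each weighted count n_k, using
# d/dn lgamma(n + a) = psi(n + a) and noting that N = sum_k n_k:
#   Qdiff_k = psi(n_k + alphaS_k) - psi(n_k + alphaB_k) - psi(N + sum(alphaS)) + psi(N + sum(alphaB))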
def calc_logL_grad(Qdiff, Cxk_wgtd, dW_dtheta):
""" Assembles the gradient of the score w.r.t. the parameters??
Calculate full gradient: wgts * (data * grad) """
T0,T1,T2 = dW_dtheta.shape
K0,K1,K2 = Cxk_wgtd.shape
dg = (Cxk_wgtd.reshape(1,K0,K1,K2)) * (dW_dtheta.reshape(T0,T1,T2,1))
dgs = dg.sum(1).sum(1)
score_gradient = inner1d(Qdiff,dgs)
return score_gradient
def calc_score_gradient(theta, args):
""" Calculate and return the gradient """
mdx,mdy,sigmax,sigmay,phi = theta
Cxk,X,Y,alpha_SRC,alpha_BG = args
wgts, nkVec = calc_weighted_counts(theta, args)
N0,N1 = wgts.shape
CtimesW = Cxk * wgts.reshape(N0,N1,1)
df_dtheta = calc_df_dtheta(X,Y,mdx,mdy,sigmax,sigmay,phi)
Qdiff = calc_Qdiff(nkVec,alpha_SRC,alpha_BG)
score_grad = calc_logL_grad(Qdiff,CtimesW,df_dtheta)
if INCLUDE_BACKGROUND_COMPENSATION:
nkVec_compensation = (alpha_BG/alpha_BG.sum()) * np.sum(nkVec)
Qdiffn = calc_Qdiff(nkVec_compensation,alpha_SRC,alpha_BG)
score_grad -= calc_logL_grad(Qdiffn,CtimesW,df_dtheta)
return -score_grad # minus, because it's used by a minimizer not a maximizer
if __name__ == "__main__":
parser = optparse.OptionParser(usage="usage %prog [options]")
parser.add_option("-n","--numbins",type = "int",dest = "K",default=0,
help="number of bins (ignored if strategy is dexpocc or fromfile)")
parser.add_option("-b","--bins_fname",dest = "bfname",
help="bin borders filename")
parser.add_option("-s","--binning_strategy",dest = "strategy",
help="eqocc, width or fromfile. "
"MANDATORY OPTION.")
parser.add_option("-d","--datafile",dest = "infile",
help="a list of numbers: 1D data to be read in (can't be "
"used with --rngseed)")
parser.add_option("-r","--rngseed",type = "int",dest = "seed",
help="an int to make random data up (can't be used with "
"--datafile)")
parser.add_option("-t","--dirichlet",action="store_true",dest="dirichlet",default=False,
help="make dirichlet bin borders (incompatible with \"from file\" binning stratgegy)")
parser.add_option("-C","--CWT_fname",dest="CWT",
help="give CWT filename if background alphas from LDA "
"file to be used (can't be used with --local or --seed)\n")
opts, args = parser.parse_args()
EXIT = False
if opts.strategy is None:
print "ERROR: you must supply a binning strategy\n"
EXIT = True
if opts.infile and opts.seed:
print "ERROR: supply EITHER a datafile OR a random seed to make up data\n"
EXIT = True
if opts.seed and opts.CWT:
print "ERROR: background alphas from CWT can't be used with randomly generated data\n"
EXIT = True
if opts.dirichlet and opts.strategy=="fromfile":
print "ERROR: dirichlet bin borders are incompatible with using bin borders from file\n"
EXIT = True
if EXIT:
parser.print_help()
sys.exit(-1)
strategy = opts.strategy
outfile = 'DirModel_%s' % strategy
K = opts.K
if opts.seed:
seed = opts.seed
# make an "image"
rng.seed(seed) # seed the random number generator here
noise_size=1.0
Nx, Ny = 50, 70 #number of pixels in a fake test image
X,Y = np.meshgrid(np.arange(Nx), np.arange(Ny)) # these are both matrices of indices, each the same size as image
# make up the 'shapes' of the sources
mid1x, mid2x, mid3x = rng.random()*Nx, rng.random()*Nx, rng.random()*Nx
mid1y, mid2y, mid3y = rng.random()*Ny, rng.random()*Ny, rng.random()*Ny
print 'Random sources placed at %.0d,%.0d; %.0d,%.0d; %.0d,%.0d ' %(mid1x,mid1y,mid2x,mid2y,mid3x,mid3y)
# length scale
spread1x = 1+2*rng.random()
spread1y = 1+2*rng.random()
spread2x = 1+2*rng.random()
spread2y = 1+2*rng.random()
spread3x = 1+2*rng.random()
spread3y = 1+2*rng.random()
print 'with (x,y) sigmas: %.1f,%.1f; %.1f,%.1f; %.1f,%.1f ' %(spread1x,spread1y,spread2x,spread2y,spread3x,spread3y)
rot1,rot2,rot3 = rng.random()*(2.*math.pi),rng.random()*(2.*math.pi),rng.random()*(2.*math.pi)
print 'and rotation variables: %s; %s; %s ' % (rot1,rot2,rot3)
shape1 = make_gaussian_blob(X,Y,mid1x,mid1y,spread1x,spread1y,rot1) *0.8
shape2 = make_gaussian_blob(X,Y,mid2x,mid2y,spread2x,spread2y,rot2) *5.0
shape3 = make_gaussian_blob(X,Y,mid3x,mid3y,spread3x,spread3y,rot3) *3.0
# noise character of sources
variance = noise_size*(1.0 - shape1 + shape2) # source 3 has no variance effect
noise = rng.normal(0.,variance)
# mean_intensity character of sources
mean = shape1 + shape2 + shape3
Z = mean + noise
outobjs = 'r%s_objects' % seed
plt.imshow(mean,interpolation='nearest',cmap='gray',origin='lower')
plt.savefig(outobjs)
outobjs = 'r%s_objects-noise' % seed
plt.imshow(Z,interpolation='nearest',cmap='gray',origin='lower')
plt.savefig(outobjs)
else: # it's not a digit, so it's a filename. File should be just list of numbers.
infile = opts.infile
Z = np.genfromtxt(infile)
Nx,Ny = Z.shape
X, Y = np.meshgrid(np.arange(Nx), np.arange(Ny)) # these are both matrices of indices, each the same size as image
mean = Z.copy()
outfile += '_%s' % infile
#make bins (here, from the naked image)
if opts.dirichlet:
outfile += '_dirichletborders'
BINS = make_dirichlet_bins(Z,K,strategy)
if K == 0:
            K = BINS.shape[1] - 1 # number of bins implied by the dirichlet bin borders
print 'Note: an example overall histogram: (using the first of the dirichlet histograms)'
print np.histogram(np.ravel(Z),bins=BINS[0])[0]
else:
BINS = make_bin_borders(Z,K,strategy,safety_gap=np.inf,fname=opts.bfname)
if K == 0:
K = len(BINS) - 1
print 'Note: this makes the overall histogram this: (reality-check the final one especially)'
print np.histogram(np.ravel(Z),bins=BINS)[0]
outfile += '_K%d' % K
#get background alphas from LDA output, if specified
if opts.CWT:
alpha_BG = get_BG(opts.CWT)
outfile += '_LDA'
else:
print 'making bins and binning data ...'
# bogus, but we're setting the background alphas as if there were
# no sources in the image at the moment....
Cxk, alpha_BG = make_alphaBG(BINS,Nx,Ny,Z,opts.dirichlet)
#k=np.arange(float(K))
#CMAP = pl.cm.RdBu
#plt.clf()
#plt.imshow(np.sum((Cxk*k),axis=2).reshape(Ny,Nx),interpolation='nearest',cmap=CMAP,origin='lower')
#out = outfile + '_inbins'
#plt.savefig(out)
# 1.0 to be agnostic: all multinomial distributions are equally likely to be drawn
alpha_SRC = 1.0 * np.ones(alpha_BG.shape)
print 'proceeding to gradient descent\n'
rng.seed(1)
#do gradient ascent
num_top = 53
num_iters = 1
top_scores=np.zeros((num_top,11))
messages = np.zeros(9)
Bounds = [(0,Nx),(0,Ny),(1,Nx/2.),(1,Ny/2.),(0,2.*math.pi)]
print "gradient descent ... "
for i in range(num_top):
optima = np.zeros((num_iters,6))
for j in range(num_iters):
print '------------------------------------------\niter %s.%s\n' % (i,j)
brightr, brightc = np.where(Z==np.nanmax(Z))
idx = rng.randint(brightr.size)
mdx = brightc[idx]
mdy = brightr[idx]
sigmax = rng.rand()*(Nx/50.)
sigmay = rng.rand()*(Ny/50.)
phi = rng.rand()*2.*math.pi
theta = [mdx,mdy,sigmax,sigmay,phi]
args = [Cxk, X,Y,alpha_SRC, alpha_BG]
sltn, its, rc = sop.fmin_tnc(calc_score, theta, calc_score_gradient, [args], bounds=Bounds, fmin=-1e10, maxfun=1000, accuracy=1e-16)
#sltn, its, rc = sop.fmin_tnc(calc_score, theta, args=[args], approx_grad=True, bounds=Bounds, fmin=-1e10, maxfun=1000)
sc = calc_score(sltn, args)
optima[j,:5] = sltn
optima[j,5] = -sc
messages[rc] += 1
top_opt = scipy.delete(optima, np.where(np.isnan(optima)), 0)
top_opt = top_opt[np.argsort(top_opt[:,5])][-1]
top_scores[i,:6] = top_opt
#remove best source
top_mdx =top_opt[0]
top_mdy =top_opt[1]
top_sigx=top_opt[2]
top_sigy=top_opt[3]
top_phi =top_opt[4]
theta = [top_mdx,top_mdy,top_sigx,top_sigy,top_phi]
grad = -calc_score_gradient(theta, args)
top_scores[i,6:] = grad
print grad
x = (X-top_mdx)*math.cos(top_phi)+(Y-top_mdy)*math.sin(top_phi)
y = -(X-top_mdx)*math.sin(top_phi)+(Y-top_mdy)*math.cos(top_phi)
#if top_sigx>top_sigy:
a = sq(1.5*top_sigx)
b = sq(1.5*top_sigy)
#else:
# a = sq(top_sigy)
# b = sq(top_sigx)
Z[np.where((sq(x)/a+sq(y)/b) <= 1)] = np.nan
if opts.dirichlet:
BINS = make_dirichlet_bins(Z,K,strategy)
else:
BINS = make_bin_borders(Z,K,strategy,safety_gap=np.inf,fname=opts.bfname)
Cxk, alpha_BG = make_alphaBG(BINS,Nx,Ny,Z,opts.dirichlet)
#out = outfile + '_%dth_src' % i
#masked_array = np.ma.array(Z, mask=np.isnan(Z))
#cmap = pl.cm.gray
#cmap.set_bad('r',1.)
#plt.clf()
#plt.imshow(Z,interpolation='nearest',cmap=cmap,origin='lower')
#plt.savefig(out)
#plt.clf()
#plt.imshow(np.sum((Cxk*k),axis=2).reshape(Ny,Nx),interpolation='nearest',cmap=CMAP,origin='lower')
#out = outfile + '_inbins-%dth' % i
#plt.savefig(out)
if top_opt[5] < 0: break #quit early if only background left +source_to_background_ratio
print '%s local minimum' % messages[0]
print '%s fconverged' % messages[1]
print '%s xconverged' % messages[2]
print '%s max functions reached' % messages[3]
print '%s linear search failed' % messages[4]
print '%s constant' % messages[5]
print '%s no progress' % messages[6]
print '%s user aborted' % messages[7]
print '%s infeasible' % messages[8]
for i in range(top_scores.shape[0]):
print top_scores[i]
plt.imshow(mean,interpolation='nearest',cmap='gray',origin='lower')
plt.plot(top_scores[:,0],top_scores[:,1],'rx')
#print '%s\n%s' % (top_scores[:,0],top_scores[:,1])
for i in range(top_scores.shape[0]):
top_mdx =top_scores[i,0]
top_mdy =top_scores[i,1]
top_sigx=top_scores[i,2]
top_sigy=top_scores[i,3]
top_phi =top_scores[i,4]
#print '(%s,%s); %s, %s; %s' % (top_mdx, top_mdy, top_sigx, top_sigy, top_phi)
#plt.text(top_mdx, top_mdy, str(i+1),color='red')
#if top_sigx>top_sigy:
a = top_sigx; b = top_sigy;
#else:
# b = top_sigx; a = top_sigy;
rect = Ellipse((top_mdx,top_mdy),a*2,b*2,top_phi*(180./math.pi),edgecolor='red',facecolor='green',alpha = 0.5)
pl.gca().add_patch(rect)
plt.ylim(0,Ny)
plt.xlim(0,Nx)
plt.clim(0,0.0001)
#outfile = 'r%s_%s-top10-grad' % (seed,outfile)
plt.savefig(outfile)
np.savetxt(outfile,top_scores)
| gpl-2.0 | 503,761,756,869,428,200 | 35.777985 | 174 | 0.587988 | false |
sixy6e/geospatial-hdf5 | examples/append_reference_band_example.py | 1 | 3853 | #!/usr/bin/env python
import numpy
from scipy import ndimage
import pandas
from geoh5 import kea
from geoh5.kea import common as kc
# https://github.com/sixy6e/image-processing
from image_processing.segmentation import Segments
"""
Once completed open the file in tuiview to see the colourised segments
and the raster attribute table.
"""
def main():
"""
Create a segmented array.
Compute basic stats for each segment:
(min, max, mean, standard deviation, total, area)
Write the segmented image and the raster attribute table.
Add another raster band to the dataset as a linked/reference
dataset.
Compute basic stats for the same segments, but using different
input data, and save the new raster attribute table to the new
`reference/linked` band.
"""
# data dimensions
dims = (1000, 1000)
# create some random data and segment via value > 5000
seg_data = numpy.random.randint(0, 10001, dims).astype('uint32')
seg_data, nlabels = ndimage.label(seg_data > 5000)
# create some random data to calculate stats against
data = numpy.random.ranf(dims)
# create a segments class object
seg = Segments(seg_data, include_zero=True)
# retrieve basic stats (min, max, mean, standard deviation, total, area)
stats_table = seg.basic_statistics(data, dataframe=True)
stats_table.set_index("Segment_IDs", inplace=True)
# join via segment id, specifying 'outer' will account for empty segments
df = pandas.DataFrame({"Histogram": seg.histogram})
stats_table = df.join(stats_table, how='outer')
nrows = stats_table.shape[0]
# assign random colours to each segment
stats_table.insert(1, "Red", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(2, "Green", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(3, "Blue", numpy.random.randint(0, 256, (nrows)))
stats_table.insert(4, "Alpha", 255)
# define 1 output band and add another band later
kwargs = {'width': dims[1],
'height': dims[0],
'count': 1,
'compression': 4,
'chunks': (100, 100),
'blocksize': 100,
'dtype': seg_data.dtype.name}
with kea.open('add-reference-band-example.kea', 'w', **kwargs) as src:
src.write(seg_data, 1)
# define the layer type as thematic (labelled, classified etc)
src.write_layer_type(1, kc.LayerType.thematic)
# write the stats table as an attribute table
usage = {"Red": "Red",
"Green": "Green",
"Blue": "Blue",
"Alpha": "Alpha",
"Histogram": "PixelCount"}
src.write_rat(stats_table, 1, usage=usage)
# add a new image band, but as a reference to band 1
src.add_image_band(band_name='Reference to band 1', link=1)
src.write_layer_type(2, kc.LayerType.thematic)
# create some random data to calculate stats against
data = numpy.random.ranf(dims)
# retrieve basic stats (min, max, mean, standard deviation, total, area)
stats_table2 = seg.basic_statistics(data, dataframe=True)
stats_table2.set_index("Segment_IDs", inplace=True)
# join via segment id, specifying 'outer' will account for empty segments
df = pandas.DataFrame({"Histogram": seg.histogram})
stats_table2 = df.join(stats_table2, how='outer')
# insert colors
stats_table2.insert(1, "Red", stats_table["Red"])
stats_table2.insert(2, "Green", stats_table["Green"])
stats_table2.insert(3, "Blue", stats_table["Blue"])
stats_table2.insert(4, "Alpha", 255)
# write the rat to the newly created band 2
src.write_rat(stats_table2, 2, usage=usage)
if __name__ == '__main__':
main()
| mit | -672,002,758,900,673,300 | 35.349057 | 81 | 0.635349 | false |
RegulatoryGenomicsUPF/pyicoteo | pyicoteolib/outdated/statistics.py | 1 | 13072 | import math
import os
import sys
from datetime import datetime
import copy
import utils.log
from peak import Peak
class Analysis:
"""Abstract class for the different analysis methods"""
def __init__(self, input_path, output_path):
self.input_path = input_path
if os.path.isdir(output_path):
if output_path[-1] != '/':
output_path = '%s/'%output_path
self.output_dir = output_path
self.log = utils.log.Log('%sanalysis.txt' % self.output_dir)
else:
self.log = utils.log.Log(output_path)
self.output_dir = '%s/'%os.path.abspath(os.path.dirname(output_path))
def set_parameters(self):
raise NotImplementedError("You're using the abstract base class 'Analysis', use a specific class instead")
def run(self):
"""entry point for the analysis process """
self.log.write('\nAutomatically generated by PICOS ANALYSIS\n-----------------\n\nDate: %s\n'%(datetime.now()))
if os.path.isdir(self.input_path):
self.log.write('Directory analyzed:%s\n'%os.path.abspath(self.input_path))
for filename in os.listdir(self.input_path):
if os.path.isfile(self.input_path+filename):
self._analize_file(self.input_path+filename)
else:
self._analize_file(self.input_path)
self.log.write('\n\nAnalysis finished successfully!')
def _analize_file(self, file_path):
self.log.write('-------------------------------------\n\nFile:%s\n'%os.path.basename(file_path))
self.analysis(file_path)
def analysis(self, file_path):
raise NotImplementedError("You're using the abstract base class 'Analysis', use a specific class instead")
class PoissonAnalysis(Analysis):
def poisson(self, actual, mean):
#From StackOverflow
# naive: math.exp(-mean) * mean**actual / factorial(actual)
# iterative, to keep the components from getting too large or small:
p = math.exp(-mean)
for i in xrange(actual):
p *= mean
p /= i+1
return p
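    # Worked example: poisson(2, 1.0) = exp(-1) * 1**2 / 2! ~= 0.1839, matching the naive formula above.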
def _process_line(self, line):
"""returns an array with the line processed: start, end, array of length:peak"""
try:
line = line.split()
return (int(line[1]), int(line[2]), line[3].split('|'))
except:
print 'skipping header'
return None
def _correct_bias(self, p_value):
if p_value < 0:
return 0
else:
return p_value
def set_parameters(self, p_value, height_limit, correction_factor, read_length):
self.p_value = p_value
self.height_limit = height_limit
self.correction_factor = correction_factor
self.read_length = read_length
def analysis(self, file_path):
"""
We do 3 different global poisson statistical tests for each file:
Nucleotide analysis:
This analysis takes the nucleotide as the unit to analize. We give a p-value for each "height"
of read per nucleotides using an accumulated poisson. With this test we can infer more accurately
what nucleotides in the "peak" are part of the DNA binding site
Peak analysis:
This analysis takes as a basic unit the "peak" profile and performs a poisson taking into account the height (and the length?)
of the profile. This will help us to better know witch peaks are statistically significant and witch are product of chromatine noise
Number of reads analysis:
We analize the number of reads of the cluster. Number of reads = sum(xi *yi ) / read_length
"""
self.log.write('Correction factor for the size of the genome is %s...\n\n'%(self.correction_factor))
input_file = file(file_path, 'rb')
readsperbase_log = utils.log.Log('%s/reads_per_base_%s.log'%(self.output_dir, os.path.basename(file_path)))
maxheight_log = utils.log.Log('%s/maxheight_per_peak_%s.log'%(self.output_dir, os.path.basename(file_path)))
numreads_log = utils.log.Log('%s/numreads_per_peak_%s.log'%(self.output_dir, os.path.basename(file_path)))
total_bp_with_reads = 0.
start = sys.maxint
end = 0.
total_peaks = 0.
total_reads = 0
acum_height = 0.
heights = dict()
max_heights = dict()
numreads_dict = dict()
absolute_max_height = 0
absolute_max_numreads = 0
#process the file to extract the information
for line in input_file:
processed_line = self._process_line(line)
            if processed_line is not None:
start = min(start, processed_line[0])
end = max(end, processed_line[1])
max_height = 0.
acum_numreads = 0.
total_peaks+=1
for level in processed_line[2]:
numbers = level.split(':')
                    length = int(numbers[0])
                    height = float(numbers[1])
                    if height not in heights:
                        heights[height] = length
                    else:
                        heights[height] += length
                    total_bp_with_reads += length
                    max_height = max(max_height, height)
                    acum_numreads += length*height
#numreads per peak
numreads_in_cluster = acum_numreads/self.read_length
total_reads += numreads_in_cluster
absolute_max_numreads = max(numreads_in_cluster, absolute_max_numreads)
if int(numreads_in_cluster) not in numreads_dict:
numreads_dict[int(numreads_in_cluster)] = 1
else:
numreads_dict[int(numreads_in_cluster)] += 1
#maxheight per peak
if max_height not in max_heights:
max_heights[max_height] = 1
else:
max_heights[max_height] += 1
acum_height += max_height
absolute_max_height = max(max_height, absolute_max_height)
        total_bp = end-start # total base pairs in the analyzed region
reads_per_bp = total_bp_with_reads / total_bp*self.correction_factor
p_nucleotide = 1.
p_peak = 1.
p_numreads = 1.
k = 0
self.log.write_line('k\tBp\tPeak\tNum_reads')
while (absolute_max_numreads > k) or (absolute_max_height > k):
p_nucleotide -= self.poisson(k, reads_per_bp) #analisis nucleotide
p_peak -= self.poisson(k, acum_height/total_peaks) #analysis peak
p_numreads -= self.poisson(k, total_reads/total_peaks) #analysis numreads
p_nucleotide = self._correct_bias(p_nucleotide)
p_peak = self._correct_bias(p_peak)
p_numreads = self._correct_bias(p_numreads)
if ((p_nucleotide > self.p_value) or (p_peak > self.p_value) or (p_numreads > self.p_value)) and (k < self.height_limit):
self.log.write_line('%s\t%.5f\t%.5f\t%.5f'%(k, p_nucleotide, p_peak, p_numreads))
if k in heights:
readsperbase_log.write_line("%s\t%s\t%.5f%%\t%.5f"%(k, heights[k], heights[k]/total_bp_with_reads, p_nucleotide))
if k in max_heights:
maxheight_log.write_line("%s\t%s\t%.5f%%\t%.5f"%(k, max_heights[k], max_heights[k]/total_peaks, p_peak))
if k in numreads_dict:
numreads_log.write_line("%s\t%s\t%.5f%%\t%.5f"%(k, numreads_dict[k], numreads_dict[k]/total_peaks, p_numreads))
k+=1
class CorrelationAnalysis(Analysis):
delta_results = dict()
best_delta = -1
def __add_zeros(self, array, num_zeros):
for i in range(0, num_zeros):
array.append(0)
def analize_paired_peaks(self, positive_peak, negative_peak, delta):
from scipy.stats.stats import pearsonr
positive_array = []
negative_array = []
#delta correction
corrected_positive_start = positive_peak.start + delta
        #pad the later-starting peak with leading zeros so both arrays share the same origin
if corrected_positive_start > negative_peak.start:
self.__add_zeros(positive_array, corrected_positive_start - negative_peak.start)
elif negative_peak.start > corrected_positive_start:
self.__add_zeros(negative_array, negative_peak.start - corrected_positive_start)
#add the values of the peaks
positive_array.extend(positive_peak.get_heights())
negative_array.extend(negative_peak.get_heights())
#add the zeros at the end of the shortest array
if len(positive_array) > len(negative_array):
self.__add_zeros(negative_array, len(positive_array) - len(negative_array))
elif len(positive_array) < len(negative_array):
self.__add_zeros(positive_array, len(negative_array) - len(positive_array))
return pearsonr(negative_array, positive_array)
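    # Illustrative example (made-up coordinates): a positive peak starting at 100 with heights [1,2,3],
    # a negative peak starting at 103 with heights [3,2,1], and delta = 5:
    #   corrected positive start = 105, so positive_array is left-padded  -> [0, 0, 1, 2, 3]
    #   negative_array is right-padded to the same length                 -> [3, 2, 1, 0, 0]
    # and the method returns pearsonr([3,2,1,0,0], [0,0,1,2,3]).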
def set_parameters(self, min_delta=40, max_delta=200, delta_step=2, height_filter=15, duplicate_limit = 20, no_short=False):
self.min_delta = min_delta
self.max_delta = max_delta
self.delta_step = delta_step
self.height_filter = height_filter
self.duplicate_limit = duplicate_limit
self.no_short = no_short
def analysis(self, file_path):
#make sure that the file is sorted
if self.no_short:
sorted_pk = file(file_path, 'r')
else:
print 'Sorting stranded pk file...'
bsort = utils.bigSort.BigSort()
sorted_pk = bsort.sort(file_path,None,lambda x:(x.split()[0],int(x.split()[1]),int(x.split()[2])))
print 'Sorted. Calculating correlation...'
positive_peak = None
negative_peak = None
self.analized_pairs = 0.
for line in sorted_pk:
peak = Peak(line, rounding = True)
if (peak.get_max_height() > self.height_filter) and not peak.has_duplicates(self.duplicate_limit):
if peak.strand == '+':
positive_peak = copy.deepcopy(peak)#big positive peak found
self._start_analysis(positive_peak, negative_peak)
else:
negative_peak = copy.deepcopy(peak)#big negative peak found
self._start_analysis(positive_peak, negative_peak)
print 'FINAL DELTAS:'
data = []
for delta in range(self.min_delta, self.max_delta, self.delta_step):
if delta in self.delta_results:
self.delta_results[delta]=self.delta_results[delta]/self.analized_pairs
data.append(self.delta_results[delta])
self.log.write_line('Delta %s:%s'%(delta, self.delta_results[delta]))
try:
import matplotlib.pyplot
matplotlib.pyplot.plot(range(self.min_delta, self.max_delta), data)
matplotlib.pyplot.plot()
matplotlib.pyplot.savefig('%s%s.png'%(self.output_dir, os.path.basename(file_path)))
#matplotlib.pyplot.show()
except ImportError:
            print "you don't have matplotlib installed, therefore picos can't create the graphs"
except:
print 'cant print the plots, unknown error'
sorted_pk.close()
if self.no_short:
print 'removing temp file at %s...'%(sorted_pk.name)
os.remove(sorted_pk.name)
def _start_analysis(self, positive_peak, negative_peak):
if positive_peak is not None and negative_peak is not None:
if (abs(negative_peak.start-positive_peak.end) < self.max_delta or abs(positive_peak.start-negative_peak.end) < self.max_delta or positive_peak.intersects(negative_peak)) and positive_peak.chr == negative_peak.chr:
self.analized_pairs+=1
print 'Pair of peaks:'
print positive_peak.line(), negative_peak.line(),
for delta in range(self.min_delta, self.max_delta+1, self.delta_step):
r_squared = self.analize_paired_peaks(positive_peak, negative_peak, delta)[0]**2
if delta not in self.delta_results:
self.delta_results[delta] = r_squared
else:
self.delta_results[delta] += r_squared
#print 'Delta %s:%s'%(delta, result)
| gpl-3.0 | 3,455,217,578,828,780,500 | 43.311864 | 226 | 0.560205 | false |
sthyme/ZFSchizophrenia | BehaviorAnalysis/HSMovieAnalysis/highspeedmovieanalysis150.py | 1 | 16626 | #!/usr/bin/python -tt
"""
SCRIPT RUNNING NOTES, WILL ADD SOMEDAY
"""
# IMPORT NECESSARY MODULES
import matplotlib.image as mpimg
import numpy as np
import cv2
from datetime import datetime, timedelta
import os,sys,glob,re,argparse
import sys
import imageTools
import motionTools
#from collections import dequeue
from scipy.stats import mode
import math
numberofwells = 96
numberofrows = 8
numberofcols = 12
xdim = 1088
ydim = 660
parser = argparse.ArgumentParser(description='loading for fish behavior files')
parser.add_argument('-c', type=str, action="store", dest="centroidfile")
parser.add_argument('-m', type=str, action="store", dest="moviefile")
args = parser.parse_args()
centroidfile = args.centroidfile
videoStream = args.moviefile
#pixThreshold = imageTools.getPixThreshold(0.032)
pixThreshold = 3
frameRate = imageTools.getFrameRate() # default is 30
#well_conversion = {0:84,1:72,2:60,3:48,4:36,5:24,6:12,7:0,8:85,9:73,10:61,11:49,12:37,13:25,14:13,15:1,16:86,17:74,18:62,19:50,20:38,21:26,22:14,23:2,24:87,25:75,26:63,27:51,28:39,29:27,30:15,31:3,32:88,33:76,34:64,35:52,36:40,37:28,38:16,39:4,40:89,41:77,42:65,43:53,44:41,45:29,46:17,47:5,48:90,49:78,50:66,51:54,52:42,53:30,54:18,55:6,56:91,57:79,58:67,59:55,60:43,61:31,62:19,63:7,64:92,65:80,66:68,67:56,68:44,69:32,70:20,71:8,72:93,73:81,74:69,75:57,76:45,77:33,78:21,79:9,80:94,81:82,82:70,83:58,84:46,85:34,86:22,87:10,88:95,89:83,90:71,91:59,92:47,93:35,94:23,95:11}
well_conversion = {0:0,1:8,2:16,3:24,4:32,5:40,6:48,7:56,8:64,9:72,10:80,11:88,12:1,13:9,14:17,15:25,16:33,17:41,18:49,19:57,20:65,21:73,22:81,23:89,24:2,25:10,26:18,27:26,28:34,29:42,30:50,31:58,32:66,33:74,34:82,35:90,36:3,37:11,38:19,39:27,40:35,41:43,42:51,43:59,44:67,45:75,46:83,47:91,48:4,49:12,50:20,51:28,52:36,53:44,54:52,55:60,56:68,57:76,58:84,59:92,60:5,61:13,62:21,63:29,64:37,65:45,66:53,67:61,68:69,69:77,70:85,71:93,72:6,73:14,74:22,75:30,76:38,77:46,78:54,79:62,80:70,81:78,82:86,83:94,84:7,85:15,86:23,87:31,88:39,89:47,90:55,91:63,92:71,93:79,94:87,95:95}
#well_conversion = {0:0,1:12,2:24,3:36,4:48,5:60,6:72,7:84,8:1,9:13,10:25,11:37,12:49,13:61,14:73,15:85,16:2,17:14,18:26,19:38,20:50,21:62,22:74,23:86,24:3,25:15,26:27,27:39,28:51,29:63,30:75,31:87,32:4,33:16,34:28,35:40,36:52,37:64,38:76,39:88,40:5,41:17,42:29,43:41,44:53,45:65,46:77,47:89,48:6,49:18,50:30,51:42,52:54,53:66,54:78,55:90,56:7,57:19,58:31,59:43,60:55,61:67,62:79,63:91,64:8,65:20,66:32,67:44,68:56,69:68,70:80,71:92,72:9,73:21,74:33,75:45,76:57,77:69,78:81,79:93,80:10,81:22,82:34,83:46,84:58,85:70,86:82,87:94,88:11,89:23,90:35,91:47,92:59,93:71,94:83,95:95}
#def convert_ys(j2):
# if 0 <= j2 <= 11:
# num = 0
# elif 12 <=j2 <= 23:
# num = 1
# elif 24 <=j2 <= 35:
# num = 2
# elif 36 <=j2 <= 47:
# elif 48 <=j2 <= 59:
# elif 60 <=j2 <= 23:
def calc_mode(deq, nump_arr):
for j,k in enumerate(nump_arr[:,0]): #so k are the values, j are the indices
nump_arr[j,:] = mode(np.array([x[j,:] for x in deq]))[0]
return nump_arr
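# calc_mode builds a per-pixel temporal mode image: for each row j it stacks that row across all
# frames in `deq` and keeps the most common value at every pixel, giving an estimate of the
# static background that later frames are diffed against.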
def imageMode(movielist, modename):
moviedeq = []
i2=0
#movielist = ["hsmovieTue, Nov 8, 2016_1.avi", "hsmovieTue, Nov 8, 2016_2.avi", "hsmovieTue, Nov 8, 2016_3.avi", "hsmovieTue, Nov 8, 2016_4.avi", "hsmovieTue, Nov 8, 2016_5.avi", "hsmovieTue, Nov 8, 2016_6.avi"]
#modename = ''.join(map(str,movielist))
for filenumber in movielist:
#for file in glob.glob(movielist):
#for file in glob.glob("*avi"):
#if i2 == 100:
# break
#print "testing: ", videoStream.split('-')[x]
#file = "*_" + str(filenumber) + ".avi"
#file2 = glob.glob("*_" + str(filenumber) + ".avi")
#print file2
cap = cv2.VideoCapture(glob.glob("*_" + str(filenumber) + ".avi")[0])
#cap = cv2.VideoCapture(file2)
#cap = cv2.VideoCapture(videoStream.split('-')[x])
ret,frame = cap.read()
storedFrame = imageTools.grayBlur(frame)
totalFrames = 0
while(cap.isOpened()):
ret,frame = cap.read()
if ret == False:
#print 'End of Video'
break
currentFrame = imageTools.grayBlur(frame)
if totalFrames < 50:
if totalFrames % 3 == 0:
#print "adding frames: ", totalFrames
moviedeq.append(currentFrame)
totalFrames += 1
storedFrame = currentFrame # comment out if nothing is in first frame
i2 += 1
testing = calc_mode(moviedeq, np.zeros([660,1088]))
#print "saving mode.png"
cv2.imwrite("mode_" + modename + ".png", testing)
#cv2.imwrite("mode_"+ movielist[0] + "_to_" + movielist[len(movielist)] + ".png", testing)
cap.release()
cv2.destroyAllWindows()
def max_min():
with open(centroidfile, 'rb') as fid:
#with open("testlog.centroid1.Tue, Jun 21, 2016", 'rb') as fid:
cen_data_array = np.fromfile(fid, '>u2')
cen_data_array = cen_data_array.reshape(cen_data_array.size / (numberofwells*2), (numberofwells*2))
cen_data_array[cen_data_array == 65535] = 0 # just setting to zero to make it easier to ignore
maxxys = []
minxys = []
for n in range (0, numberofwells*2,2):
maxtest = np.amax(cen_data_array[:,n])
mintest = np.amin(cen_data_array[:,n])
# Adds the x and y coordinates to the arrays in an interleaved manner for the next steps, ie, x1 then y1, x2 then y2
if maxtest == mintest and maxtest == 0:
maxxys.append(-100)
maxxys.append(-100)
minxys.append(-100)
minxys.append(-100)
# IF WELL IS EMPTY OR NOTHING EVER MOVES NEED A CHECK - ie, if MIN AND MAX ARE EQUAL?
else:
maxrealx = maxtest
minrealx = np.amin(cen_data_array[:,n][np.nonzero(cen_data_array[:,n])])
maxrealy = np.amax(cen_data_array[:,n+1])
minrealy = np.amin(cen_data_array[:,n+1][np.nonzero(cen_data_array[:,n+1])])
maxxys.append(maxrealx)
maxxys.append(maxrealy)
minxys.append(minrealx)
minxys.append(minrealy)
maxxysnp = np.array(maxxys)
minxysnp = np.array(minxys)
return( maxxysnp, minxysnp)
def main(pixThreshold,frameRate,videoStream):
row={0:0,1:12,2:24,3:36,4:48,5:60,6:72,7:84}
saveFreq = 4500 # how often to save data, in frames, just making sure this is really big, so I don't have any issues, would be important for memory considerations if I was doing a long movie
#i,m = imageTools.loadImageAndMask()
filenumber = videoStream.split('.')[0].split('_')[len(videoStream.split('.')[0].split('_'))-1]
#print "testing: ", filenumber
# if 1 <= int(filenumber) <= 20:
# movielist = list(range(1,21))
# if 21 <= int(filenumber) <= 40:
# movielist = list(range(21,41))
# if 41 <= int(filenumber) <= 60:
# movielist = list(range(41,61))
# if 61 <= int(filenumber) <= 70:
# movielist = list(range(51,71))
# if 71 <= int(filenumber) <= 80:
# movielist = list(range(71,91))
# if 81 <= int(filenumber) <= 478:
# movielist = list(range(int(filenumber)-10,int(filenumber)+10))
# if 479 <= filenumber <= 488:
# movielist = list(range(468,488))
# if 489 <= int(filenumber) <= 498:
# movielist = list(range(489,509))
# if 499 <= int(filenumber) <= 598:
# movielist = list(range(int(filenumber)-10,int(filenumber)+10))
# if 599 <= int(filenumber) <= 608:
# movielist = list(range(599,609))
# if 609 <= int(filenumber) <= 631:
# movielist = list(range(609,632))
# if 632 <= int(filenumber) <= 661:
# movielist = list(range(632,662))
# if 662 <= int(filenumber) <= 691:
# movielist = list(range(662,692))
# if 692 <= int(filenumber) <= 721:
# movielist = list(range(692,722))
# if 722 <= int(filenumber) <= 741:
# movielist = list(range(722,742))
# if 742 <= int(filenumber) <= 781:
# movielist = list(range(742,782))
# if 782 <= int(filenumber) <= 821:
# movielist = list(range(782,822))
# if 822 <= int(filenumber) <= 862:
# movielist = list(range(822,862))
# if 862 <= int(filenumber) <= 901:
# movielist = list(range(862,901))
if 1 <= int(filenumber) <= 20:
movielist = list(range(1,21))
if 21 <= int(filenumber) <= 40:
movielist = list(range(21,41))
if 41 <= int(filenumber) <= 60:
movielist = list(range(41,61))
if 61 <= int(filenumber) <= 70:
movielist = list(range(61,71))
# if 1 <= int(filenumber) <= 70:
# movielist = list(range(71,111))
if 71 <= int(filenumber) <= 100:
movielist = list(range(71,111))
if 101<= int(filenumber) <= 125:
movielist = list(range(101,126))
if 126 <= int(filenumber) <= 150:
movielist = list(range(126,151))
modename = str(movielist[0]) + "to" + str(movielist[len(movielist)-1])
#modename = ''.join(map(str,movielist))
    modefilename = "mode_" + modename + ".png"
    #print movielist
    try:
        # only rebuild the mode (background) image if it has not been generated already
        mpimg.imread(modefilename)
        #print "mode file already generated"
    except:
        imageMode(movielist, modename)
e = imageTools.loadmodeImage(modefilename)
#e = imageTools.loadModeImage()
roimask = np.zeros((660,1088))
(maxxysnp, minxysnp) = max_min()
maxxs = []
minxs = []
maxys = []
minys = []
for j in range (0, numberofwells*2,2):
if maxxysnp[j] == -100:
# if nothing ever moved in this well and there is no max or min value (could happen with a totally empty well)
maxxs.append(np.nan)
maxys.append(np.nan)
minxs.append(np.nan)
minys.append(np.nan)
else:
maxxs.append(maxxysnp[j])
maxys.append(maxxysnp[j+1])
minxs.append(minxysnp[j])
minys.append(minxysnp[j+1])
npmaxxs = np.asarray(maxxs)
npmaxys = np.asarray(maxys)
npminxs = np.asarray(minxs)
npminys = np.asarray(minys)
npmaxxs = np.reshape(npmaxxs, (numberofcols,numberofrows))
npmaxys = np.reshape(npmaxys, (numberofcols,numberofrows)) #12,8
npminxs = np.reshape(npminxs, (numberofcols,numberofrows))
npminys = np.reshape(npminys, (numberofcols,numberofrows))
#print npmaxxs
#print npmaxys
#print npminxs
#print npminys
cmaxxs = []
cminxs = []
cmaxys = []
cminys = []
for j2 in range (0, numberofwells):
maxx = maxxs[well_conversion[j2]]
maxy = maxys[well_conversion[j2]]
miny = minys[well_conversion[j2]]
minx = minxs[well_conversion[j2]]
#print "j2: ", j2, well_conversion[j2], maxx, maxy, minx, miny
#print "wcj2/8: ", well_conversion[j2]/8
#print npmaxxs[well_conversion[j2]/8,:]
#print npminxs[well_conversion[j2]/8,:]
#print np.nanmean(npmaxxs[well_conversion[j2]/8,:])
#print np.nanmean(npminxs[well_conversion[j2]/8,:])
#print "j2/12: ", j2/12
#print npmaxys[:,j2/12]
#print npminys[:,j2/12]
#print np.nanmean(npmaxys[:,j2/12])
#print np.nanmean(npminys[:,j2/12])
if minx == maxx:
maxx = maxx + 2
if miny == maxy:
maxy = maxy + 2
if math.isnan(float(maxx)): # could also add a condition if min and max are equal to each other
#print "very first if statement"
maxx = np.nanmean(npmaxxs[well_conversion[j2]/numberofrows,:])
minx = np.nanmean(npminxs[well_conversion[j2]/numberofrows,:])
maxy = np.nanmean(npmaxys[:,j2/numberofcols])
miny = np.nanmean(npminys[:,j2/numberofcols])
#print "new means: ", maxx, minx, maxy, miny
# In the case that the entire row never gets any values in any wells and the mean is still NaN
# not 100% sure that this is going to work, will get a runtime warning, so then can check it out??
# mostly not sure about the well_conversions for the Xs
if math.isnan(float(maxx)) and math.isnan(float(minx)):
# print "first if statement"
if well_conversion[j2] < 8:
#print "2nd if statement"
minx = well_conversion[j2]
maxx = well_conversion[j2]+85
else:
# print "2nd else statement"
minx = cminxs[well_conversion[j2]-8] + 85
maxx = cmaxxs[well_conversion[j2]-8] + 85
###if j2 <= 11:
# print "3rd if statement"
### miny = j2
### maxy = j2+60
### minx = j2
if math.isnan(float(maxy)) and math.isnan(float(miny)):
#print "4th if statement"
if j2 < 12:
# print "5th if statement"
miny = j2
maxy = j2+85
else:
# print "5th else statement"
miny = cminys[j2-12] + 85
maxy = cmaxys[j2-12] + 85
# End of untested section
cmaxxs.append(maxx)
cmaxys.append(maxy)
cminxs.append(minx)
cminys.append(miny)
#print miny, maxy, minx, maxx, j2, j2+1
roimask[miny:maxy,minx:maxx] = j2+1
np.set_printoptions(threshold=np.nan) # printing entire array
cmaxxs.sort()
cmaxys.sort()
cminxs.sort()
cminys.sort()
rm,roimaskweights = imageTools.convertMaskToWeights(roimask)
# start camera or open video
videoType, displayDiffs = imageTools.getVideoType(videoStream)
cap = cv2.VideoCapture(videoStream)
# adjust video resolution if necessary (sized to mask)
print 'Camera resolution is %s x %s' % (str(roimask.shape[1]),str(roimask.shape[0]))
cap.set(3,roimask.shape[1])
cap.set(4,roimask.shape[0])
# Set Pixel Threshold
ret,frame = cap.read()
storedImage = np.array(e * 255, dtype = np.uint8)
# have to convert the float32 to uint8
storedMode = imageTools.Blur(storedImage)
storedFrame = imageTools.grayBlur(frame)
#pixThreshold = int(np.floor( pixThreshold * storedFrame.shape[0] ))
    print('PixelThreshold is %i' % pixThreshold)
#cenData = np.zeros([ saveFreq, len(np.unique(roimaskweights))*2])
cenData = np.zeros([ saveFreq, len(np.unique(roimaskweights))*2 -2])
#print "cenData shape: ", np.shape(cenData)
pixData = np.zeros([ saveFreq, len(np.unique(roimaskweights))])
#pixData = np.zeros([ saveFreq, len(np.unique(roimaskweights))])
i = 0 # a counter for saving chunks of data
totalFrames = 0
startTime = datetime.now()
print('Analyzing motion data...')
frame_roi = []
while(cap.isOpened()):
ret,frame = cap.read()
if ret == False:
print 'End of Video'
break
currentFrame = imageTools.grayBlur(frame)
currentFrame2 = imageTools.grayBlur(frame)
diffpix = imageTools.diffImage(storedFrame,currentFrame2,pixThreshold,displayDiffs)
diff = imageTools.trackdiffImage(storedMode,currentFrame,pixThreshold,displayDiffs)
#cv2.imwrite(videoStream + '_diffimage_' + str(i) + ".png", diff)
diff.dtype = np.uint8
_,contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
MIN_THRESH = 20.0
MIN_THRESH_P = 20.0
roi_dict = {}
for r in range(0,numberofwells):
roi_dict[r+1] = []
for cs in range(0,len(contours)):
#print "area and lenght: ", cv2.contourArea(contours[cs]), cv2.arcLength(contours[cs], True)
if cv2.contourArea(contours[cs]) < 1.0:
continue
if cv2.arcLength(contours[cs],True) < 1.0:
continue
if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:
M = cv2.moments(contours[cs])
#print M
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
#print "cX, cY :", cX, cY
r=1
c=1
for x in range(0,len(cmaxxs)):
#print "cmaxxs: ", cmaxxs[x]
if cX > cmaxxs[x]:
r=x+1
#print "r: ", r
#print "r: ", r
for y in range(0, len(cmaxys)):
#print "cmaxys: ", cmaxys[x]
if cY > cmaxys[y]:
c=y+1
# print "c: ", c
if c == numberofwells:
c = c-1
if r == numberofwells:
r = r-1
area = cv2.contourArea(contours[cs])
perim = cv2.arcLength(contours[cs],True)
#print c, numberofcols, c/numberofcols
if not roi_dict[r/numberofrows+1+row[c/numberofcols]]:
roi_dict[r/numberofrows+1+row[c/numberofcols]].append((area*perim,cX,cY))
else:
if roi_dict[r/numberofrows+1+row[c/numberofcols]] < area*perim:
roi_dict[r/numberofrows+1+row[c/numberofcols]][0] = (area*perim,cX,cY)
frame_roi.append(roi_dict)
pixcounts = []
pixcounts = np.bincount(roimaskweights, weights=diffpix.ravel())
pixData[i,:] = np.hstack((pixcounts))
counts = []
keys = roi_dict.keys()
keys.sort()
for k in keys:
#print "k: ", k
x = -10000
y = -10000
if roi_dict[k]:
x = roi_dict[k][0][1]
y = roi_dict[k][0][2]
counts.append(x)
counts.append(y)
cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)
if i == 284:
cv2.imwrite(videoStream + '_trackedimagewithlines_' + str(i) + ".png", storedImage)
cenData[i,:] = np.asarray(counts)
totalFrames += 1
storedFrame = currentFrame
i += 1
    # use named handles (avoid shadowing the builtin 'file') and close them once written
    centroid_out = open(videoStream + ".centroid2",'w')
    for x in range(0,285):
        for y in range(0,192):
            centroid_out.write(str(int(cenData[x,:][y])) + '\n')
    centroid_out.close()
    pixData = pixData[:i,:]
    pixData = pixData[:,1:] # get rid of background column
    motion_out = open(videoStream + ".motion2",'w')
    #motion_out.write("12/8/2015" + '\015')
    for x in range(0,285):
        for y in range(0,numberofwells):
            motion_out.write(str(int(pixData[x,:][y])) + '\n')
    motion_out.close()
# vidInfo = {}
# release camera
cap.release()
cv2.destroyAllWindows()
# return vidInfo
#def cmdLine(pixThreshold,frameRate,videoStream):
main(pixThreshold,frameRate,videoStream)
#vidInfo = main(pixThreshold,frameRate,videoStream)
# return vidInfo
#if __name__ == '__main__':
# pixThreshold = imageTools.getPixThreshold(0.032)
# frameRate = imageTools.getFrameRate() # default is 30
# videoStream = imageTools.getVideoStream(sys.argv)
# vidInfo = cmdLine(pixThreshold,frameRate,videoStream)
| mit | 5,532,531,976,551,207,000 | 37.045767 | 576 | 0.664682 | false |
abelhj/svtools | scripts/geno_refine_12.py | 1 | 15812 | #!/usr/bin/env python
import argparse, sys, copy, gzip, time, math, re
import numpy as np
import pandas as pd
#import fastcluster
from scipy import stats, cluster, spatial
from scipy.stats import multivariate_normal
from sklearn import metrics
from collections import Counter, defaultdict, namedtuple
import statsmodels.formula.api as smf
from operator import itemgetter
import warnings
from svtools.vcf.file import Vcf
from svtools.vcf.genotype import Genotype
from svtools.vcf.variant import Variant
import svtools.utils as su
vcf_rec = namedtuple ('vcf_rec', 'var_id sample svtype AF GT CN AB')
def recluster(df):
df=df[(df.AB!=".")].copy()
df.loc[:,'AB']=pd.to_numeric(df.loc[:,'AB'])
df.loc[:,'CN']=pd.to_numeric(df.loc[:,'CN'])
tp=df.iloc[0,:].loc['svtype']
gt_code={'0/0':1, '0/1':2, '1/1':3}
gt_code_rev={1:'0/0', 2:'0/1', 3:'1/1'}
df.loc[:,'gtn']=df.loc[:, 'GT'].map(gt_code)
if tp in ['DEL']:
recluster_DEL(df)
re_recluster_DEL(df)
df.loc[:,'GTR']=df.loc[:, 'gt_new_re'].map(gt_code_rev)
elif tp in ['DUP']:
recluster_DUP(df)
re_recluster_DUP(df)
df.loc[:,'GTR']=df.loc[:, 'GT'].copy()
return df
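# Expected input, as built by load_df() below (the values here are hypothetical):
#   var_id  sample  svtype  AF    GT    CN   AB
#   DEL_1   NA001   DEL     0.20  0/1   1.1  0.47
# recluster() returns the same rows with refined-genotype columns added (gtn, gt_new, gt_new_re,
# gq_re, med_gq_re, q10_gq_re, GTR); for DUP sites the GTR column currently just copies the original GT.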
def recluster_DEL(df):
#priors
mu_0={1: np.array([0.03, 2]), 2:np.array([0.46,1.1]), 3:np.array([0.94,0.1])}
psi={1:np.matrix('0.00128 -0.00075; -0.00075 1.1367'),
2:np.matrix('0.013 -0.0196; -0.0196 0.4626'),
3:np.matrix('0.0046 -0.0112; -0.0112 0.07556')}
lambda_0=1
nu_0=1
gpd=df.loc[:, ['gtn', 'CN', 'AB']].groupby(['gtn'])
covs=gpd[['AB','CN']].cov()
mns=gpd[['AB', 'CN']].mean()
cts=gpd.size()
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
lin_fit=smf.ols('CN~AB',df).fit()
df.loc[:, 'gt_adj']=df.loc[:, 'gtn'].copy()
#check that CN, AB are correlated, and in the right direction
if (lin_fit.rsquared>0.5) and (-1*lin_fit.params[1]>0.5):
x_int=-lin_fit.params[0]/lin_fit.params[1]
#adjust init GT calls if AB shifted toward 0
if x_int<1:
#find mdpts between neighboring GT
mins=gpd['AB'].min()
maxes=gpd['AB'].max()
bound1=0.2
bound2=0.7
if (2 in mins) and (1 in maxes):
bound1=0.5*(mins[2]+maxes[1])
if (3 in mins) and (2 in maxes):
bound2=0.5*(mins[3]+maxes[2])
newbound1=bound1*x_int
newbound2=bound2*x_int
df.loc[:, 'gt_adj']=pd.to_numeric(pd.cut(df['AB'], bins=[-1, newbound1, newbound2, 1], labels=['1', '2', '3']))
gpd=df.loc[:,['gt_adj', 'CN', 'AB']].groupby(['gt_adj'])
covs=gpd[['AB', 'CN']].cov()
mns=gpd[['AB', 'CN']].mean()
cts=gpd.size()
mu_map={1: get_mu_map(1, cts, lambda_0, mu_0, mns),
2: get_mu_map(2, cts, lambda_0, mu_0, mns),
3: get_mu_map(3, cts, lambda_0, mu_0, mns)}
sigma_map={1: get_sigma_map(1, cts, lambda_0, psi, covs, mns, mu_0),
2: get_sigma_map(2, cts, lambda_0, psi, covs, mns, mu_0),
3: get_sigma_map(3, cts, lambda_0, psi, covs, mns, mu_0)}
df.loc[:, 'lld1']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[1], cov=sigma_map[1])
df.loc[:, 'lld2']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[2], cov=sigma_map[2])
df.loc[:, 'lld3']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[3], cov=sigma_map[3])
lld_code={'lld1':1, 'lld2':2, 'lld3':3}
df.loc[:,'gt_new']=df.loc[:, ['lld1', 'lld2', 'lld3']].idxmax(1).map(lld_code)
df.loc[:, 'gq']=df.loc[:, ['lld1', 'lld2', 'lld3']].max(axis=1)-df.loc[:, ['lld1', 'lld2', 'lld3']].median(axis=1)
df.loc[:, 'med_gq']=df.loc[:, 'gq'].median()
df.loc[:, 'q10_gq']=df.loc[:, 'gq'].quantile(0.1)
return
def re_recluster_DEL(df):
#priors
mu_0={1: np.array([0.03, 2]), 2:np.array([0.46,1.1]), 3:np.array([0.94,0.1])}
psi={1:np.matrix('0.00128 -0.00075; -0.00075 1.1367'),
2:np.matrix('0.013 -0.0196; -0.0196 0.4626'),
3:np.matrix('0.0046 -0.0112; -0.0112 0.07556')}
lambda_0=1
nu_0=1
df.loc[:, 'gt_adj']=df.loc[:,'gt_new'].copy()
df.loc[ (df['gt_new']==1) & (df['AB']>0.1) & (df['CN']<1.5), 'gt_adj']=2
gpd=df.loc[:, ['gt_adj', 'CN', 'AB']].groupby(['gt_adj'])
covs=gpd[['AB','CN']].cov()
mns=gpd[['AB', 'CN']].mean()
cts=gpd.size()
mu_map={1: get_mu_map(1, cts, lambda_0, mu_0, mns),
2: get_mu_map(2, cts, lambda_0, mu_0, mns),
3: get_mu_map(3, cts, lambda_0, mu_0, mns)}
sigma_map={1: get_sigma_map(1, cts, lambda_0, psi, covs, mns, mu_0),
2: get_sigma_map(2, cts, lambda_0, psi, covs, mns, mu_0),
3: get_sigma_map(3, cts, lambda_0, psi, covs, mns, mu_0)}
df.loc[:, 'lld1']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[1], cov=sigma_map[1])
df.loc[:, 'lld2']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[2], cov=sigma_map[2])
df.loc[:, 'lld3']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[3], cov=sigma_map[3])
lld_code={'lld1':1, 'lld2':2, 'lld3':3}
df.loc[:,'gt_new_re']=df.loc[:, ['lld1', 'lld2', 'lld3']].idxmax(1).map(lld_code)
df.loc[:, 'gq_re']=df.loc[:, ['lld1', 'lld2', 'lld3']].max(axis=1)-df.loc[:, ['lld1', 'lld2', 'lld3']].median(axis=1)
df.loc[:, 'med_gq_re']=df.loc[:, 'gq_re'].median()
df.loc[:, 'q10_gq_re']=df.loc[:, 'gq_re'].quantile(0.1)
return
# NOTE: this stub is shadowed by the full re_recluster_DUP implementation defined just below;
# in Python the later definition wins, so the stub is never used at runtime.
def re_recluster_DUP(df):
    df.loc[:, 'gt_new_re']=0
    df.loc[:, 'gq_re']=0
    df.loc[:, 'med_gq_re']=0
    df.loc[:, 'q10_gq_re']=0
    return
def re_recluster_DUP(df):
#priors
mu_0={1: np.array([0.03, 2]), 2:np.array([0.27,3]), 3:np.array([0.45,4])}
psi={1:np.matrix('0.00128 -0.00075; -0.00075 1.1367'),
2:np.matrix('0.013 -0.0196; -0.0196 0.4626'),
3:np.matrix('0.0046 -0.0112; -0.0112 0.07556')}
lambda_0=1
nu_0=1
df.loc[:, 'gt_adj']=df.loc[:,'gt_new'].copy()
df.loc[ (df['gt_new']==1) & (df['AB']>0.1) & (df['CN']>2.5), 'gt_adj']=2
gpd=df.loc[:, ['gt_adj', 'CN', 'AB']].groupby(['gt_adj'])
covs=gpd[['AB','CN']].cov()
mns=gpd[['AB', 'CN']].mean()
cts=gpd.size()
mu_map={1: get_mu_map(1, cts, lambda_0, mu_0, mns),
2: get_mu_map(2, cts, lambda_0, mu_0, mns),
3: get_mu_map(3, cts, lambda_0, mu_0, mns)}
sigma_map={1: get_sigma_map(1, cts, lambda_0, psi, covs, mns, mu_0),
2: get_sigma_map(2, cts, lambda_0, psi, covs, mns, mu_0),
3: get_sigma_map(3, cts, lambda_0, psi, covs, mns, mu_0)}
df.loc[:, 'lld1']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[1], cov=sigma_map[1])
df.loc[:, 'lld2']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[2], cov=sigma_map[2])
df.loc[:, 'lld3']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[3], cov=sigma_map[3])
lld_code={'lld1':1, 'lld2':2, 'lld3':3}
df.loc[:,'gt_new_re']=df.loc[:, ['lld1', 'lld2', 'lld3']].idxmax(1).map(lld_code)
df.loc[:, 'gq_re']=df.loc[:, ['lld1', 'lld2', 'lld3']].max(axis=1)-df.loc[:, ['lld1', 'lld2', 'lld3']].median(axis=1)
df.loc[:, 'med_gq_re']=df.loc[:, 'gq_re'].median()
df.loc[:, 'q10_gq_re']=df.loc[:, 'gq_re'].quantile(0.1)
return
def recluster_DUP(df):
#priors
mu_0={1: np.array([0.03, 2]), 2:np.array([0.27,3]), 3:np.array([0.45,4])}
psi={1:np.matrix('0.00128 -0.00075; -0.00075 1.1367'),
2:np.matrix('0.013 -0.0196; -0.0196 0.4626'),
3:np.matrix('0.0046 -0.0112; -0.0112 0.07556')}
lambda_0=1
nu_0=1
gpd=df.loc[:, ['gtn', 'CN', 'AB']].groupby(['gtn'])
covs=gpd[['AB','CN']].cov()
mns=gpd[['AB', 'CN']].mean()
cts=gpd.size()
df.loc[:, 'gt_adj']=df.loc[:, 'gtn'].copy()
mu_map={1: get_mu_map(1, cts, lambda_0, mu_0, mns),
2: get_mu_map(2, cts, lambda_0, mu_0, mns),
3: get_mu_map(3, cts, lambda_0, mu_0, mns)}
sigma_map={1: get_sigma_map(1, cts, lambda_0, psi, covs, mns, mu_0),
2: get_sigma_map(2, cts, lambda_0, psi, covs, mns, mu_0),
3: get_sigma_map(3, cts, lambda_0, psi, covs, mns, mu_0)}
df.loc[:, 'lld1']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[1], cov=sigma_map[1])
df.loc[:, 'lld2']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[2], cov=sigma_map[2])
df.loc[:, 'lld3']=multivariate_normal.logpdf(df.loc[:, ['AB', 'CN']], mean=mu_map[3], cov=sigma_map[3])
lld_code={'lld1':1, 'lld2':2, 'lld3':3}
df.loc[:,'gt_new']=df.loc[:, ['lld1', 'lld2', 'lld3']].idxmax(1).map(lld_code)
df.loc[:, 'gq']=df.loc[:, ['lld1', 'lld2', 'lld3']].max(axis=1)-df.loc[:, ['lld1', 'lld2', 'lld3']].median(axis=1)
df.loc[:, 'med_gq']=df.loc[:, 'gq'].median()
df.loc[:, 'q10_gq']=df.loc[:, 'gq'].quantile(0.1)
return
def recluster_INV_BND(df):
#priors
mu_0={1: 0.03, 2:0.46, 3:0.94}
psi={1:0.00128, 2:0.013, 3:0.0046}
lambda_0=1
nu_0=1
gpd=df.loc[:, ['gtn', 'AB']].groupby(['gtn'])
covs=gpd[['AB']].cov()
mns=gpd[['AB']].mean()
cts=gpd.size()
df.loc[:, 'gt_adj']=df.loc[:, 'gtn'].copy()
mu_map={1: get_mu_map(1, cts, lambda_0, mu_0, mns),
2: get_mu_map(2, cts, lambda_0, mu_0, mns),
3: get_mu_map(3, cts, lambda_0, mu_0, mns)}
sigma_map={1: get_sigma_map(1, cts, lambda_0, psi, covs, mns, mu_0),
2: get_sigma_map(2, cts, lambda_0, psi, covs, mns, mu_0),
3: get_sigma_map(3, cts, lambda_0, psi, covs, mns, mu_0)}
    # INV/BND priors are one-dimensional (AB only), so score AB alone rather than the (AB, CN) pair
    df.loc[:, 'lld1']=multivariate_normal.logpdf(df.loc[:, 'AB'], mean=mu_map[1], cov=sigma_map[1])
    df.loc[:, 'lld2']=multivariate_normal.logpdf(df.loc[:, 'AB'], mean=mu_map[2], cov=sigma_map[2])
    df.loc[:, 'lld3']=multivariate_normal.logpdf(df.loc[:, 'AB'], mean=mu_map[3], cov=sigma_map[3])
lld_code={'lld1':1, 'lld2':2, 'lld3':3}
df.loc[:,'gt_new']=df.loc[:, ['lld1', 'lld2', 'lld3']].idxmax(1).map(lld_code)
df.loc[:, 'gq']=df.loc[:, ['lld1', 'lld2', 'lld3']].max(axis=1)-df.loc[:, ['lld1', 'lld2', 'lld3']].median(axis=1)
df.loc[:, 'med_gq']=df.loc[:, 'gq'].median()
df.loc[:, 'q10_gq']=df.loc[:, 'gq'].quantile(0.1)
return
def get_mu_map(j, nn, l0, m0, sample_mean):
new_mu=m0[j]
if j in nn:
new_mu=(l0*m0[j]+nn[j]*sample_mean.loc[j,:].values)/(l0+nn[j])
return new_mu
def get_sigma_map(j, nn, l0, psi0, sample_cov, sample_mean, m0):
new_sig=psi0[j]
if (j in nn) and nn[j]>2:
val=sample_cov.loc[j,:]
val=val+l0*nn[j]/(l0+nn[j])*np.outer(sample_mean.loc[j,:]-m0[j], sample_mean.loc[j,:]-m0[j])
new_sig=val+psi0[j]
return new_sig
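# get_mu_map / get_sigma_map are conjugate-style (Normal-inverse-Wishart) MAP updates that shrink each
# genotype cluster towards its prior:
#   mu_MAP    = (lambda_0*mu_0 + n*xbar) / (lambda_0 + n)
#   Sigma_MAP = Psi_0 + S + (lambda_0*n/(lambda_0 + n)) * outer(xbar - mu_0, xbar - mu_0)
# where S is taken here as the group sample covariance (rather than the scatter matrix), and the
# prior alone is used when a genotype class has too few (or no) observations.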
def percentile(n):
def percentile_(x):
return np.percentile(x,n)
percentile_.__name__= 'percentile_%s' % n
return percentile_
def load_df(var, exclude, sex):
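    """Collect per-sample records (vcf_rec fields) for one variant into a DataFrame.

    Samples in the exclusion list are skipped, and the reported copy number is
    doubled for males (sex == 1) on the X or Y chromosome.
    """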
test_set = list()
for s in var.sample_list:
if s in exclude:
continue
cn = var.genotype(s).get_format('CN')
if (var.chrom == 'X' or var.chrom == 'Y') and sex[s] == 1:
cn=str(float(cn)*2)
test_set.append(vcf_rec(var.var_id, s, var.info['SVTYPE'], var.info['AF'],
var.genotype(s).get_format('GT'), cn , var.genotype(s).get_format('AB')))
test_set = pd.DataFrame(data = test_set, columns=vcf_rec._fields)
return test_set
def run_gt_refine(vcf_in, vcf_out, diag_outfile, gender_file, exclude_file):
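    """Stream a VCF, recluster genotypes of common (AF >= 0.01) DEL calls, and write
    refined GTR/GQR FORMAT values plus MEDGQR/Q10GQR INFO fields to vcf_out.
    All other records are passed through unchanged; per-variant clustering
    diagnostics are appended to diag_outfile when it is provided.
    """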
vcf = Vcf()
header = []
in_header = True
sex={}
for line in gender_file:
v = line.rstrip().split('\t')
sex[v[0]] = int(v[1])
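    # samples listed in the (optional) exclusion file are left out of clustering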
exclude = []
if exclude_file is not None:
for line in exclude_file:
exclude.append(line.rstrip())
    # the diagnostic file is optional; only open it when -d/--diag_file was supplied
    outf = open(diag_outfile, 'w', 4096) if diag_outfile is not None else None
ct=1
for line in vcf_in:
if in_header:
if line[0] == "#":
header.append(line)
continue
else:
in_header = False
vcf.add_header(header)
vcf.add_info('MEDGQR', '1', 'Float', 'Median quality for refined GT')
vcf.add_info('Q10GQR', '1', 'Float', 'Q10 quality for refined GT')
vcf.add_format('GQR', 1, 'Float', 'Quality of refined genotype.')
vcf.add_format('GTR', 1, 'String', 'Refined genotype.')
vcf_out.write(vcf.get_header() + '\n')
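        # past the header: every remaining line is a variant record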
v = line.rstrip().split('\t')
info = v[7].split(';')
svtype = None
for x in info:
if x.startswith('SVTYPE='):
svtype = x.split('=')[1]
break
        # only DEL calls are reclustered here; everything else is passed through unchanged
if svtype not in ['DEL']:
vcf_out.write(line)
continue
var = Variant(v, vcf)
sys.stderr.write("%s\n" % var.var_id)
sys.stderr.write("%f\n" % float(var.get_info('AF')))
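        # rare sites (AF < 0.01) are written out unchanged; only common DELs are reclustered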
if float(var.get_info('AF'))<0.01:
vcf_out.write(line)
else:
df=load_df(var, exclude, sex)
recdf=recluster(df)
            if outf is not None:
                if ct==1:
                    recdf.to_csv(outf, header=True)
                    ct += 1
                else:
                    recdf.to_csv(outf, header=False)
var.set_info("MEDGQR", '{:.2f}'.format(recdf.iloc[0,:].loc['med_gq_re']))
var.set_info("Q10GQR", '{:.2f}'.format(recdf.iloc[0,:].loc['q10_gq_re']))
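            # write refined genotype/quality per sample; samples absent from the
            # clustering table get a missing GTR ("./.") and a GQR of 0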
recdf.set_index('sample', inplace=True)
for s in var.sample_list:
if s in recdf.index:
var.genotype(s).set_format("GTR", recdf.loc[s,'GTR'])
var.genotype(s).set_format("GQR", '{:.2f}'.format(recdf.loc[s,'gq_re']))
else:
var.genotype(s).set_format("GTR", "./.")
var.genotype(s).set_format("GQR", 0)
vcf_out.write(var.get_var_string(use_cached_gt_string=False) + '\n')
vcf_out.close()
vcf_in.close()
gender_file.close()
    if outf is not None:
        outf.close()
if exclude_file is not None:
exclude_file.close()
return
def add_arguments_to_parser(parser):
parser.add_argument('-i', '--input', metavar='<VCF>', dest='vcf_in', type=argparse.FileType('r'), default=None, help='VCF input [stdin]')
parser.add_argument('-o', '--output', metavar='<VCF>', dest='vcf_out', type=argparse.FileType('w'), default=sys.stdout, help='VCF output [stdout]')
parser.add_argument('-d', '--diag_file', metavar='<STRING>', dest='diag_outfile', type=str, default=None, required=False, help='text file to output method comparisons')
parser.add_argument('-g', '--gender', metavar='<FILE>', dest='gender', type=argparse.FileType('r'), required=True, default=None, help='tab delimited file of sample genders (male=1, female=2)\nex: SAMPLE_A\t2')
parser.add_argument('-e', '--exclude', metavar='<FILE>', dest='exclude', type=argparse.FileType('r'), required=False, default=None, help='list of samples to exclude from classification algorithms')
parser.set_defaults(entry_point=run_from_args)
def description():
return 'refine genotypes by clustering'
def command_parser():
parser = argparse.ArgumentParser(description=description())
add_arguments_to_parser(parser)
return parser
def run_from_args(args):
with su.InputStream(args.vcf_in) as stream:
run_gt_refine(stream, args.vcf_out, args.diag_outfile, args.gender, args.exclude)
if __name__ == '__main__':
parser = command_parser()
args=parser.parse_args()
sys.exit(args.entry_point(args))
| mit | -989,182,057,878,760,700 | 39.54359 | 213 | 0.537693 | false |