repo_name      stringlengths  6–112
path           stringlengths  4–204
copies         stringlengths  1–3
size           stringlengths  4–6
content        stringlengths  714–891k
license        stringclasses  15 values
hash           int64          -9,223,135,201,861,841,000 to 9,223,183,049B
line_mean      float64        6–99.4
line_max       int64          17–1k
alpha_frac     float64        0.25–0.89
autogenerated  bool           1 class
TakayukiSakai/tensorflow
tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
2
5454
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.training import queue_runner

# pylint: disable=g-import-not-at-top
try:
  import pandas as pd
  HAS_PANDAS = True
except ImportError:
  HAS_PANDAS = False


class _ArrayFeedFn(object):
  """Creates feed dictionaries from numpy arrays."""

  def __init__(self, placeholders, array):
    if len(placeholders) != 2:
      raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
          len(placeholders)))
    self._placeholders = placeholders
    self._array = array
    self._reset()

  def _reset(self):
    self._row_iterator = enumerate(self._array)

  def __call__(self):
    try:
      index, row = next(self._row_iterator)
    except StopIteration:
      self._reset()
      index, row = next(self._row_iterator)
    return {self._placeholders[0]: index, self._placeholders[1]: row}


class _PandasFeedFn(object):
  """Creates feed dictionaries from pandas `DataFrames`."""

  def __init__(self, placeholders, dataframe):
    if len(placeholders) != len(dataframe.columns) + 1:
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(dataframe.columns), len(placeholders)))
    self._index_placeholder = placeholders[0]
    self._row_placeholders = placeholders[1:]
    self._dataframe = dataframe
    self._reset()

  def _reset(self):
    self._row_iterator = self._dataframe.iterrows()

  def __call__(self):
    try:
      index, row = next(self._row_iterator)
    except StopIteration:
      self._reset()
      index, row = next(self._row_iterator)
    feed_dict = dict(zip(self._row_placeholders, row))
    feed_dict[self._index_placeholder] = index
    return feed_dict


def enqueue_data(data,
                 capacity,
                 shuffle=False,
                 min_after_dequeue=None,
                 seed=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

  Returns a queue filled with the rows of the given array or `DataFrame`. In
  the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to
  the index of the `DataFrame`. For numpy arrays, the first enqueued `Tensor`
  contains the row number.

  Args:
    data: a numpy `ndarray or` pandas `DataFrame` that will be read into the
      queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    seed: used to seed RandomShuffleQueue. Only used when `shuffle` is True.

  Returns:
    A queue filled with the rows of the given array or `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame` or a numpy `ndarray`.
  """
  # TODO(jamieas): create multithreaded version of enqueue_data.
  if isinstance(data, np.ndarray):
    types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
    shapes = [(), data.shape[1:]]
    get_feed_fn = _ArrayFeedFn
  elif HAS_PANDAS and isinstance(data, pd.DataFrame):
    types = [dtypes.as_dtype(dt)
             for dt in [data.index.dtype] + list(data.dtypes)]
    shapes = [() for _ in types]
    get_feed_fn = _PandasFeedFn
  else:
    raise TypeError(
        "data must be either a numpy array or pandas DataFrame if pandas is "
        "installed; got {}".format(type(data).__name__))

  placeholders = [array_ops.placeholder(*type_and_shape)
                  for type_and_shape in zip(types, shapes)]
  if shuffle:
    min_after_dequeue = (capacity / 4 if min_after_dequeue is None
                         else min_after_dequeue)
    queue = data_flow_ops.RandomShuffleQueue(capacity,
                                             min_after_dequeue,
                                             dtypes=types,
                                             shapes=shapes,
                                             seed=seed)
  else:
    queue = data_flow_ops.FIFOQueue(capacity, dtypes=types, shapes=shapes)
  enqueue_op = queue.enqueue(placeholders)
  feed_fn = get_feed_fn(placeholders, data)
  runner = fqr.FeedingQueueRunner(queue=queue,
                                  enqueue_ops=[enqueue_op],
                                  feed_fn=feed_fn)
  queue_runner.add_queue_runner(runner)
  return queue
apache-2.0
-1,223,852,130,678,914,600
35.604027
94
0.645398
false
YsuSERESL/fixation-correction-sourcecode
src/plot_points.py
1
2983
""" This file is part of Fixation-Correction-Sourcecode. Fixation-Correction-Sourcecode is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. Fixation-Correction-Sourcecode is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Fixation-Correction-Sourcecode. If not, see <http://www.gnu.org/licenses/>. Copyright 2015 Author: Chris Palmer """ import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import globalVariables def plot_points(listofpoints, filename, listofaois): """ <estimated return type> <function_name> (<parameters>) PRECONDITION(S): POSTCONDITION(S): """ for point in listofpoints: plt.plot(point.x, -point.y, 'bo', label=filename) plt.savefig('point-plots/'+filename+'.png') def plot_aois(listofaois, filename, list_of_clusters): """ <estimated return type> <function_name> (<parameters>) PRECONDITION(S): POSTCONDITION(S): """ numerOfAois = 0 print globalVariables.AOI_FILES[filename] for aoi in listofaois: for cluster in list_of_clusters: for point in cluster: plt.plot(point.x, -point.y, 'bo') rectangle = Rectangle((aoi.x, -aoi.y), aoi.width, aoi.height, fc='r') numerOfAois += 1 plt.gca().add_patch(rectangle) print(str(numerOfAois) + ' aois') period = filename.rfind('.') filename = filename[:-(len(filename)-period)] print(str(len(list_of_clusters)) + ' clusters') if len(list_of_clusters) > 0: plt.savefig('aoi-plots/' + filename) plt.clf() plt.close('all') #---------------------------------------------------------------------------------- numerOfAois = 0 autocorrected = False for cluster in list_of_clusters: for point in cluster: if point.x != point.autoxCorrected or point.y != point.autoyCorrected: autocorrected = True if autocorrected: for aoi in listofaois: for cluster in list_of_clusters: for point in cluster: plt.plot(point.autoxCorrected, -point.autoyCorrected, 'ro') rectangle = Rectangle((aoi.x, -aoi.y), aoi.width, aoi.height, fc='g') numerOfAois += 1 plt.gca().add_patch(rectangle) print(str(numerOfAois) + ' aois') print(str(len(list_of_clusters)) + ' clusters') if len(list_of_clusters) > 0: plt.savefig('aoi-plots/' + filename+"_autocorrected") plt.clf() plt.close('all') return len(list_of_clusters) # plt.show()
gpl-2.0
-9,087,281,211,252,764,000
34.094118
87
0.635602
false
drvinceknight/sklDj
sklDj/implementations/implementations/linearregression.py
1
1035
from implementations import Implementation
from sklearn import linear_model
import matplotlib.pyplot as plt, mpld3


class LinearRegression(Implementation):
    """A class for linear regression"""

    def __init__(self, data):
        self.name = 'linear-regression'
        self.X = [row[:-1] for row in data]
        self.Y = [row[-1] for row in data]

    def fit(self):
        algorithm = linear_model.LinearRegression()
        algorithm.fit(self.X, self.Y)
        self.coeff = algorithm.coef_
        self.intercept = algorithm.intercept_

    def plot(self):
        fig = plt.figure()
        plt.scatter(self.X, self.Y)
        min_x = min(self.X)[0]
        max_x = max(self.X)[0]
        line = lambda x: self.coeff * x + self.intercept
        plt.plot([min_x, max_x], [line(min_x), line(max_x)])
        fig_html = mpld3.fig_to_html(fig)  # When we have local mpld3 libraries we will need to tweak this
        return fig_html

    def run(self):
        self.fit()
        return self.plot(), self.coeff, self.intercept
mit
1,248,169,903,476,889,900
32.387097
105
0.613527
false
VirusTotal/msticpy
msticpy/nbtools/process_tree.py
1
17301
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """Process Tree Visualization.""" from collections import namedtuple from typing import Optional, Tuple, Union from bokeh.io import output_notebook, show, reset_output from bokeh.plotting import figure from bokeh.transform import dodge, factor_cmap, linear_cmap from bokeh.models import ( HoverTool, ColumnDataSource, CustomJS, # Legend, # LegendItem, BoxSelectTool, RangeTool, ColorBar, ) # pylint: disable=no-name-in-module from bokeh.palettes import viridis # pylint: enable=no-name-in-module from bokeh.layouts import column, row from bokeh.models import LayoutDOM from bokeh.models.widgets import DataTable, TableColumn, DateFormatter import numpy as np import pandas as pd from ..sectools.process_tree_utils import ( build_process_tree, ProcSchema, infer_schema, ProcessTreeSchemaException, ) from ..common.utility import check_kwargs from .._version import VERSION __version__ = VERSION __author__ = "Ian Hellen" _DEFAULT_KWARGS = ["height", "title", "width"] def build_and_show_process_tree( data: pd.DataFrame, schema: ProcSchema = None, output_var: str = None, legend_col: str = None, **kwargs, ) -> Tuple[figure, LayoutDOM]: """ Build process tree from data and plot a tree. Parameters ---------- data : pd.DataFrame Window process creation or Linux Auditd events schema : ProcSchema The data schema to use for the data set, by default None (if None the schema is inferred) output_var : str, optional Output variable for selected items in the tree, by default None legend_col : str, optional The column used to color the tree items, by default None kwargs : Dict[str, Any] Additional arguments passed to plot_process_tree Returns ------- Tuple[figure, LayoutDOM]: figure - The main bokeh.plotting.figure Layout - Bokeh layout structure. See Also -------- plot_process_tree """ # Check if this table already seems to have the proc_tree metadata missing_cols = _check_proc_tree_schema(data) if missing_cols: data = build_process_tree(procs=data, schema=schema) return plot_process_tree( data, schema, output_var=output_var, legend_col=legend_col, **kwargs ) # pylint: disable=too-many-locals def plot_process_tree( data: pd.DataFrame, schema: ProcSchema = None, output_var: str = None, legend_col: str = None, show_table: bool = False, **kwargs, ) -> Tuple[figure, LayoutDOM]: """ Plot a Process Tree Visualization. Parameters ---------- data : pd.DataFrame DataFrame containing one or more Process Trees schema : ProcSchema, optional The data schema to use for the data set, by default None (if None the schema is inferred) output_var : str, optional Output variable for selected items in the tree, by default None legend_col : str, optional The column used to color the tree items, by default None show_table: bool Set to True to show a data table, by default False. Other Parameters ---------------- height : int, optional The height of the plot figure (the default is 700) width : int, optional The width of the plot figure (the default is 900) title : str, optional Title to display (the default is None) Returns ------- Tuple[figure, LayoutDOM]: figure - The main bokeh.plotting.figure Layout - Bokeh layout structure. Raises ------ ProcessTreeSchemaException If the data set schema is not valid for the plot. 
Notes ----- The `output_var` variable will be overwritten with any selected values. """ check_kwargs(kwargs, _DEFAULT_KWARGS) reset_output() output_notebook() data, schema, levels, n_rows = _pre_process_tree(data, schema) if schema is None: raise ProcessTreeSchemaException("Could not infer schema from data set.") source = ColumnDataSource(data=data) # Get legend/color bar map fill_map, color_bar = _create_fill_map(source, legend_col) max_level = max(levels) + 3 min_level = min(levels) plot_height: int = kwargs.pop("height", 700) plot_width: int = kwargs.pop("width", 900) title: str = kwargs.pop("title", "ProcessTree") if color_bar: title += " (color bar = {legend_col})" visible_range = int(plot_height / 35) y_start_range = (n_rows - visible_range, n_rows + 1) b_plot = figure( title=title, plot_width=plot_width, plot_height=plot_height, x_range=(min_level, max_level), y_range=y_start_range, tools=["ypan", "reset", "save", "tap"], toolbar_location="above", ) hover = HoverTool( tooltips=_get_tool_tips(schema), formatters={"TimeGenerated": "datetime"} ) b_plot.add_tools(hover) # dodge to align rectangle with grid rect_x = dodge("Level", 1.75, range=b_plot.x_range) rect_plot_params = dict( width=3.5, height=0.95, source=source, fill_alpha=0.4, fill_color=fill_map ) if color_bar: b_plot.add_layout(color_bar, "right") elif legend_col: rect_plot_params["legend_field"] = legend_col rect_plot = b_plot.rect(x=rect_x, y="Row", **rect_plot_params) if legend_col and not color_bar: b_plot.legend.title = legend_col text_props = {"source": source, "text_align": "left", "text_baseline": "middle"} def x_dodge(x_offset): return dodge("Level", x_offset, range=b_plot.x_range) def y_dodge(y_offset): return dodge("Row", y_offset, range=b_plot.y_range) b_plot.text( x=x_dodge(0.1), y=y_dodge(-0.2), text="cmd", text_font_size="7pt", **text_props ) b_plot.text( x=x_dodge(0.1), y=y_dodge(0.25), text="Exe", text_font_size="8pt", **text_props ) b_plot.text( x=x_dodge(1.8), y=y_dodge(0.25), text="PID", text_font_size="8pt", **text_props ) # Plot options _set_plot_option_defaults(b_plot) b_plot.xaxis.ticker = sorted(levels) b_plot.xgrid.ticker = sorted(levels) b_plot.hover.renderers = [rect_plot] # only hover element boxes # Selection callback if output_var is not None: get_selected = _create_js_callback(source, output_var) b_plot.js_on_event("tap", get_selected) box_select = BoxSelectTool(callback=get_selected) b_plot.add_tools(box_select) range_tool = _create_vert_range_tool( data=source, min_y=0, max_y=n_rows, plot_range=b_plot.y_range, width=90, height=plot_height, x_col="Level", y_col="Row", fill_map=fill_map, ) plot_elems = row(b_plot, range_tool) if show_table: data_table = _create_data_table(source, schema, legend_col) plot_elems = column(plot_elems, data_table) show(plot_elems) return b_plot, plot_elems # pylint: enable=too-many-locals TreeResult = namedtuple("TreeResult", "proc_tree, schema, levels, n_rows") def _pre_process_tree(proc_tree: pd.DataFrame, schema: ProcSchema = None): """Extract dimensions and formatted values from proc_tree.""" if schema is None: schema = infer_schema(proc_tree) _validate_plot_schema(proc_tree, schema) proc_tree = proc_tree.sort_values("path", ascending="True").reset_index() n_rows = len(proc_tree) proc_tree["Row"] = proc_tree.index proc_tree["Row"] = n_rows - proc_tree["Row"] proc_tree["Level"] = proc_tree["path"].str.count("/") + 1 levels = proc_tree["Level"].unique() max_cmd_len = int(350 / len(levels)) long_cmd = proc_tree[schema.cmd_line].str.len() > max_cmd_len 
proc_tree.loc[long_cmd, "cmd"] = ( proc_tree[schema.cmd_line].str[:max_cmd_len] + "..." ) proc_tree.loc[~long_cmd, "cmd"] = proc_tree[schema.cmd_line].fillna( "cmdline unknown" ) proc_tree["Exe"] = proc_tree.apply( lambda x: x[schema.process_name].split(schema.path_separator)[-1], axis=1 ) pid_fmt = ( lambda x: f"PID: {x} ({int(x, base=16)})" if str(x).startswith("0x") else f"PID: 0x{int(x):x} ({int(x)})" ) proc_tree["PID"] = proc_tree[schema.process_id].apply(pid_fmt) return TreeResult(proc_tree=proc_tree, schema=schema, levels=levels, n_rows=n_rows) def _validate_plot_schema(proc_tree: pd.DataFrame, schema): """Validate that we have the required columns.""" required_cols = set( ["path", schema.cmd_line, schema.process_name, schema.process_id] ) proc_cols = set(proc_tree.columns) missing = required_cols - proc_cols if missing: raise ProcessTreeSchemaException( f"Required columns not found in data set: {','.join(missing)}" ) def _set_plot_option_defaults(b_plot): """Set default plot options.""" b_plot.outline_line_color = None b_plot.grid.grid_line_color = "navy" b_plot.axis.axis_line_color = None b_plot.axis.major_tick_line_color = "navy" b_plot.xaxis.visible = False b_plot.yaxis.visible = False b_plot.xgrid.visible = True b_plot.ygrid.visible = False b_plot.xgrid.minor_grid_line_color = "navy" b_plot.xgrid.minor_grid_line_alpha = 0.1 b_plot.xgrid.grid_line_color = "navy" b_plot.xgrid.grid_line_alpha = 0.1 b_plot.axis.major_label_standoff = 0 def _get_tool_tips(schema: ProcSchema): """Return tool tip formatter.""" return [ ("Process", f"@{schema.process_name}"), ("PID", "@PID"), ("CmdLine", f"@{schema.cmd_line}"), ("SubjUser", f"@{schema.user_name}"), ("SubjLgnId", f"@{schema.logon_id}"), ("TargLgnId", f"@{schema.target_logon_id}"), ("Time", f"@{schema.time_stamp}{{%F %T}}"), ] def _create_js_callback(source: ColumnDataSource, result_var: str) -> CustomJS: """Create and return CustomJS callback to set Python variable.""" ret_var_js = """ // get data source from Callback args var inds = source.selected.indices; var output = []; for (var i = 0; i < inds.length; i++) { output.push(source.data[itemkey][inds[i]]) } out_str = JSON.stringify(output); py_str = `${output_var} = ${out_str}`; console.log(py_str); IPython.notebook.kernel.execute(py_str); """ get_selected = CustomJS( args=dict(source=source, itemkey="proc_key", output_var=result_var), code=ret_var_js, ) return get_selected def _create_fill_map( source: ColumnDataSource, source_column: str = None ) -> Tuple[Union[factor_cmap, linear_cmap], Optional[ColorBar]]: """Create factor map or linear map based on `source_column`.""" fill_map = "navy" color_bar = None if source_column is None or source_column not in source.data: return fill_map, color_bar col_kind = source.data[source_column].dtype.kind if col_kind in ["b", "O"]: s_values = set(source.data[source_column]) if np.nan in s_values: s_values.remove(np.nan) values = list(s_values) fill_map = factor_cmap( source_column, palette=viridis(max(3, len(values))), factors=values ) elif col_kind in ["i", "u", "f", "M"]: values = [val for val in source.data[source_column] if not np.isnan(val)] fill_map = linear_cmap( field_name=source_column, palette=viridis(256), low=np.min(values), high=np.max(values), ) color_bar = ColorBar( color_mapper=fill_map["transform"], width=8, location=(0, 0) # type: ignore ) return fill_map, color_bar # pylint: disable=too-many-arguments def _create_vert_range_tool( data, min_y, max_y, plot_range, width, height, x_col, y_col, fill_map="navy" ): """Return vertical 
range too for plot.""" rng_select = figure( plot_width=width, plot_height=height, y_range=(min_y - 1, max_y + 1), toolbar_location=None, ) x_dodge = dodge(x_col, -0.5) rng_select.rect( x=x_dodge, y=y_col, width=1.2, height=0.8, source=data, fill_alpha=0.6, fill_color=fill_map, ) rng_select.xaxis.visible = False rng_select.yaxis.visible = False range_tool = RangeTool(y_range=plot_range) range_tool.overlay.fill_color = "navy" range_tool.overlay.fill_alpha = 0.2 rng_select.ygrid.grid_line_color = None rng_select.xgrid.grid_line_color = None rng_select.add_tools(range_tool) rng_select.toolbar.active_multi = range_tool return rng_select # pylint: enable=too-many-arguments def _create_data_table( source: ColumnDataSource, schema: ProcSchema, legend_col: str = None ): """Return DataTable widget for source.""" column_names = [ schema.user_name, schema.user_id, schema.logon_id, schema.process_id, schema.process_name, schema.cmd_line, schema.parent_id, schema.parent_name, schema.target_logon_id, ] if legend_col and legend_col not in column_names: column_names.append(legend_col) date_fmt = "%F %T" columns = [ TableColumn( field=schema.time_stamp, title=schema.time_stamp, formatter=DateFormatter(format=date_fmt), ) ] columns2 = [ TableColumn(field=col, title=col) for col in column_names if col in source.column_names ] data_table = DataTable( source=source, columns=columns + columns2, width=950, height=150 ) return data_table def _check_proc_tree_schema(data): """Return true if expected process tree columns are present.""" input_cols = set(data.columns) expected_cols = set( [ "new_process_lc", "parent_proc_lc", "source_index", "new_process_lc_par", "source_index_par", "parent_key", "IsRoot", "IsLeaf", "IsBranch", "path", "parent_index", ] ) return expected_cols - input_cols # pylint: disable=too-few-public-methods @pd.api.extensions.register_dataframe_accessor("mp_process_tree") class ProcessTreeAccessor: """Pandas api extension for Process Tree.""" def __init__(self, pandas_obj): """Instantiate pandas extension class.""" self._df = pandas_obj def plot(self, **kwargs) -> Tuple[figure, LayoutDOM]: """ Build and plot a process tree. Parameters ---------- schema : ProcSchema, optional The data schema to use for the data set, by default None (if None the schema is inferred) output_var : str, optional Output variable for selected items in the tree, by default None legend_col : str, optional The column used to color the tree items, by default None show_table: bool Set to True to show a data table, by default False. Other Parameters ---------------- height : int, optional The height of the plot figure (the default is 700) width : int, optional The width of the plot figure (the default is 900) title : str, optional Title to display (the default is None) Returns ------- Tuple[figure, LayoutDOM]: figure - The main bokeh.plotting.figure Layout - Bokeh layout structure. """ return build_and_show_process_tree(data=self._df, **kwargs) def build(self, schema: ProcSchema = None, **kwargs) -> pd.DataFrame: """ Build process trees from the process events. Parameters ---------- procs : pd.DataFrame Process events (Windows 4688 or Linux Auditd) schema : ProcSchema, optional The column schema to use, by default None If None, then the schema is inferred show_progress : bool Shows the progress of the process (helpful for very large data sets) debug : bool If True produces extra debugging output, by default False Returns ------- pd.DataFrame Process tree dataframe. Notes ----- It is not necessary to call this before `plot`. 
The process tree is built automatically. This is only needed if you want to return the processed tree data as a DataFrame """ return build_process_tree( procs=self._df, schema=schema, show_progress=kwargs.get("show_progress", False), debug=kwargs.get("debug", False), )
mit
6,688,006,977,346,799,000
29.513228
88
0.600197
false
dchad/malware-detection
vs/combine_av_reports.py
1
2096
# combine-av-reports.py
#
# Combine the processed ClamAV and Windows Defender reports, drop duplicates and
# fill NaN values with "OK".
#
# Inputs : clamav-xxx.csv and defender-xxx.csv
#          row format = [file_name, malware_type]
#
# Outputs: sorted-av-report.csv
#          row format = [file_name, clamav_malware_type, windefender_malware_type]
#
#
#
# Author: Derek Chadwick
# Date  : 18/08/2016

import os
import pandas as pd
import numpy as np


def combine_av_reports(av_file_1, av_file_2, out_file):
    mals1 = pd.read_csv(av_file_1)
    mals2 = pd.read_csv(av_file_2)
    allmals = mals1.merge(mals2, on='filename', how='outer', indicator=True, sort=True)
    uniq_allmals = allmals.drop_duplicates(subset='filename', keep='first')
    filled_uniq_allmals = uniq_allmals.replace(np.NaN, 'OK')
    # Now we have our combined AV results, write to file.
    filled_uniq_allmals.to_csv(out_file, index=False)
    return

# TODO: Add command line arguments to specify files to combine and output file.

# Start of Script

clammals = pd.read_csv('data/clamav-vs263-264-apt.csv')
windefmals = pd.read_csv('data/defender-vs263-264-apt.csv')

print("Read {:d} ClamAV detections.".format(clammals.shape[0]))
print("Read {:d} Windows Defender detections.".format(windefmals.shape[0]))

# allmals = clammals.merge(windefmals, on='filename', how='outer', indicator=True, sort=True)
# NOTE: old versions of pandas merge() do not have indicator argument.
allmals = clammals.merge(windefmals, on='filename', how='outer', sort=True)

# uniq_allmals = allmals.drop_duplicates(subset='filename', keep='first')
# NOTE: old versions of pandas do not have subset argument in drop_duplicates()
# or keep argument.
uniq_allmals = allmals.drop_duplicates(cols='filename', take_last=False)

filled_uniq_allmals = uniq_allmals.replace(np.NaN, 'OK')

# Now we have our combined AV results, write to file.
filled_uniq_allmals.to_csv('data/sorted-av-report.csv', index=False)

print("Wrote {:d} combined malware detections.".format(filled_uniq_allmals.shape[0]))

# End of Script
gpl-3.0
-5,992,231,992,788,923,000
29.823529
93
0.708492
false
DmitrySmalyuk/sirepo_benchmark
scripts/plot_stacked_bars.py
1
3210
""" Demo of table function to display a table within a plot. """ import matplotlib import matplotlib.pyplot as plt import numpy as np def plot_stacked_bars(data, title=None, fontsize=22, figname='plot.png'): columns = ('Alpha', 'NSLS-II (Docker)', 'NSLS-II (Vagrant)') rows = ['Total User Time [s]', 'Total Server Time [s]', 'Calculation Time [s]'] phase = ['Calculation', 'Response Preparation', 'Data Transfer'] values = np.arange(0, 25, 5) value_increment = 1000 # Get some pastel shades for the colors colors = plt.cm.BuPu(np.linspace(0.1, 0.5, len(rows))) n_rows = len(data) index = np.arange(len(columns)) + 0.6 bar_width = 0.46 # Initialize the vertical-offset for the stacked bar chart. y_offset = np.zeros(len(columns)) # Plot bars and create text labels for the table cell_text = [] for row in range(n_rows): l = ' '.join(rows[-row - 1].split(' ')[:3]) a = plt.bar(index, data[row], bar_width, bottom=y_offset, color=colors[row], tick_label=l, edgecolor='black') if row > 0: for i in range(len(a)): plt.text( a[i].get_x() + a[i].get_width() + 0.01, y_offset[i] + a[i].get_height(), '{}'.format(l), ha='left', va='center' ) for i, r in enumerate(a): plt.text( r.get_x() + r.get_width() / 2., y_offset[i] + r.get_height() / 2. - 300, '{}'.format(' '.join(phase[row].split(' ')[:2])), ha='center', va='bottom' ) y_offset += data[row] cell_text.append(['{:1.1f}'.format(x / 1000.0) for x in y_offset]) # Reverse colors and text labels to display the last value at the top. colors = colors[::-1] cell_text.reverse() # Add a table at the bottom of the axes the_table = plt.table( cellText=cell_text, rowLabels=rows, rowColours=colors, colLabels=columns, loc='bottom' ) the_table.auto_set_font_size(False) the_table.set_fontsize(14) # Set font size: matplotlib.rcParams.update({'font.size': fontsize}) plt.tick_params(axis='y', which='major', labelsize=fontsize) # Adjust layout to make room for the table: plt.subplots_adjust(left=0.2, bottom=0.2) plt.ylabel('Time [s]', {'fontsize': fontsize}) plt.yticks(values * value_increment, ['%d' % val for val in values]) plt.xticks([]) plt.xlim([0, 3.2]) if title: plt.title(title) plt.grid(color='gray', linestyle='dotted') # plt.show() fig = plt.gcf() fig.set_size_inches(16, 11.25) plt.savefig(figname) plt.show() if __name__ == '__main__': title = 'NSLS-II FMX beamline simulation time comparison' figname = 'stacked_bars_server_preparation.png' data = [ # milliseconds # alpha, nsls-ii docker, nsls-ii vagrant [13650, 10310, 11612], # duration (SRW calculation) [2268, 1826, 3406], # waiting - duration (response preparation) [3726, 5290, 7144], # total - waiting (data transfer) ] plot_stacked_bars(data, figname=figname)
apache-2.0
9,153,365,064,216,360,000
32.4375
117
0.56947
false
zutshi/S3CAMX
src/CASymbolicKLEE.py
1
19555
#!/usr/bin/python # -*- coding: utf-8 -*- import glob import logging import numpy as np # import matplotlib # matplotlib.use('GTK3Agg') # import matplotlib.pyplot as plt import fileOps as fp import smtSolver as smt import state as st import utils as U import sample as S import err import time logger = logging.getLogger(__name__) MAX_NUM_VARS_EXPECTED = 100000 class ControllerSymbolicAbstraction: # @staticmethod # def get_abs_state(s): # return ControllerSymbolicAbstractState(s) @staticmethod def get_paths(solver, controller_path_dir_path, decls): path = controller_path_dir_path # get path files, which are in smt2 format with the same extension # ##!!##logger.debug('reading files(controller_paths) form {}'.format(path)) filenames = glob.glob(path + '*.smt2') path_dict = {} for (idx, f) in enumerate(filenames): # ##!!##logger.debug('reading path: {}'.format(f)) smt_string = fp.get_data(f) pc = solver.smt2_2_constraints(smt_string, decls) path_dict[str(idx)] = pc return path_dict def __init__( # np.inf): self, num_dims, controller_path_dir_path, max_hd=0, ): # ##!!##logger.debug('ControllerSymbolicAbstraction instance created') # super(Abstraction, self).__init__() self.max_hd = max_hd self.num_dims = num_dims # TODO: gets paths as z3 constraints, should take care of them bby # itself self.solver = smt.smt_solver_factory('z3') self.id_generator = self.gen_id_() self.gen_id = lambda : next(self.id_generator) # creates smt vars self.create_smt_var = lambda s_str: self.solver.BitVecArray(s_str) self.path_var_dict = {} var_name_list = ['x_arr', 'state_arr', 'dummy_output_arr', 'dummy_nextstate_arr', 'input_arr'] for var_name in var_name_list: self.path_var_dict[var_name] = self.create_smt_var(var_name) self.path_dict = ControllerSymbolicAbstraction.get_paths(self.solver, controller_path_dir_path, self.path_var_dict) def gen_id_(self): for i in range(MAX_NUM_VARS_EXPECTED): yield i def get_new_plant_smt_var(self, cell_id): x_str = 'X' + str(cell_id) x = self.create_smt_var(x_str) return x def get_input_smt_var(self, uid_str): # id_str = str(self.gen_id()) ci_str = 'ci' + uid_str ci = self.create_smt_var(ci_str) return ci # get new control states and control inputs def get_new_cumulitive_smt_vars( self, abs_state, uid_str, pid_str, ): # path_trace_str = 'p' + str(p_id) # s_str = str(abs_state.s_) + pid_str # u_str = str(abs_state.u) + pid_str s_str = 'S' + uid_str + '_' + pid_str u_str = 'U' + uid_str + '_' + pid_str s = self.create_smt_var(s_str) u = self.create_smt_var(u_str) # print str(s), s.sexpr() # print str(u), u.sexpr() # print str(x), x.sexpr() return (s, u) def instantiate( self, pc, s, x, s_, u, ci, ): # print z3.simplify(pc) # print '0'*80 # print self.path_var_dict['x_arr'], x pc = self.solver.substitute(pc, self.path_var_dict['x_arr'], x) pc = self.solver.substitute(pc, self.path_var_dict['state_arr'], s) pc = self.solver.substitute(pc, self.path_var_dict['dummy_nextstate_arr'], s_) pc = self.solver.substitute(pc, self.path_var_dict['dummy_output_arr' ], u) pc = self.solver.substitute(pc, self.path_var_dict['input_arr'], ci) # print '0'*80 # print pc return pc # def valid(self, abs_state): # return self.solver.SAT(abs_state.C) def meta_sampler( self, abs_state, num_req_samples, time_budget, ): raise NotImplementedError def timed_sampler( self, abs_state, num_req_samples, time_budget, ): num_samples_found = 0 start_time = time.time() while num_samples_found < num_req_samples: elapsed_time = time.time() - start_time if elapsed_time >= time_budget: return states = sample_random() def 
sample_random(): raise NotImplementedError def sample_random_smt(self, abs_state, num_req_samples): MAX_ITERS = 20 num_samples = 0 iters = 0 # this cumulitive sample dict will be returned total_sample_dict = {} var_list = [abs_state.x, abs_state.u, abs_state.s, abs_state.s_, abs_state.ci] var2dim_dict = { str(abs_state.x): self.num_dims.x, str(abs_state.u): self.num_dims.u, str(abs_state.s): self.num_dims.s, str(abs_state.s_): self.num_dims.s, str(abs_state.ci): self.num_dims.ci, } var2sample_list = [] # initialize the cumulitive sample dict for (var, dim) in var2dim_dict.iteritems(): total_sample_dict[var] = np.empty((0, dim), dtype=int) while num_samples < num_req_samples and iters < MAX_ITERS: smt_cons_list = [abs_state.C] for (x, ival_cons) in abs_state.x_ival_cons_list: # print x, ival_cons val = S.sample_ival_constraints(ival_cons, 1)[0] # print val smt_cons = self.solver.equal(x, np.round(val).astype(int)) smt_cons_list.append(smt_cons) # get back samples for plant states and inputs sample_dict = self.solver.sample_bvArray( smt_cons_list, 1, var_list, var2dim_dict, var2sample_list, minDist=None, ) # sample found if sample_dict: num_samples += 1 for (var, val) in sample_dict.iteritems(): existing_samples = total_sample_dict[var] new_sample = sample_dict[var] total_sample_dict[var] = np.concatenate((existing_samples, new_sample)) iters += 1 if num_samples == 0: print 'no sample found...' # return Nones if no sample was found normalized_sample_list = [None for i in range(len(var_list))] # if any sample was found if total_sample_dict: sample_list = [total_sample_dict[str(var)] for var in var_list] normalized_sample_list = [np.array(val).astype(float) for val in sample_list] print 'SAMPLES' for (var, val) in zip(var_list, normalized_sample_list): # ##!!##logger.debug('{}: {}'.format(var, val)) pass normalized_sample_list.append(num_samples) # TODO: remove this # if num_samples != num_req_samples: # print iters # raise err.Fatal('FAILED') # print total_sample_dict # print normalized_sample_list print 'num_actual_samples:', num_samples return tuple(normalized_sample_list) def sample_smt(self, abs_state, num_req_samples): # print abs_state # get back samples for plant states and inputs var_list = [abs_state.x, abs_state.u, abs_state.s, abs_state.s_, abs_state.ci] var2dim_dict = { str(abs_state.x): self.num_dims.x, str(abs_state.u): self.num_dims.u, str(abs_state.s): self.num_dims.s, str(abs_state.s_): self.num_dims.s, str(abs_state.ci): self.num_dims.ci, } # var2sample_list = [abs_state.x, abs_state.u, abs_state.ci, abs_state.s_] var2sample_list = [abs_state.x] # , abs_state.u] sample_dict = self.solver.sample_bvArray( abs_state.C, num_req_samples, var_list, var2dim_dict, var2sample_list, minDist=5, ) if sample_dict: # print z3.simplify(abs_state.C) sample_list = [sample_dict[str(var)] for var in var_list] normalized_sample_list = [np.array(val).astype(float) for val in sample_list] print 'SAMPLES' # ci_array # print 'S_VAL =', normalized_sample_list[2] # print 'S__VAL =', normalized_sample_list[3] for (var, val) in zip(var_list, normalized_sample_list): # ##!!##logger.debug('{}: {}'.format(var, val)) pass x_array = normalized_sample_list[0] num_actual_samples = x_array.shape[0] normalized_sample_list.append(num_actual_samples) print 'num_actual_samples:', num_actual_samples # return (x_array, u_array, s_array, s__array, ci_array, n) return tuple(normalized_sample_list) else: # print 'UNSAT' return ( None, None, None, None, None, 0, ) # \alpha() # def get_abs_state_from_concrete_state(self, 
concrete_state, hd=0, p='XXX', cp='XXX'): def get_abs_state_from_concrete_state( self, concrete_state, hd=0, p='', cp='', ): if hd != 0: raise err.Fatal('investigate') id_str = str(self.gen_id()) s_str = 'S' + id_str s__str = 'S' + id_str + p x_str = 'X' + id_str u_str = 'U' + id_str ci_str = 'I' + id_str s = self.create_smt_var(s_str) s_ = self.create_smt_var(s__str) u = self.create_smt_var(u_str) x = self.create_smt_var(x_str) ci = self.create_smt_var(ci_str) C = self.solver.equal(s_, concrete_state) x_ival_cons_list = [] return ControllerSymbolicAbstractState( C, s, x, s_, u, p, cp, ci, hd, x_ival_cons_list, ) def get_reachable_abs_states( self, abs_state, A, system_params, ): reachable_state_list = [] # CA = A.controller_abs PA = A.plant_abs hd = abs_state.cs.hd + 1 uid_str = str(self.gen_id()) x = self.get_new_plant_smt_var(str(abs_state.ps)) ci = self.get_input_smt_var(uid_str) # construct ci smt constraints #ci_ival_cons = system_params.ci.scaleNround(self.CONVERSION_FACTOR) ci_ival_cons = system_params.ci ci_smt = self.solver.ic2smt(ci_ival_cons, ci) X_smt = PA.get_smt2_constraints(abs_state.ps, x) x_ival_cons = PA.get_ival_constraints(abs_state.ps) x_ival_cons_list = abs_state.cs.x_ival_cons_list + [(x, x_ival_cons)] for (p_id, pc) in self.path_dict.iteritems(): # pid_str = abs_state.cs.pid + 'p' + str(p_id) pid_str = 'p' + str(p_id) cumulitive_pid_str = abs_state.cs.cpid + pid_str # print abs_state.cs s = abs_state.cs.s_ # C'[s] = C[s'] (s_, u) = self.get_new_cumulitive_smt_vars(abs_state.cs, uid_str, pid_str) # print (s, s_, u, x) pc_ = self.instantiate( pc=pc, s=s, x=x, s_=s_, u=u, ci=ci, ) # print 'X_smt\n', X_smt # print 'init', abs_state.cs.C # print 'pc\n', self.solver.simplify(pc) pc_ = self.solver.And(pc_, abs_state.cs.C, X_smt, ci_smt) pc_ = self.solver.simplify(pc_) # print 'simplified pc' # print pc_ # print '='*80 # The below logging operation is very expensive. Commenting out! # # ##!!##logger.debug('simplified pc:\n{}'.format(U.decorate(str(pc_)))) reachable_controller_state = ControllerSymbolicAbstractState( pc_, s, x, s_, u, pid_str, cumulitive_pid_str, ci, hd, x_ival_cons_list, ) reachable_controller_state.Cx = X_smt # requested number of samples num_req_samples = A.num_samples # n = actual number of samples ( x_array, u_array, s_array, s__array, ci_array, num_actual_samples, ) = \ self.get_concrete_states_from_abs_state(reachable_controller_state, num_req_samples) if num_actual_samples != 0: # TODO: remove the below d and p arrays # One strategy is to separate concrete states of plant and # controller d_array = np.tile(abs_state.ps.d, (num_actual_samples, 1)) p_array = np.tile(abs_state.ps.pvt, (num_actual_samples, 1)) pi_array = np.zeros((num_actual_samples, A.num_dims.pi)) t = abs_state.plant_state.n * A.delta_t print 't:', t t_array = np.tile(t, (num_actual_samples, 1)) state = st.StateArray( t=t_array, x=x_array, d=d_array, pvt=p_array, s=s_array, u=u_array, pi=pi_array, ci=ci_array, ) # plot # plt.annotate('{},{}'.format(x, u), (x_array[0, 0], x_array[0, 1])) # if history depth exceeded, concretize all states! 
if abs_state.cs.hd >= self.max_hd: # ##!!##logger.debug('max history depth reached, concretizing...') for i in range(num_actual_samples): # reachable_controller_state = self.get_abs_state_from_concrete_state(s__array[i, :], hd=0, p='p' + str(p_id))#pid_str) reachable_controller_state.C = self.solver.equal(s_, s__array[i, :]) reachable_controller_state.hd = 0 reachable_controller_state.x_ival_cons_list = [(x, x_ival_cons)] reachable_state_list.append((reachable_controller_state, state)) reachable_controller_state.concrete_state_list.append(state[i]) # clear cpid history as well reachable_controller_state.cpid = \ reachable_controller_state.pid else: reachable_state_list.append((reachable_controller_state, state)) reachable_controller_state.concrete_state_list.append(state) # print reachable_controller_state if not reachable_state_list: raise err.Fatal('no reachable state found!') # print reachable_state_list return reachable_state_list # \gamma() # TODO: should check for cached concrete states # def get_concrete_states_from_abs_state(self, abs_state, num_req_samples): # ###### Pick 1! # (x_array, u_array, s_array, s__array, ci_array, num_actual_samples)\ # = self.sample_smt(abs_state, num_req_samples) # (x_array, u_array, s_array, s__array, ci_array, num_actual_samples)\ # = self.sample_random_smt(abs_state, num_req_samples) # return (x_array, u_array, s_array, s__array, ci_array, num_actual_samples) def get_concrete_states_from_abs_state(self, abs_state, num_req_samples): # print 'req. samples = {}'.format(num_req_samples) return self.sample_smt(abs_state, num_req_samples) # print 'found samples = {}'.format(num_actual_samples) # #### ( x_array, u_array, s_array, s__array, ci_array, num_actual_samples, ) = self.sample_smt(abs_state, 1) if num_actual_samples > 0: ( x_array_, u_array_, s_array_, s__array_, ci_array_, num_actual_samples_, ) = self.sample_random_smt(abs_state, num_req_samples) if num_actual_samples_ == 0: return ( x_array, u_array, s_array, s__array, ci_array, num_actual_samples, ) else: return ( x_array_, u_array_, s_array_, s__array_, ci_array_, num_actual_samples_, ) else: return ( x_array, u_array, s_array, s__array, ci_array, num_actual_samples, ) def get_ival_constraints(self, abs_state): raise NotImplementedError class ControllerSymbolicAbstractState(object): # c_pid def __init__( self, C, s, x, s_, u, pid, cpid, ci, hd, x_ival_cons_list, concrete_state_list=[], Cx=None, ): # time of creation self.toc = time.time() self.pid = pid self.cpid = cpid self.s = s self.u = u self.s_ = s_ self.x = x self.ci = ci # constraint on controller self.C = C # constraint o nplant state self.Cx = Cx # history depth self.hd = hd self.concrete_state_list = concrete_state_list self.x_ival_cons_list = x_ival_cons_list # print cpid return def __eq__(self, abs_state): # print 'controller_eq_invoked' return hash(self) == hash(abs_state) def __hash__(self): # print 'controller_hash_invoked' # return hash(tuple(self.s)) # print self.s # print '#', self.s_.sexpr() # return hash(self.s_.sexpr()) # TODO: fix it! removing hash function to speed up for now. # Must supply a unique hash function! # return hash(self.pid) # print 'cpid:', self.cpid, self.pid return hash(self.cpid) def __repr__(self): return '(S: {}, S\': {}, X: {}, U: {}, P: {}, CI: {})'.format( self.s, self.s_, self.x, self.u, self.pid, self.ci, )
bsd-2-clause
872,237,588,762,542,000
27.217893
143
0.484173
false
basnijholt/holoviews
holoviews/tests/ipython/testdisplayhooks.py
2
2330
from holoviews import Store, Curve
from holoviews.ipython import notebook_extension, IPTestCase


class TestDisplayHooks(IPTestCase):

    def setUp(self):
        super(TestDisplayHooks, self).setUp()
        if not notebook_extension._loaded:
            notebook_extension('matplotlib', ip=self.ip)
        self.backup = Store.display_formats
        Store.display_formats = self.format

    def tearDown(self):
        Store._custom_options = {k:{} for k in Store._custom_options.keys()}
        self.ip.run_line_magic("unload_ext", "holoviews.ipython")
        del self.ip
        Store.display_hooks = self.backup
        notebook_extension._loaded = False
        super(TestDisplayHooks, self).tearDown()


class TestHTMLDisplay(TestDisplayHooks):

    def setUp(self):
        self.format = ['html']
        super(TestHTMLDisplay, self).setUp()

    def test_store_render_html(self):
        curve = Curve([1, 2, 3])
        data, metadata = Store.render(curve)
        mime_types = {'text/html', 'application/javascript',
                      'application/vnd.holoviews_exec.v0+json'}
        self.assertEqual(set(data), mime_types)


class TestPNGDisplay(TestDisplayHooks):

    def setUp(self):
        self.format = ['png']
        super(TestPNGDisplay, self).setUp()

    def test_store_render_png(self):
        curve = Curve([1, 2, 3])
        data, metadata = Store.render(curve)
        mime_types = {'image/png'}
        self.assertEqual(set(data), mime_types)


class TestSVGDisplay(TestDisplayHooks):

    def setUp(self):
        self.format = ['svg']
        super(TestSVGDisplay, self).setUp()

    def test_store_render_svg(self):
        curve = Curve([1, 2, 3])
        data, metadata = Store.render(curve)
        mime_types = {'image/svg+xml'}
        self.assertEqual(set(data), mime_types)


class TestCombinedDisplay(TestDisplayHooks):

    def setUp(self):
        self.format = ['html', 'svg', 'png']
        super(TestCombinedDisplay, self).setUp()

    def test_store_render_combined(self):
        curve = Curve([1, 2, 3])
        data, metadata = Store.render(curve)
        mime_types = {'text/html', 'application/javascript',
                      'application/vnd.holoviews_exec.v0+json',
                      'image/svg+xml', 'image/png'}
        self.assertEqual(set(data), mime_types)
bsd-3-clause
644,588,123,788,594,300
30.066667
76
0.618884
false
wohllab/milkyway_proteomics
galaxy_milkyway_files/tools/wohl-proteomics/wohl_skyline/msstats_wrapper.py
1
47691
import os, sys, re import optparse import shutil import pandas import numpy import gc import subprocess import uniprot as uni from natsort import natsorted, ns import warnings #From stack overflow, to redirect pandas stderr warnings to stdout for Galaxy's sake....# def customwarn(message, category, filename, lineno, file=None, line=None): sys.stdout.write(warnings.formatwarning(message, category, filename, lineno)) warnings.showwarning = customwarn ##################################### ##################################### #This is a script to combine the output reports from #Skyline, in preparation for MSstats! Let's get started. # #VERSION 0.92 version="0.92" #DATE: 10/02/2017 date="10/02/2017" ##################################### print "-----------------------------------------------------------------------" print "Welcome to the MSstats wrapper for Galaxy, Wohlschlegel Lab UCLA" print "Written by William Barshop" print "Version: ",version print "Date: ",date basedir=os.getcwd() #################################### #Argument parsing! So much fun! #We'll use OptParse even though some #people really rave about argparse... # # # NB: With Optparse, if an option is # not specified, it will take a # value of None #################################### parser = optparse.OptionParser() parser.add_option("--folder",action="store",type="string",dest="operation_folder",default=".") parser.add_option("--galaxy-csv",action="store",type="string",dest="galaxy_csv") parser.add_option("--experiment_file",action="store",type="string",dest="experiment_file") parser.add_option("--remove_decoys",action="store_true",dest="remove_decoys") parser.add_option("--remove_precursors",action="store_true",dest="remove_precursors") parser.add_option("--rename",action="store_true",dest="rename") #Rename columns. parser.add_option("--renameProteinType",action="store",type="string",default="accToGene",dest="renameProteinType") #Rename proteins from accToGene, accToProtein, geneToProtein (for TGGT/ToxoDB) parser.add_option("--remove_empty",action="store_true",dest="remove_empty") parser.add_option("--merge_isotopes",action="store_true",dest="merge_isotopes") #Merge isotopes. parser.add_option("--peptide_level",action="store_true",dest="peptide_level") #no proteins, just peptides. parser.add_option("--zeros",action="store_true",dest="zeros") # 0 ---> np.nan parser.add_option("--zero_to_one",action="store_true",dest="zero_to_one") # 0 ---> 1 parser.add_option("--remove_truncated_peaks",action="store_true",dest="remove_truncated_peaks") parser.add_option("--remove_single_run",action="store_true",dest="remove_single_run") #Makes it so that you have to have measurement from >1 run. parser.add_option("--mprophet_q",action="store",type="float",dest="mprophet_q") #We'll throw out things above this q-value threshold. parser.add_option("--fractionated",action="store_true",dest="fractionated") # Using the final -??? as the fractionation identifier parser.add_option("--remove_repeated_peptides",action="store_true",dest="remove_repeated_peptides") parser.add_option("--removeProteinsByText",action="store",type="string",dest="remove_proteins_by_text") parser.add_option("--keepProteinsByText",action="store",type="string",dest="keep_proteins_by_text") parser.add_option("--minPep",action="store",type="int",dest="minimum_peptide_count") # Require at least "n" peptides for a protein. 
parser.add_option("--featureSubset",action="store",type="string",dest="feature_subset") parser.add_option("--featureSubsetN",action="store",type="int",dest="feature_subset_N") # For use with "topN" parser.add_option("--remove_proteins_with_interference",action="store_true",dest="remove_interfered_proteins") parser.add_option("--censoredInt",action="store",type="string",dest="censoredInt",default="0") parser.add_option("--cutoffCensored",action="store",type="string",dest="cutoffCensored",default="minFeature") # Can be "minFeature", "minFeatureNRun", "minRun" parser.add_option("--fillIncompleteRows",action="store_true",dest="fillIncompleteRows") # If data is missing, we'll put it back in with "NA" values parser.add_option("--MBimpute",action="store_true",dest="MBimpute") parser.add_option("--noimpute",action="store_true",dest="noimpute") parser.add_option("--remove50missing",action="store_true",dest="remove50missing") parser.add_option("--normalization",action="store",type="string",dest="normalization") # Can be "Quantile", "equalizeMedians", "globalStandards" parser.add_option("--normalization_protein",action="store",type="string",dest="normalize_protein") #Can be a comma separated list, but must match the FASTA header... parser.add_option("--maxQuantileforCensored",action="store",type="string",dest="maxQuantileforCensored") # Can be used to emulate 3.4 behavior parser.add_option("--fillMissingFeatures",action="store_true",dest="fillMissingFeatures") ################# OUTPUTS ################################ parser.add_option("--processedDataOutput",action="store",type="string",dest="processedOutput") parser.add_option("--comparisonOutput",action="store",type="string",dest="comparisonOutput") parser.add_option("--RDataOutput",action="store",type="string",dest="RDataOutput") parser.add_option("--QCplotOutput",action="store",type="string",dest="QCplotOutput") parser.add_option("--ProfilePlotOutput",action="store",type="string",dest="profilePlotOutput") parser.add_option("--ConditionPlotOutput",action="store",type="string",dest="conditionPlotOutput") parser.add_option("--RScriptOutput",action="store",type="string",dest="RScriptOutput") parser.add_option("--quantificationOutput",action="store",type="string",dest="quantificationOutput") parser.add_option("--quantificationConditionOutput",action="store",type="string",dest="quantificationConditionOutput") parser.add_option("--conditionPlotCSVOutput",action="store",type="string",dest="conditionPlotCSVOutput") ################## BELOW THIS ARE PLOTTING OPTIONS ############################## These are actually all going to be moved into a separate tool #parser.add_option("--significance",action="store",type="float",dest="significance") # For the volcano plots... #parser.add_option("--FCthreshold",action="store",type="float",dest="FCthreshold") # FC threshold For the volcano plots... #parser.add_option("--ylimUp",action="store",type="float",dest="ylimUp") # ylimUp threshold for the plots #parser.add_option("--ylimDown",action="store",type="float",dest="ylimDown") # ylimDown threshold for plots #parser.add_option("--xlimUp",action="store",type="float",dest="xlimUp") # xlimUp threshold for Volcano plots #parser.add_option("--numProtein",action="store",type="int",dest="numProtein",default=180) # Number of proteins per heatmap... Max is 180 #parser.add_option("--clustering",action="store",type="string",dest="clustering",default="protein") # clustering type for heatmap... 
Can be "protein", "comparison", "both" #parser.add_option("--proteinName",action="store_true",dest="proteinName") # On volcano plot, draw protein names? #parser.add_option("--dotSize",action="store",type="int",dest="dotSize",default=3) #parser.add_option("--textSize",action="store",type="int",dest="textSize",default=4) #parser.add_option("--legendSize",action="store",type="int",dest="legendSize",default=7) #parser.add_option("--width",action="store",type="int",dest="width",default=10) #parser.add_option("--height",action="store",type="int",dest="height",default=10) (options,args) = parser.parse_args() def help(): print "Syntax is as follows:" print "python msstats_merger.py --folder=<input folder> --remove_decoys --rename" print "--folder is a required input, it is optional to remove decoys from the MSstats input file..." if options.operation_folder is None: print "Please provide a --folder=<input folder> argument!" print "It is necessary so that I can run..." help() sys.exit(1) def appendFraction(x): try: #appended_name=str(x['Peptide Modified Sequence']+"_"+str(x['File Name'].split(".")[0].rsplit("-",1)[1]))# MAY HAVE TO CHANGE x['FILE NAME'] TO STR(x['FILE NAME']).... !!!!!!!!!!!! # 3/3/2016 -- BACK TO THIS... See: https://groups.google.com/forum/#!searchin/msstats/multiple$20methods/msstats/ZzP3Q8hGXBY/oTYo60cfovMJ appended_name=str(x['Peptide Modified Sequence']+"-Frac"+str(x['File Name'].split(".")[0].rsplit("-",1)[1]))# MAY HAVE TO CHANGE x['FILE NAME'] TO STR(x['FILE NAME']).... !!!!!!!!!!!! # 3/3/2016 changed to - to make sure we aren't screwing with MSstats return appended_name except: print "FAILED ON CORRECTING FRACTION NAMES ON",x sys.exit(0) def fixFileName(x): return str(x['File Name'].split('.')[0].rsplit("-",1)[0])#[:-3]) def peptide_level_fixer(x): return x.split("_")[0] #return x['Peptide Modified Sequence'].split("_")[0] os.chdir(options.operation_folder) if options.galaxy_csv is not None: shutil.copy(options.galaxy_csv,"MSstats_input.csv") if os.path.isfile("MSstats_combined_input.csv"): os.remove("MSstats_combined_input.csv") print "Deleted MSstats_combined_input.csv to make room for a new one..." os.chdir(basedir) infiles = [] for file in os.listdir(options.operation_folder): if file.endswith(".csv"): if "MSstats" in file: infiles.append(file) else: print "ignoring file ",str(file)," because it does not contain \"MSstats\" in the file name" #for root, subFolders, files in os.walk(options.operation_folder): # for eachfile in files: # if '.csv' in eachfile[-4:]:#We'll only look for files which end with ".csv", which is the expected file format for the MSstats input files! # infiles.append(str(os.path.join(root,eachfile))) print "about to read csv..." dataframe_vector=[] os.chdir(options.operation_folder) for eachfile in infiles: newdf=pandas.read_csv(eachfile,sep=',',index_col=False)#Used to have low_memory=False) dataframe_vector.append(newdf) del newdf gc.collect() os.chdir(basedir) print "done reading csvs..." #Peptide Modified Sequence <----- This will be the column to edit... and it'll be based on "File Name".split(".")[0].rsplit("-",1)[1] which should be like "F1" or "F2", etc... 
combined_results=pandas.concat(dataframe_vector) if options.remove_decoys: combined_results.rename(columns={'Protein Name':'ProteinName'},inplace=True) mask=combined_results[numpy.invert(combined_results.ProteinName.str.contains("Decoys"))] combined_results=mask combined_results.rename(columns={'ProteinName':'Protein Name'},inplace=True) if options.remove_precursors: combined_results.rename(columns={'Fragment Ion':'FragmentIon'},inplace=True) mask=combined_results[numpy.invert(combined_results.FragmentIon.str.contains("precursor"))] combined_results=mask combined_results.rename(columns={'FragmentIon':'Fragment Ion'},inplace=True) if options.fractionated: fixed_peptide_names=combined_results.apply(appendFraction,axis=1) fixed_file_names=combined_results.apply(fixFileName,axis=1) combined_results['Peptide Modified Sequence']=fixed_peptide_names combined_results['File Name']=fixed_file_names combined_results=combined_results.copy(deep=True) if options.normalization=="globalStandards": combined_results["Stanard Type"]=combined_results["Standard Type"].astype(str) protein_norm_dict={} for each_item in options.normalize_protein.split(","): if "::" in each_item: peptide_list=[item for item in each_item.split("::")[1:]] protein_norm_dict[each_item.split("::")[0]]=peptide_list else: protein_norm_dict[each_item]=[] for each_protein in protein_norm_dict: this_pep_list=protein_norm_dict[each_protein] if len(this_pep_list)==0: #combined_results############################################## combined_results.loc[combined_results['Protein Name'].str.contains(each_protein),"Standard Type"]="globalStandard" else: for each_peptide in this_pep_list: combined_results.loc[numpy.logical_and(combined_results['Protein Name'].str.contains(each_protein),combined_results['Peptide Modified Sequence']==each_peptide),"Standard Type"]="globalStandard" print "about to remove proteins by text..." if options.remove_proteins_by_text is not None and options.remove_proteins_by_text is not "": proteins_to_remove=options.remove_proteins_by_text.split(",") for each_protein in proteins_to_remove: each_protein=each_protein.strip() print "Removing protein",each_protein,"from the analysis." combined_results=combined_results[numpy.invert(combined_results['Protein Name'].str.contains(each_protein))] print "about to filter to keep proteins by text..." print "Starting with",len(combined_results['Protein Name'].unique()) if options.keep_proteins_by_text is not None and options.remove_proteins_by_text is not "": proteins_to_keep=options.keep_proteins_by_text.split(",") #for each_protein in proteins_to_keep: # each_protein=each_protein.strip() # print "Keeping protein",each_protein,"in the analysis." combined_results=combined_results[combined_results['Protein Name'].str.contains("|".join(proteins_to_keep))] print "Ending with",len(combined_results['Protein Name'].unique()) #Finally, we'll filter out any protein which doesn't have enough peptides based on the optional input. #combined_results.sort(columns='Protein Name',inplace=True) combined_results.sort_values(by='Protein Name',inplace=True) ############## We're going to try to extract uniprot acc's from each protein name! 
####### #combined_results['gene_name']= #uni.map([x for x in combined_results['uniprot_acc'] if x is not numpy.nan]) #combined_results.loc[combined_results['uniprot_acc'] == numpy.nan]['uniprot_acc']=combined_results['Protein Name'] #combined_results['Protein Name']=combined_results.apply(lambda x: combined_results['backup']=combined_results['Protein Name'] uni_mapping_dict={} uniprot_pattern=r'([A-NR-Z][0-9][A-Z][A-Z0-9][A-Z0-9][0-9]|[OPQ][0-9][A-Z0-9][A-Z0-9][A-Z0-9][0-9])' if "accToGene" in options.renameProteinType or options.renameProteinType is None: #This means we'll take the acc's and replace with GENE NAMES combined_results['uniprot_acc']=combined_results['Protein Name'].str.extract(uniprot_pattern) #print combined_results['uniprot_acc'] uni_mapping_dict_combined=uni.map(list(combined_results['uniprot_acc'].unique()),f='ACC',t='GENENAME') for each_acc in uni_mapping_dict_combined: catch=uni_mapping_dict_combined[each_acc] #catch=uni.map(each_acc,f='ACC',t='GENENAME') if len(catch)>0: #print catch,type(catch) uni_mapping_dict[str(each_acc)]=str(catch.pop()) #else: # catch=uni.map(each_acc,f='ACC',t='ID') # if len(catch)>0: # uni_mapping_dict[str(each_acc)]=str(catch[each_acc].pop()) # else: # uni_mapping_dict[str(each_acc)]=str(each_acc) second_query=[] for each_acc in combined_results['uniprot_acc'].unique(): if each_acc not in uni_mapping_dict_combined.keys(): second_query.append(each_acc) if len(second_query)>0: uni_mapping_dict_two=uni.map(list(second_query),f='ACC',t='ID') for each_acc in uni_mapping_dict_two: try: catch=uni_mapping_dict_combined[each_acc] except: pass #uni_mapping_dict[str(each_acc)]=str(each_acc) #catch=uni.map(each_acc,f='ACC',t='GENENAME') if len(catch)>0: #print catch,type(catch) uni_mapping_dict[str(each_acc)]=str(catch.pop()) combined_results['Protein Name']=combined_results['uniprot_acc'].map(uni_mapping_dict) combined_results.drop('uniprot_acc',axis=1,inplace=True) missing_protein_names=combined_results[combined_results['Protein Name'].isnull()] for each_key,each_row in missing_protein_names.iterrows(): combined_results.loc[each_key,'Protein Name']=combined_results.loc[each_key,'backup'] elif "accToProtein" in options.renameProteinType: # THIS MEANS WE'LL TAKE AND REPLACE WITH PROTEIN NAMES combined_results['uniprot_acc']=combined_results['Protein Name'].str.extract(uniprot_pattern) #print combined_results['uniprot_acc'] uni_mapping_dict_combined=uni.map(list(combined_results['uniprot_acc'].unique()),f='ACC',t='ID') for each_acc in uni_mapping_dict_combined: catch=uni_mapping_dict_combined[each_acc] #catch=uni.map(each_acc,f='ACC',t='GENENAME') if len(catch)>0: #print catch,type(catch) uni_mapping_dict[str(each_acc)]=str(catch.pop()) #else: # catch=uni.map(each_acc,f='ACC',t='ID') # if len(catch)>0: # uni_mapping_dict[str(each_acc)]=str(catch[each_acc].pop()) # else: # uni_mapping_dict[str(each_acc)]=str(each_acc) second_query=[] for each_acc in combined_results['uniprot_acc'].unique(): if each_acc not in uni_mapping_dict_combined.keys(): second_query.append(each_acc) if len(second_query)>0: uni_mapping_dict_two=uni.map(list(second_query),f='ACC',t='GENENAME') for each_acc in uni_mapping_dict_two: try: catch=uni_mapping_dict_combined[each_acc] except: pass #uni_mapping_dict[str(each_acc)]=str(each_acc) #catch=uni.map(each_acc,f='ACC',t='GENENAME') if len(catch)>0: #print catch,type(catch) uni_mapping_dict[str(each_acc)]=str(catch.pop()) combined_results['Protein Name']=combined_results['uniprot_acc'].map(uni_mapping_dict) 
combined_results.drop('uniprot_acc',axis=1,inplace=True) missing_protein_names=combined_results[combined_results['Protein Name'].isnull()] for each_key,each_row in missing_protein_names.iterrows(): combined_results.loc[each_key,'Protein Name']=combined_results.loc[each_key,'backup'] elif "geneToProtein" in options.renameProteinType: # THIS MEANS WE'LL TAKE AND REPLACE WITH PROTEIN NAMES combined_results['uniprot_acc']=combined_results['Protein Name'].str.extract(uniprot_pattern) #print combined_results['uniprot_acc'] uni_mapping_dict_combined=uni.map(list(combined_results['uniprot_acc'].unique()),f='GENENAME',t='ID') for each_acc in uni_mapping_dict_combined: catch=uni_mapping_dict_combined[each_acc] #catch=uni.map(each_acc,f='ACC',t='GENENAME') if len(catch)>0: #print catch,type(catch) uni_mapping_dict[str(each_acc)]=str(catch.pop()) #else: # catch=uni.map(each_acc,f='ACC',t='ID') # if len(catch)>0: # uni_mapping_dict[str(each_acc)]=str(catch[each_acc].pop()) # else: # uni_mapping_dict[str(each_acc)]=str(each_acc) second_query=[] for each_acc in combined_results['uniprot_acc'].unique(): if each_acc not in uni_mapping_dict_combined.keys(): second_query.append(each_acc) if len(second_query)>0: uni_mapping_dict_two=uni.map(list(second_query),f='ACC',t='ID') for each_acc in uni_mapping_dict_two: try: catch=uni_mapping_dict_combined[each_acc] except: pass #uni_mapping_dict[str(each_acc)]=str(each_acc) #catch=uni.map(each_acc,f='ACC',t='GENENAME') if len(catch)>0: #print catch,type(catch) uni_mapping_dict[str(each_acc)]=str(catch.pop()) combined_results['Protein Name']=combined_results['uniprot_acc'].map(uni_mapping_dict) combined_results.drop('uniprot_acc',axis=1,inplace=True) missing_protein_names=combined_results[combined_results['Protein Name'].isnull()] for each_key,each_row in missing_protein_names.iterrows(): combined_results.loc[each_key,'Protein Name']=combined_results.loc[each_key,'backup'] elif "norename" in options.renameProteinType: print "Not renaming proteins... leaving as is!" combined_results.drop('backup',axis=1,inplace=True) #combined_results.to_csv("testing_output_BEFORE.csv") #print uni_mapping_dict #try: # pass #except: # pass #for index,row in combined_results.iterrows(): # if combined_results.loc[index,'uniprot_acc'] is numpy.nan: # pass # else: # combined_results.loc[index,'Protein Name']=uni.map(row['uniprot_acc'],f='ACC',t='GENENAME') #print uni_mapping_dict #print combined_results[combined_results['Peptide Modified Sequence']=="INTQWLLTSGTTEANAWK"] #combined_results.to_csv("testing_output.csv") #sys.exit(0) if options.mprophet_q is not None: #We'll take the results below the Q-value cutoff and set them to zero #combined_results=combined_results[combined_results['annotation_QValue']<=options.mprophet_q] #combined_results=combined_results.copy(deep=True) # BAD #combined_results[combined_results['annotation_QValue']>options.mprophet_q]['Area']=numpy.nan #ADVISED TO CHANGE TO ZERO VALUES IN SKYLINE BY M. CHOI #combined_results[combined_results['annotation_QValue']>options.mprophet_q]['Area']=0 combined_results.loc[combined_results['annotation_QValue']>options.mprophet_q,'Area']=0 #combined_results=combined_results.copy(deep=True) # BAD PRACTICE... 
#combined_results.loc[combined_results['annotation_QValue']>options.mprophet_q,'Area']=numpy.nan if options.remove_truncated_peaks: combined_results['Area'] if options.merge_isotopes: combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str) combined_results['unique_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['File Name']+"_"+combined_results['Protein Name'] combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(int) groups=combined_results.groupby(by=['unique_name'],as_index=False).agg({'Area':numpy.sum}) #ADD MASK HERE TO SUM PRECURSOR AREAS IN A FUTURE UPDATE TO ALLOW FOR DIA DATA combined_results.drop('Area',1,inplace=True) merged_results=pandas.merge(combined_results,groups,on=['unique_name']) merged_results.drop_duplicates(subset='unique_name',inplace=True) merged_results['Fragment Ion']="sum" combined_results=merged_results column_list=combined_results.columns.tolist() column_list.append(column_list.pop(column_list.index('Standard Type'))) column_list.append(column_list.pop(column_list.index('Truncated'))) combined_results.reindex(columns=column_list) combined_results.drop('unique_name',1,inplace=True) #combined_results.to_csv("TEMP_TEST.csv",sep=",",index=False) if options.peptide_level: fixed_names=combined_results['Peptide Modified Sequence'].apply(peptide_level_fixer) #fixed_names=[row[0] for row in fixed_names_list] combined_results['Protein Name']=fixed_names combined_results=combined_results.copy(deep=True) combined_results.sort_values(by='Protein Name',inplace=True) if options.remove_repeated_peptides: combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str) combined_results['unique_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['Fragment Ion']+"_"+combined_results['File Name'] combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(int) print "Removing duplicate peptides...",len(combined_results) combined_results.drop_duplicates(subset='unique_name',keep=False,inplace=True) print "Done!",len(combined_results) combined_results.drop('unique_name',1,inplace=True) if options.minimum_peptide_count is not None and options.minimum_peptide_count > 0: #unique_peps_only=combined_results.drop_duplicates(subset="Peptide Modified Sequence") protein_groups=combined_results.groupby("Protein Name") passing_proteins=protein_groups.filter(lambda x: len(x["Peptide Modified Sequence"].unique()) >= options.minimum_peptide_count) passing_proteins_list=passing_proteins["Protein Name"].unique().tolist() combined_results=combined_results[combined_results["Protein Name"].isin(passing_proteins_list)] if options.remove_empty: if options.merge_isotopes: #mask = combined_results['Area'].apply(lambda x: numpy.isnan(x)) #combined_results=combined_results[mask] combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(str) combined_results['unique_name']=combined_results['Peptide Modified Sequence']+"_"+combined_results['Precursor Charge']+"_"+combined_results['Protein Name'] combined_results['Precursor Charge']=combined_results['Precursor Charge'].astype(int) groups=combined_results.groupby(by=['unique_name'],as_index=False).agg({'Area':numpy.nansum}) # CHANGED TO NANSUM groups.rename(columns={'Area':'TransitionAreaSum'},inplace=True) merged_results=pandas.merge(combined_results,groups,on=['unique_name']) 
        #crap=merged_results[numpy.invert(merged_results['TransitionAreaSum']>0)]
        #print list(merged_results.columns.values)
        #crap=combined_results[numpy.invert(combined_results['Area_y']>0)]
        #print "FILTERING CRAP:",crap
        #pause=raw_input("paused....")
        combined_results=merged_results[merged_results['TransitionAreaSum']>0]
        combined_results.drop('unique_name',1,inplace=True)
        combined_results.drop('TransitionAreaSum',1,inplace=True) # DROP TRANSITION AREA SUM, TOO.... BUT LEFT FOR NOW FOR TESTING
        ##transition_groups=combined_results.groupby(by='unique_name',as_index=True)['Area'].sum()
        ##transition_groups=transition_groups.apply(lambda x: numpy.isnan(x) or x<=0.0)
        ##transition_groups=transition_groups[transition_groups==True]
        ##index_list=list(transition_groups.index.values)
        ##combined_results=combined_results[numpy.invert(combined_results.unique_name.str.contains("|".join(index_list)))]
        #print transition_groups,type(transition_groups)
        #print combined_results,"BEFORE"
        #pause=raw_input("PAUSE...")
        #print combined_results,"AFTER"
        #sys.exit(0)
        #empty_transitions=[]
        #for index,each in transition_groups.iteritems():
        #    if numpy.isnan(each) or each<=0.0:
        #        empty_transitions.append(str(index))
        #for eachtransition in empty_transitions:
        #    combined_results=combined_results[numpy.invert(combined_results.unique_name.str.contains(eachtransition))]
    protein_groups=combined_results.groupby(by='Protein Name',as_index=False).agg({'Area':numpy.nansum})#['Area'].sum()
    protein_groups.rename(columns={'Area':'ProteinAreaSum'},inplace=True)
    merged_results=pandas.merge(combined_results,protein_groups,on=['Protein Name'])
    combined_results=merged_results[merged_results['ProteinAreaSum']>0]
    combined_results.drop('ProteinAreaSum',1,inplace=True) # DROP THE PROTEIN AREA SUM COLUMN, TOO.... BUT LEFT FOR NOW FOR TESTING
    combined_results['Area']=combined_results['Area'].replace(0,numpy.nan)
    del protein_groups
    #empty_proteins=[]
    #for index,each in protein_groups.iteritems():
    #    if numpy.isnan(each) or each<=0.0:
    #        if "AIPT[+80]VNHSGTFSPQAPVPTTVPVVDVR" in str(index):
    #            print each,"THIS IS IT FOR AIPT[+80]VNHSGTFSPQAPVPTTVPVVDVR"
    #        empty_proteins.append(str(index))
    #combined_results.rename(columns={'Protein Name':'ProteinName'},inplace=True)
    #for eachprotein in empty_proteins:
    #    combined_results=combined_results[numpy.invert(combined_results.ProteinName.str.contains(eachprotein))]
    #combined_results.rename(columns={'ProteinName':'Protein Name'},inplace=True)

if options.zeros:
    print "We're going to replace all zero values with np.nan!"
    combined_results['Area']=combined_results['Area'].replace(0,numpy.nan)
elif options.zero_to_one:
    print "We're going to replace all zero values with 1!\n This is usually a BAD idea... Like, almost ALWAYS."
    combined_results['Area']=combined_results['Area'].replace(0,1)
if options.zeros and options.zero_to_one:
    print "WARNING: CANNOT REPLACE ZEROS WITH ONE-- AND NP NAN. REPLACING WITH NAN TAKES PRECEDENT."
#else:
#    print "CAUTION: THERE MAY BE ZEROS IN DATASET WHICH MAY INTERFERE WITH STATISTICAL ANALYSIS."
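# --- Note (illustrative, self-contained): pandas Series.replace returns a new
# --- Series unless the result is assigned back or inplace=True is used, which is
# --- why the zero handling above assigns back into combined_results['Area'].
# --- The small frame below is a made-up example, unrelated to the real input data.
def _replace_zero_demo():
    demo=pandas.DataFrame({"Area":[0.0,5.0,0.0]})
    demo["Area"]=demo["Area"].replace(0,numpy.nan)
    return int(demo["Area"].isnull().sum())    # -> 2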
if options.remove_single_run: feature_groups=combined_results.groupby(by=['Protein Name','Peptide Modified Sequence'],as_index=False).count() #if options.merge_isotopes: # feature_groups=feature_groups[feature_groups['Area']>1] #>1 means at least 2 runs have data (summed isotopes) #else: # feature_groups=feature_groups[feature_groups['Area']>3] #>3 means at least 2 runs have data with 3 isotopes feature_groups['unique_name']=feature_groups['Protein Name']+feature_groups['Peptide Modified Sequence'] feature_groups=feature_groups[['Area','unique_name']] feature_groups.rename(columns={'Area':'AreaCount'},inplace=True) #feature_groups['keep']=1 combined_results['unique_name']=combined_results['Protein Name']+combined_results['Peptide Modified Sequence'] #merged_results=pandas.merge(combined_results,groups,on=['unique_name']) combined_results=pandas.merge(combined_results,feature_groups,on=['unique_name'])#,indicator=True)#,how="right") if options.merge_isotopes: combined_results=combined_results[combined_results['AreaCount']>1] else: combined_results=combined_results[combined_results['AreaCount']>3] combined_results.drop('unique_name',1,inplace=True) combined_results.drop('AreaCount',1,inplace=True) #print combined_results #Okay, now we'll go ahead and print out the final MSstats input file! if options.rename: combined_results.rename(columns={'Protein Name':'ProteinName','Peptide Modified Sequence':'PeptideSequence','Precursor Charge':'PrecursorCharge','Fragment Ion':'FragmentIon','Product Charge':'ProductCharge','Isotope Label Type':'IsotopeLabelType','Standard Type':'StandardType','File Name':'Run','Area':'Intensity'},inplace=True) if options.mprophet_q is not None: combined_results = combined_results.drop('annotation_QValue', 1) combined_results = combined_results.drop('annotation_Score',1) #if options.remove_decoys: # if not options.rename: # combined_results.rename(columns={'Protein Name':'ProteinName'},inplace=True) # mask=combined_results[numpy.invert(combined_results.ProteinName.str.contains("Decoys"))] # if not options.rename: # mask.rename(columns={'ProteinName':'Protein Name'},inplace=True) # mask.to_csv(str(os.path.join(basedir,options.operation_folder))+"/MSstats_combined_input.csv",sep=",",index=False) #else: if options.fillMissingFeatures: bioreplicate_dict={} condition_dict={} for each_run in combined_results['Run'].unique(): temp=combined_results[combined_results['Run']==each_run] bioreplicate_dict[each_run]=temp['BioReplicate'].unique()[0] condition_dict[each_run]=temp['Condition'].unique()[0] grouped_df=combined_results.groupby(["PeptideSequence","PrecursorCharge","FragmentIon","ProductCharge"]) concat_list=[] correct_length=len(bioreplicate_dict.keys()) for name,eachgroup in grouped_df: if len(eachgroup)!=correct_length: for each_name in bioreplicate_dict.keys():#name_list: if each_name not in eachgroup['Run'].unique(): new_row=eachgroup.head(n=1).copy(deep=True) new_row['Run']=each_name new_row['Intensity']=numpy.nan new_row['Condition']=condition_dict[each_name] new_row['BioReplicate']=bioreplicate_dict[each_name] concat_list.append(new_row) concat_list.append(combined_results) combined_results=pandas.concat(concat_list) #combined_results=pandas.concat([combined_results,new_row]) combined_results=combined_results.drop('Mass Error PPM', 1) combined_results=combined_results.drop('Average Mass Error PPM', 1) combined_results.to_csv(str(os.path.join(basedir,options.operation_folder))+"/MSstats_combined_input.csv",sep=",",index=False) print "We have now written the combined 
MSstats input file to "+str(os.path.join(basedir,options.operation_folder))+"\\MSstats_combined_input.csv" #print "Now finished. Quitting!" print "Now we're going to prepare the R script for MSstats" #Let's start by reading in the experiment structure. group_information = pandas.read_csv(options.experiment_file,sep='\t') group_information["Biological Condition"]=group_information["Biological Condition"].astype(str) with open("MSstats_Script.R",'wb') as script_writer: script_writer.write("library(MSstats)\n") script_writer.write("library(preprocessCore)\n") if os.name=="nt": script_writer.write(str("setwd(\""+str(os.path.join(basedir,options.operation_folder))+"\")\n").replace("\\","\\\\")) #We're going to set the current directory... script_writer.write(str("raw<-read.csv(\""+str(os.path.join(basedir,options.operation_folder))+"/MSstats_combined_input.csv"+"\")\n").replace("\\","\\\\")) #We will load in the input CSV file! (In this case by absolute path, though that's not necessary...) else: script_writer.write("setwd(\""+str(os.path.join(basedir,options.operation_folder))+"\")\n") #We're going to set the current directory... script_writer.write("raw<-read.csv(\""+str(os.path.join(basedir,options.operation_folder))+"/MSstats_combined_input.csv"+"\")\n") #We will load in the input CSV file! (In this case by absolute path, though that's not necessary...) ##### MSstats dataProcess ##### script_writer.write("TMP_result<-dataProcess(raw=raw,") if options.fillIncompleteRows: script_writer.write("fillIncompleteRows=TRUE,") else: script_writer.write("fillIncompleteRows=FALSE,") if "NULL" in options.maxQuantileforCensored: script_writer.write("maxQuantileforCensored=NULL,") print "MAX QUANTILE FOR CENSORED HAS BEEN TURNED OFF." else: #print "MAX QUANTILE FOR CENSORED HAS BEEN TURNED OFF." #try: #float(options.maxQuantileforCensored) #print "MAX QUANTILE FOR CENSORED HAS BEEN TURNED OFF." script_writer.write("maxQuantileforCensored={0},".format(options.maxQuantileforCensored)) #except: # #print "MAX QUANTILE FOR CENSORED HAS BEEN TURNED OFF." # # #script_writer.write("maxQuantileforCensored=NULL,") if "quantile" in options.normalization: script_writer.write("normalization=\"quantile\",") elif "equalizeMedians" in options.normalization: script_writer.write("normalization=\"equalizeMedians\",") elif "globalStandards" in options.normalization: norm_peptide_set=set(combined_results[combined_results["StandardType"]=="globalStandard"]["PeptideSequence"]) if len(norm_peptide_set)>0: script_writer.write("normalization=\"globalStandards\",") peptide_norm_string="c(\""+"\",\"".join(norm_peptide_set)+"\")" script_writer.write("nameStandards="+peptide_norm_string+",") else: os.write(2, "\n\nERROR::: The user requested global standard normalization, but no targets were included after filtering.\nPlease change the targets or the filter settings and try again!\n") sys.exit(2) elif "false" in options.normalization: script_writer.write("normalization=FALSE,") #script_writer.write("betweenRunInterferenceScore=TRUE,") #because why not have them? # because it crashes the analysis on certain large projects... 
if "all" in options.feature_subset: script_writer.write("featureSubset=\"all\",") elif "topN" in options.feature_subset: if options.feature_subset_N==3: script_writer.write("featureSubset=\"top3\",") else: script_writer.write("featureSubset=\"topN\",") script_writer.write("n_top_feature="+str(options.feature_subset_N)+",") elif "highQuality" in options.feature_subset: script_writer.write("featureSubset=\"highQuality\",") script_writer.write("summaryMethod=\"TMP\",") if options.noimpute: script_writer.write("censoredInt=NULL,MBimpute=FALSE,") else: if int(options.censoredInt)==0: script_writer.write("censoredInt=0,") elif "NULL" in options.censoredInt: script_writer.write("censoredInt=\"NA\",") if options.MBimpute: script_writer.write("MBimpute=TRUE,") else: script_writer.write("MBimpute=FALSE,") if "minFeature" in options.cutoffCensored: script_writer.write("cutoffCensored=\"minFeature\",") elif "minRun" in options.cutoffCensored: script_writer.write("cutoffCensored=\"minRun\",") elif "minFeatureNRun" in options.cutoffCensored: script_writer.write("cutoffCensored=\"minFeatureNRun\",") if options.remove50missing: script_writer.write("remove50missing=TRUE,") else: script_writer.write("remove50missing=FALSE,") script_writer.write("logTrans=2)\n") #script_writer.write("logTrans=2,") #script_writer.write("skylineReport=TRUE)\n") ###################################################### #Data is now stored in "TMP_result$ProcessedData" #TMP_result$RunlevelData #TMP_result$SummaryMethod #TMP_result$PredictBySurvival (if MBimpute with AFT) #In this section, we'll write out the QC type plots... and then make our comparison based on the experimental structure info... (which is stored in "group_information") script_writer.write("write.csv(TMP_result$ProcessedData,file=\"TMP_dataProcess_output.csv\")\n") script_writer.write("quant_matrix<-quantification(TMP_result,type=\"Sample\",format=\"matrix\")\n") script_writer.write("write.csv(quant_matrix,file=\"quantification.csv\")\n") script_writer.write("cond_quant_matrix<-quantification(TMP_result,type=\"Group\",format=\"matrix\")\n") script_writer.write("write.csv(cond_quant_matrix,file=\"condition_quantification.csv\")\n") script_writer.write("dataProcessPlots(data = TMP_result, type=\"QCplot\", ylimUp=35)\n") script_writer.write("dataProcessPlots(data = TMP_result, type=\"ProfilePlot\", ylimUp=35, featureName=\"NA\",width=7,height=7)\n") script_writer.write("dataProcessPlots(data = TMP_result, type=\"ConditionPlot\",save_condition_plot_result=TRUE)\n") #dataProcessPlots(data=ideal,type="QCPlot") #dataProcessPlots(data=ideal,type="ProfilePlot") #dataProcessPlots(data=ideal,type="ConditionPlot") ###################################################### ### After we have those plots, we're going to ask for the comparisons themselves... (again, "group_information") #### Note that the conditions are arranged in alphabetical order <--------------- !!!!!! # # #Let's go ahead and make strings to describe our comparison matrix for MSstats! Let's grab all of the biological conditions from group_information dataframe! #groups_abet=sorted(group_information['Biological Condition'].unique().tolist()) #This should be the same order that MSstats sees! -- it appears that it is not... #groups_abet=natsorted(group_information['Biological Condition'].unique().tolist(),alg=ns.IGNORECASE | ns.REAL) #This should be the same order that MSstats sees! -- it appears that it is not... 
alphabetical ignore case #Natsort fails when numbers come into play, so we'll just put this to bed and use R to sort. with open("R_sorter.Rscript",'wb') as sorter_script_writer: sorter_script_writer.write("sort(c(\""+"\",\"".join(group_information['Biological Condition'].astype(str).unique().tolist())+"\"))") groups_abet_cmdout=subprocess.check_output(["Rscript","R_sorter.Rscript"]) print groups_abet_cmdout,"\nACTUALOUTPUT===================" groups_abet=[x.strip("\"").rstrip("\"") for x in groups_abet_cmdout.split("\"")[1:]] new_abet=[] for each_group_prestrip in groups_abet: each_group=each_group_prestrip.strip() if len(each_group)==0: continue if each_group.startswith('[') and each_group.endswith(']'): #These groups are actually just line information from the R output. We'll remove them. print "Group ",each_group," is not a real group... dropping..." else: new_abet.append(each_group) groups_abet=new_abet print groups_abet,"This should be alphabetical!" num_groups=len(groups_abet) group_to_MSstats_order={} MSstats_order_to_group={} control_index_list=[] test_index_list=[] for i in xrange(0,num_groups): if "C" in group_information[group_information['Biological Condition'] == groups_abet[i]]['Test or Control'].unique().tolist(): control_index_list.append(i) else: print "was looking for C in...",group_information[group_information['Biological Condition'] == groups_abet[i]]['Test or Control'].unique().tolist() test_index_list.append(i) group_to_MSstats_order[groups_abet[i]]=i MSstats_order_to_group[i]=groups_abet[i] comparisons=[] comparison_names=[] control_fraction=1.0/float(len(control_index_list)) print "MSstats o to g",MSstats_order_to_group for i in xrange(0,num_groups): #A line for each comparison this_comparison_name="" if i in control_index_list: continue #Otherwise... new_comparison=[] for j in xrange(0,num_groups): #A position in the line for each comparison if i==j: new_comparison.append(str(1)) #this_comparison_name=MSstats_order_to_group[i]+" vs."+this_comparison_name this_comparison_name=MSstats_order_to_group[i]+" "#+" vs."+this_comparison_name elif j in control_index_list: new_comparison.append(str(-1.0*control_fraction)) #this_comparison_name=this_comparison_name+" "+MSstats_order_to_group[j]+"&" else: new_comparison.append(str(0)) this_comparison_name=this_comparison_name[:-1] comparison_names.append(this_comparison_name) comparisons.append(new_comparison) ######################### OKAY ####################### ########## Now we have the comparisons built and we're ########## going to take those comparisons and build ########## the strings that MSstats needs to make its ########## analysis go! 
########## comparison1<-matrix(c(-1,0,1,0,0,0,0,0,0,0),nrow=1) ########## comparison2<-matrix(c(-1,0,0,0,0,0,1,0,0,0),nrow=1) ########## comparison3<-matrix(c(-1,0,0,0,0,0,0,0,1,0),nrow=1) ########## comparison<-rbind(comparison1,comparison2, comparison3) ########## row.names(comparison)<-c("T3-T1","T7-T1","T9-T1") i=1 temp_rbind_string="" for each_comparison_list in comparisons: script_writer.write("comparison"+str(i)+"<-matrix(c("+",".join(each_comparison_list)+"),nrow=1)\n") temp_rbind_string+="comparison"+str(i)+"," i+=1 temp_rbind_string=temp_rbind_string[:-1] #if len(temp_rbind_string.split(","))>1: script_writer.write("comparison<-rbind("+temp_rbind_string+")\n") #else: # script_writer.write("comparison<-comparison1\n") temp_comparison_string="" for each_comp in comparison_names: temp_comparison_string+="\""+each_comp+"\""+"," temp_comparison_string=temp_comparison_string[:-1] script_writer.write("row.names(comparison)<-c("+temp_comparison_string+")\n") # OKAY, so at this point we've now constructed our "comparison" object in the R script... # Now we will have to prepare the groupComparison! script_writer.write("comparisonResult<-groupComparison(contrast.matrix=comparison,data=TMP_result)\n") #May need TMP_result$ProcessedData script_writer.write("print(levels(TMP_result$ProcessedData$GROUP_ORIGINAL))\n") # The heavy lifting is done!!! Let's write out a csv before we go crazy with plotting... script_writer.write("write.csv(comparisonResult$ComparisonResult,file=\"comparisonResult_output.csv\")\n") #script_writer.write("save(comparisonResult,file=\"image.RData\")\n") script_writer.write("quantification_csv<-read.csv(\"quantification.csv\",check.names=FALSE)\n") script_writer.write("comparison_csv<-read.csv(\"comparisonResult_output.csv\",check.names=FALSE)\n") script_writer.write("condition_plot_csv<-read.csv(\"ConditionPlot_value.csv\",check.names=FALSE)\n") #script_writer.write("experiment_design<-read.csv(\""+options.experiment_file+"\",sep=\"\\t\",check.names=FALSE)\n") script_writer.write("save.image(file=\"image.RData\")\n") #OKAY! So, now we're going to write out the plots... This may take a bit... #So, first, let's check if we can output a heatmap (number of comparisons >2) #if len(temp_comparison_string.split(","))>2: # script_writer.write("groupComparisonPlots(data=comparisonResult$ComparisonResult,type="Heatmap", logBase.pvalue=2, sig="+str(options.significance)+", FCcutoff="+options.FCthreshold pass #OKAY.... The R Script has been written! #We're going to execute the R script now! print "Copying RScript back to Galaxy..." shutil.copy('MSstats_Script.R',options.RScriptOutput) #subprocess.check_call(['Rscript', 'MSstats_Script.R'],shell=False,stderr=sys.stdout.fileno()) #A word to the wise... Don't run MSstats through Rscript... At least as of 2017/05/02, it causes some serious issues... Much better to run straight into R itself, for whatever reason... 
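# --- Worked example (hypothetical groups) of the contrast rows generated above.
# --- With sorted conditions ["Control","TreatA","TreatB"] and "Control" marked "C",
# --- control_fraction is 1.0 and the loop writes:
# ---   comparison1<-matrix(c(-1.0,1,0),nrow=1)
# ---   comparison2<-matrix(c(-1.0,0,1),nrow=1)
# ---   row.names(comparison)<-c("TreatA","TreatB")
def _contrast_rows_example(groups=("Control","TreatA","TreatB"),control_indices=(0,)):
    frac=1.0/len(control_indices)
    rows=[]
    for i in range(len(groups)):
        if i in control_indices:
            continue
        rows.append([1 if j==i else (-1.0*frac if j in control_indices else 0) for j in range(len(groups))])
    return rows   # [[-1.0, 1, 0], [-1.0, 0, 1]]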
if os.name=="nt": #subprocess.check_call(["Rscript", 'MSstats_Script.R'],shell=True,stderr=sys.stdout.fileno()) ##subprocess.check_call(["R",'<', 'MSstats_Script.R','--vanilla'],shell=True,stderr=sys.stdout.fileno()) subprocess.check_call(["R < MSstats_Script.R --vanilla"],shell=True,stderr=sys.stdout.fileno()) else: #subprocess.check_call(['Rscript', 'MSstats_Script.R'],shell=True,stderr=sys.stdout.fileno()) ##subprocess.check_call(['R','<', 'MSstats_Script.R','--vanilla'],shell=True,stderr=sys.stdout.fileno()) subprocess.check_call(['R < MSstats_Script.R --vanilla'],shell=True,stderr=sys.stdout.fileno()) print "Moving files to final output locations...." shutil.copy('TMP_dataProcess_output.csv',options.processedOutput) shutil.copy('quantification.csv',options.quantificationOutput) shutil.copy('condition_quantification.csv',options.quantificationConditionOutput) shutil.copy('comparisonResult_output.csv',options.comparisonOutput) shutil.copy('ConditionPlot_value.csv',options.conditionPlotCSVOutput) shutil.copy('image.RData',options.RDataOutput) shutil.copy('QCPlot.pdf',options.QCplotOutput) shutil.copy('ProfilePlot.pdf',options.profilePlotOutput) shutil.copy('ConditionPlot.pdf',options.conditionPlotOutput) print "All done!"
mit
-7,744,223,414,931,171,000
51.639073
333
0.680862
false
VirusTotal/msticpy
msticpy/sectools/geoip.py
1
28312
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """ Geoip Lookup module using IPStack and Maxmind GeoLite2. Geographic location lookup for IP addresses. This module has two classes for different services: - GeoLiteLookup - Maxmind Geolite (see https://www.maxmind.com) - IPStackLookup - IPStack (see https://ipstack.com) Both services offer a free tier for non-commercial use. However, a paid tier will normally get you more accuracy, more detail and a higher throughput rate. Maxmind geolite uses a downloadable database, while IPStack is an online lookup (API key required). """ import math import os import tarfile import warnings from abc import ABCMeta, abstractmethod from collections.abc import Iterable from datetime import datetime, timedelta from json import JSONDecodeError from pathlib import Path from typing import Any, Dict, List, Mapping, Optional, Tuple import geoip2.database # type: ignore import pandas as pd import requests from geoip2.errors import AddressNotFoundError # type: ignore from IPython import get_ipython from IPython.display import HTML, display from requests.exceptions import HTTPError from .._version import VERSION from ..nbtools.entityschema import GeoLocation, IpAddress # type: ignore from ..common.utility import export from ..common.exceptions import MsticpyUserConfigError from ..common.provider_settings import ProviderSettings, get_provider_settings __version__ = VERSION __author__ = "Ian Hellen" class GeoIPDatabaseException(Exception): """Exception when GeoIP database cannot be found.""" class GeoIpLookup(metaclass=ABCMeta): """ Abstract base class for GeoIP Lookup classes. See Also -------- IPStackLookup : IPStack GeoIP Implementation GeoLiteLookup : MaxMind GeoIP Implementation """ _LICENSE_TXT: Optional[str] = None _LICENSE_HTML: Optional[str] = None _license_shown: bool = False def __init__(self): """Initialize instance of GeoIpLookup class.""" self._print_license() @abstractmethod def lookup_ip( self, ip_address: str = None, ip_addr_list: Iterable = None, ip_entity: IpAddress = None, ) -> Tuple[List[Any], List[IpAddress]]: """ Lookup IP location abstract method. Parameters ---------- ip_address : str, optional a single address to look up (the default is None) ip_addr_list : Iterable, optional a collection of addresses to lookup (the default is None) ip_entity : IpAddress, optional an IpAddress entity (the default is None) - any existing data in the Location property will be overwritten Returns ------- Tuple[List[Any], List[IpAddress]]: raw geolocation results and same results as IpAddress entities with populated Location property. """ def df_lookup_ip(self, data: pd.DataFrame, column: str) -> pd.DataFrame: """ Lookup Geolocation data from a pandas Dataframe. 
Parameters ---------- data : pd.DataFrame pandas dataframe containing IpAddress column column : str the name of the dataframe column to use as a source Returns ------- pd.DataFrame Copy of original dataframe with IP Location information columns appended (where a location lookup was successful) """ ip_list = list(data[column].values) _, entities = self.lookup_ip(ip_addr_list=ip_list) ip_dicts = [ {**ent.Location.properties, "IpAddress": ent.Address} for ent in entities ] df_out = pd.DataFrame(data=ip_dicts) return data.merge(df_out, how="left", left_on=column, right_on="IpAddress") # pylint: disable=protected-access def _print_license(self): if self.__class__._license_shown: return if self._LICENSE_HTML and get_ipython(): display(HTML(self._LICENSE_HTML)) elif self._LICENSE_TXT: print(self._LICENSE_TXT) self.__class__._license_shown = True # pylint: enable=protected-access @export class IPStackLookup(GeoIpLookup): """ IPStack GeoIP Implementation. See Also -------- GeoIpLookup : Abstract base class GeoLiteLookup : MaxMind GeoIP Implementation """ _LICENSE_HTML = """ This library uses services provided by ipstack. <a href="https://ipstack.com">https://ipstack.com</a>""" _LICENSE_TXT = """ This library uses services provided by ipstack (https://ipstack.com)""" _IPSTACK_API = "http://api.ipstack.com/{iplist}?access_key={access_key}&output=json" _NO_API_KEY_MSSG = """ No API Key was found to access the IPStack service. If you do not have an account, go here to create one and obtain and API key. Add this API key to your msticpyconfig.yaml Alternatively, you can pass this to the IPStackLookup class when creating it: >>> iplookup = IPStackLookup(api_key="your_api_key") """ def __init__(self, api_key: Optional[str] = None, bulk_lookup: bool = False): """ Create a new instance of IPStackLookup. Parameters ---------- api_key : str, optional API Key from IPStack - see https://ipstack.com default is None - obtain key from msticpyconfig.yaml bulk_lookup : bool, optional For Professional and above tiers allowing you to submit multiple IPs in a single request. (the default is False, which submits a single request per address) """ super().__init__() self.settings = _get_geoip_provider_settings("IPStack") if api_key: self._api_key = api_key else: self._api_key = self.settings.args.get("AuthKey") # type: ignore if not self._api_key: raise MsticpyUserConfigError( self._NO_API_KEY_MSSG, help_uri=( "https://msticpy.readthedocs.io/en/latest/data_acquisition/" + "GeoIPLookups.html#ipstack-geo-lookup-class" ), service_uri="https://ipstack.com/product", title="IPStack API key not found", ) self.bulk_lookup = bulk_lookup def lookup_ip( self, ip_address: str = None, ip_addr_list: Iterable = None, ip_entity: IpAddress = None, ) -> Tuple[List[Any], List[IpAddress]]: """ Lookup IP location from IPStack web service. Parameters ---------- ip_address : str, optional a single address to look up (the default is None) ip_addr_list : Iterable, optional a collection of addresses to lookup (the default is None) ip_entity : IpAddress, optional an IpAddress entity (the default is None) - any existing data in the Location property will be overwritten Returns ------- Tuple[List[Any], List[IpAddress]]: raw geolocation results and same results as IpAddress entities with populated Location property. Raises ------ ConnectionError Invalid status returned from http request PermissionError Service refused request (e.g. 
requesting batch of addresses on free tier API key) """ if ip_address and isinstance(ip_address, str): ip_list = [ip_address.strip()] elif ip_addr_list: ip_list = list((ip.strip() for ip in ip_addr_list)) elif ip_entity: ip_list = [ip_entity.Address] else: raise ValueError("No valid ip addresses were passed as arguments.") results = self._submit_request(ip_list) output_raw = [] output_entities = [] for ip_loc, status in results: if status == 200: output_entities.append(self._create_ip_entity(ip_loc, ip_entity)) output_raw.append((ip_loc, status)) return output_raw, output_entities @staticmethod def _create_ip_entity(ip_loc: dict, ip_entity) -> IpAddress: if not ip_entity: ip_entity = IpAddress() ip_entity.Address = ip_loc["ip"] geo_entity = GeoLocation() geo_entity.CountryCode = ip_loc["country_code"] # type: ignore geo_entity.CountryName = ip_loc["country_name"] # type: ignore geo_entity.State = ip_loc["region_name"] # type: ignore geo_entity.City = ip_loc["city"] # type: ignore geo_entity.Longitude = ip_loc["longitude"] # type: ignore geo_entity.Latitude = ip_loc["latitude"] # type: ignore if "connection" in ip_loc: geo_entity.Asn = ip_loc["connection"]["asn"] # type: ignore ip_entity.Location = geo_entity return ip_entity def _submit_request(self, ip_list: List[str]) -> List[Tuple[Dict[str, str], int]]: """ Submit the request to IPStack. Parameters ---------- ip_list : List[str] String list of IPs to look up Returns ------- List[Tuple[str, int]] List of response, status code pairs """ if not self.bulk_lookup: return self._lookup_ip_list(ip_list) submit_url = self._IPSTACK_API.format( iplist=",".join(ip_list), access_key=self._api_key ) response = requests.get(submit_url) if response.status_code == 200: results = response.json() # {"success":false,"error":{"code":303,"type":"batch_not_supported_on_plan", # "info":"Bulk requests are not supported on your plan. # Please upgrade your subscription."}} if "success" in results and not results["success"]: raise PermissionError( "Service unable to complete request. Error: {}".format( results["error"] ) ) return [(item, response.status_code) for item in results] if response: try: return [(response.json(), response.status_code)] except JSONDecodeError: pass return [({}, response.status_code)] def _lookup_ip_list(self, ip_list: List[str]): """Lookup IP Addresses one-by-one.""" ip_loc_results = [] with requests.Session() as session: for ip_addr in ip_list: submit_url = self._IPSTACK_API.format( iplist=ip_addr, access_key=self._api_key ) response = session.get(submit_url) if response.status_code == 200: ip_loc_results.append((response.json(), response.status_code)) else: if response: try: ip_loc_results.append( (response.json(), response.status_code) ) continue except JSONDecodeError: ip_loc_results.append((None, response.status_code)) else: print("Unknown response from IPStack request.") ip_loc_results.append((None, -1)) return ip_loc_results @export class GeoLiteLookup(GeoIpLookup): """ GeoIP Lookup using MaxMindDB database. See Also -------- GeoIpLookup : Abstract base class IPStackLookup : IPStack GeoIP Implementation """ _MAXMIND_DOWNLOAD = ( "https://download.maxmind.com/app/geoip_download?" 
+ "edition_id=GeoLite2-City&license_key={license_key}&suffix=tar.gz" ) _DB_HOME = os.path.join(os.path.expanduser("~"), ".msticpy", "GeoLite2") _DB_ARCHIVE = "GeoLite2-City.mmdb.tar.gz" _DB_FILE = "GeoLite2-City.mmdb" _LICENSE_HTML = """ This product includes GeoLite2 data created by MaxMind, available from <a href="https://www.maxmind.com">https://www.maxmind.com</a>. """ _LICENSE_TXT = """ This product includes GeoLite2 data created by MaxMind, available from https://www.maxmind.com. """ _NO_API_KEY_MSSG = """ No API Key was found to download the Maxmind GeoIPLite database. If you do not have an account, go here to create one and obtain and API key. https://www.maxmind.com/en/geolite2/signup Add this API key to your msticpyconfig.yaml https://msticpy.readthedocs.io/en/latest/data_acquisition/GeoIPLookups.html#maxmind-geo-ip-lite-lookup-class. Alternatively, you can pass this to the GeoLiteLookup class when creating it: >>> iplookup = GeoLiteLookup(api_key="your_api_key") """ def __init__( self, api_key: Optional[str] = None, db_folder: Optional[str] = None, force_update: bool = False, auto_update: bool = True, ): r""" Return new instance of GeoLiteLookup class. Parameters ---------- api_key : str, optional Default is None - use configuration value from msticpyconfig.yaml. API Key from MaxMind - Read more about GeoLite2 : https://dev.maxmind.com/geoip/geoip2/geolite2/ Sign up for a MaxMind account: https://www.maxmind.com/en/geolite2/signup Set your password and create a license key: https://www.maxmind.com/en/accounts/current/license-key db_folder: str, optional Provide absolute path to the folder containing MMDB file (e.g. '/usr/home' or 'C:/maxmind'). If no path provided, it is set to download to .msticpy/GeoLite2 under user`s home directory. force_update : bool, optional Force update can be set to true or false. depending on it, new download request will be initiated. auto_update: bool, optional Auto update can be set to true or false. depending on it, new download request will be initiated if age criteria is matched. """ super().__init__() self.settings = _get_geoip_provider_settings("GeoIPLite") if api_key: self._api_key = api_key else: self._api_key = self.settings.args.get("AuthKey") # type: ignore self._dbfolder = db_folder if self._dbfolder is None: self._dbfolder = self.settings.args.get("DBFolder", self._DB_HOME) self._dbfolder = str(Path(self._dbfolder).expanduser()) # type: ignore self._force_update = force_update self._auto_update = auto_update self._check_and_update_db(self._dbfolder, self._force_update, self._auto_update) self._dbpath = self._get_geoip_dbpath(self._dbfolder) if not self._dbpath: raise MsticpyUserConfigError( "No usable GeoIP Database could be found.", "Check that you have correctly configured the Maxmind API key.", ( "If you are using a custom DBFolder setting in your config, " + "check that this is a valid path." ), help_uri=( "https://msticpy.readthedocs.io/en/latest/data_acquisition/" + "GeoIPLookups.html#maxmind-geo-ip-lite-lookup-class" ), service_uri="https://www.maxmind.com/en/geolite2/signup", title="Maxmind GeoIP database not found", ) self._reader = geoip2.database.Reader(self._dbpath) def _check_and_update_db( self, db_folder: str = None, force_update: bool = False, auto_update: bool = True, ): r""" Check the age of geo ip database file and download if it older than 30 days. User can set auto_update or force_update to True or False to override auto-download behavior. 
Parameters ---------- db_folder: str, optional Provide absolute path to the folder containing MMDB file (e.g. '/usr/home' or 'C:\maxmind'). If no path provided, it is set to download to .msticpy\GeoLite2 dir under user`s home directory. force_update : bool, optional Force update can be set to true or false. depending on it, new download request will be initiated, overriding age criteria. auto_update : bool, optional Auto update can be set to true or false. depending on it, new download request will be initiated if age criteria is matched. """ geoip_db_path = self._get_geoip_dbpath(db_folder) url = self._MAXMIND_DOWNLOAD.format(license_key=self._api_key) if geoip_db_path is None: print( "No local Maxmind City Database found. ", f"Attempting to downloading new database to {db_folder}", ) self._download_and_extract_archive(url, db_folder) else: # Create a reader object to retrive db info and build date # to check age from build_epoch property. with geoip2.database.Reader(geoip_db_path) as reader: last_mod_time = datetime.utcfromtimestamp(reader.metadata().build_epoch) # Check for out of date DB file according to db_age db_age = datetime.utcnow() - last_mod_time db_updated = True if db_age > timedelta(30) and auto_update: print( "Latest local Maxmind City Database present is older than 30 days.", f"Attempting to download new database to {db_folder}", ) if not self._download_and_extract_archive(url, db_folder): warnings.warn("DB download failed") db_updated = False elif force_update: print( "force_update is set to True.", f"Attempting to download new database to {db_folder}", ) if not self._download_and_extract_archive(url, db_folder): warnings.warn("DB download failed") db_updated = False if not db_updated: warnings.warn( "Continuing with cached database. Results may inaccurate." ) def _download_and_extract_archive( # noqa: MC0001 self, url: str = None, db_folder: str = None ) -> bool: r""" Download file from the given URL and extract if it is archive. Parameters ---------- url : str Web URL location to the Maxmind city Database. (the default is None) db_folder: str, optional Provide absolute path to the folder containing MMDB file (e.g. '/usr/home' or 'C:\maxmind'). 
If no path provided, it is set to download to .msticpy dir under user`s home directory.(the default is None) Returns ------- bool : True if download_success """ if not self._api_key: return False if url is None: url = self._MAXMIND_DOWNLOAD.format(license_key=self._api_key) if db_folder is None: db_folder = self._DB_HOME if not os.path.exists(db_folder): # using makedirs to create intermediate-level dirs to contain the leaf dir os.makedirs(db_folder) db_archive_path = os.path.join(db_folder, self._DB_ARCHIVE) db_file_path = os.path.join(db_folder, self._DB_FILE) try: with requests.get(url, stream=True) as response: response = requests.get(url, stream=True) response.raise_for_status() print("Downloading and extracting GeoLite DB archive from MaxMind....") with open(db_archive_path, "wb") as file_hdl: for chunk in response.iter_content(chunk_size=10000): file_hdl.write(chunk) file_hdl.flush() except HTTPError as http_err: warnings.warn( f"HTTP error occurred trying to download GeoLite DB: {http_err}" ) # pylint: disable=broad-except except Exception as err: warnings.warn(f"Other error occurred trying to download GeoLite DB: {err}") # pylint: enable=broad-except else: try: tar_archive = tarfile.open(db_archive_path) for member in tar_archive.getmembers(): if ( member.isreg() ): # Will skip the dirs to extract only file objects # Strip the path from files to extract it to desired directory member.name = os.path.basename(member.name) tar_archive.extract(member, db_folder) print("Extraction complete. Local Maxmind city DB:", f"{db_file_path}") return True except IOError as err: warnings.warn(f"Error writing GeoIP DB file: {db_file_path} - {err}") return False @staticmethod def _get_geoip_dbpath(db_folder: str = None) -> Optional[str]: r""" Get the correct path containing GeoLite City Database. Parameters ---------- db_folder: str, optional Provide absolute path to the folder containing MMDB file (e.g. '/usr/home' or 'C:\maxmind'). If no path provided, it is set to download to .msticpy\GeoLite2 dir under user`s home directory. Returns ------- Optional[str] Returns the absolute path of local maxmind geolite city database after control flow logic. """ if not db_folder: db_folder = "." list_of_db_paths = [str(db) for db in Path(db_folder).glob("*.mmdb")] if len(list_of_db_paths) > 1: latest_db_path = max(list_of_db_paths, key=os.path.getmtime) elif len(list_of_db_paths) == 1: latest_db_path = list_of_db_paths[0] else: return None return latest_db_path def lookup_ip( self, ip_address: str = None, ip_addr_list: Iterable = None, ip_entity: IpAddress = None, ) -> Tuple[List[Any], List[IpAddress]]: """ Lookup IP location from GeoLite2 data created by MaxMind. Parameters ---------- ip_address : str, optional a single address to look up (the default is None) ip_addr_list : Iterable, optional a collection of addresses to lookup (the default is None) ip_entity : IpAddress, optional an IpAddress entity (the default is None) - any existing data in the Location property will be overwritten Returns ------- Tuple[List[Any], List[IpAddress]] raw geolocation results and same results as IpAddress entities with populated Location property. 
""" if ip_address and isinstance(ip_address, str): ip_list = [ip_address.strip()] elif ip_addr_list: ip_list = list((ip.strip() for ip in ip_addr_list)) elif ip_entity: ip_list = [ip_entity.Address] else: raise ValueError("No valid ip addresses were passed as arguments.") output_raw = [] output_entities = [] for ip_input in ip_list: geo_match = None try: geo_match = self._reader.city(ip_input).raw except (AddressNotFoundError, AttributeError, ValueError): continue if geo_match: output_raw.append(geo_match) output_entities.append( self._create_ip_entity(ip_input, geo_match, ip_entity) ) return output_raw, output_entities @staticmethod def _create_ip_entity( ip_address: str, geo_match: Mapping[str, Any], ip_entity: IpAddress = None ) -> IpAddress: if not ip_entity: ip_entity = IpAddress() ip_entity.Address = ip_address geo_entity = GeoLocation() geo_entity.CountryCode = geo_match.get("country", {}).get( # type: ignore "iso_code", None ) geo_entity.CountryName = ( # type: ignore geo_match.get("country", {}).get("names", {}).get("en", None) ) subdivs = geo_match.get("subdivisions", []) if subdivs: geo_entity.State = ( # type: ignore subdivs[0].get("names", {}).get("en", None) ) geo_entity.City = ( # type: ignore geo_match.get("city", {}).get("names", {}).get("en", None) ) geo_entity.Longitude = geo_match.get("location", {}).get( # type: ignore "longitude", None ) geo_entity.Latitude = geo_match.get("location", {}).get( # type: ignore "latitude", None ) ip_entity.Location = geo_entity # type: ignore return ip_entity def _get_geoip_provider_settings(provider_name: str) -> ProviderSettings: """ Return settings for a provider. Parameters ---------- provider_name : str Name of the provider. Returns ------- ProviderSettings Settings for the provider. """ settings = get_provider_settings(config_section="OtherProviders") if provider_name in settings: return settings[provider_name] return ProviderSettings(name=provider_name, description="Not found.") @export def entity_distance(ip_src: IpAddress, ip_dest: IpAddress) -> float: """ Return distance between two IP Entities. Parameters ---------- ip_src : IpAddress Source/Origin IpAddress Entity ip_dest : IpAddress Destination IpAddress Entity Returns ------- float Distance in kilometers. Raises ------ AttributeError If either entity has no location information """ if not ip_src.Location or not ip_dest.Location: raise AttributeError( "Source and destination entities must have defined Location properties." ) return geo_distance( origin=(ip_src.Location.Latitude, ip_src.Location.Longitude), destination=(ip_dest.Location.Latitude, ip_dest.Location.Longitude), ) _EARTH_RADIUS_KM = 6371 # km @export def geo_distance( origin: Tuple[float, float], destination: Tuple[float, float] ) -> float: """ Calculate the Haversine distance. Parameters ---------- origin : Tuple[float, float] Latitude, Longitude of origin of distance measurement. destination : Tuple[float, float] Latitude, Longitude of origin of distance measurement. Returns ------- float Distance in kilometers. 
Examples -------- >>> origin = (48.1372, 11.5756) # Munich >>> destination = (52.5186, 13.4083) # Berlin >>> round(geo_distance(origin, destination), 1) 504.2 Notes ----- Author: Martin Thoma - stackoverflow """ orig_lat, orig_lon = origin dest_lat, dest_lon = destination ang_dist_lat = math.radians(dest_lat - orig_lat) ang_dist_lon = math.radians(dest_lon - orig_lon) hav_a = (math.sin(ang_dist_lat / 2) * math.sin(ang_dist_lat / 2)) + ( math.cos(math.radians(orig_lat)) * math.cos(math.radians(dest_lat)) * math.sin(ang_dist_lon / 2) * math.sin(ang_dist_lon / 2) ) hav_c = 2 * math.atan2(math.sqrt(hav_a), math.sqrt(1 - hav_a)) return _EARTH_RADIUS_KM * hav_c
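# The block below is an illustrative usage sketch only and is not part of the
# published module. It assumes a valid Maxmind API key is available via
# msticpyconfig.yaml (or passed explicitly), as described in the class docstrings.
if __name__ == "__main__":
    ip_lookup = GeoLiteLookup()
    _raw, ip_entities = ip_lookup.lookup_ip(ip_addr_list=["8.8.8.8", "1.1.1.1"])
    if len(ip_entities) == 2:
        # Distance between the two resolved locations, using the helpers above.
        print("Distance (km):", round(entity_distance(ip_entities[0], ip_entities[1]), 1))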
mit
6,716,206,817,671,337,000
34.039604
109
0.576858
false
toobaz/pandas
pandas/tseries/holiday.py
2
16121
from datetime import datetime, timedelta from typing import List import warnings from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE # noqa import numpy as np from pandas.errors import PerformanceWarning from pandas import DateOffset, Series, Timestamp, date_range from pandas.tseries.offsets import Day, Easter def next_monday(dt): """ If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead """ if dt.weekday() == 5: return dt + timedelta(2) elif dt.weekday() == 6: return dt + timedelta(1) return dt def next_monday_or_tuesday(dt): """ For second holiday of two adjacent ones! If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday or Monday, use following Tuesday instead (because Monday is already taken by adjacent holiday on the day before) """ dow = dt.weekday() if dow == 5 or dow == 6: return dt + timedelta(2) elif dow == 0: return dt + timedelta(1) return dt def previous_friday(dt): """ If holiday falls on Saturday or Sunday, use previous Friday instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt - timedelta(2) return dt def sunday_to_monday(dt): """ If holiday falls on Sunday, use day thereafter (Monday) instead. """ if dt.weekday() == 6: return dt + timedelta(1) return dt def weekend_to_monday(dt): """ If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. Needed for holidays such as Christmas observation in Europe """ if dt.weekday() == 6: return dt + timedelta(1) elif dt.weekday() == 5: return dt + timedelta(2) return dt def nearest_workday(dt): """ If holiday falls on Saturday, use day before (Friday) instead; if holiday falls on Sunday, use day thereafter (Monday) instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt + timedelta(1) return dt def next_workday(dt): """ returns next weekday used for observances """ dt += timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt += timedelta(days=1) return dt def previous_workday(dt): """ returns previous weekday used for observances """ dt -= timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt -= timedelta(days=1) return dt def before_nearest_workday(dt): """ returns previous workday after nearest workday """ return previous_workday(nearest_workday(dt)) def after_nearest_workday(dt): """ returns next workday after nearest workday needed for Boxing day or multiple holidays in a series """ return next_workday(nearest_workday(dt)) class Holiday: """ Class that defines a holiday with start/end dates and rules for observance. 
""" def __init__( self, name, year=None, month=None, day=None, offset=None, observance=None, start_date=None, end_date=None, days_of_week=None, ): """ Parameters ---------- name : str Name of the holiday , defaults to class name offset : array of pandas.tseries.offsets or class from pandas.tseries.offsets computes offset from date observance: function computes when holiday is given a pandas Timestamp days_of_week: provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday Monday=0,..,Sunday=6 Examples -------- >>> from pandas.tseries.holiday import Holiday, nearest_workday >>> from dateutil.relativedelta import MO >>> USMemorialDay = Holiday('Memorial Day', month=5, day=31, offset=pd.DateOffset(weekday=MO(-1))) >>> USLaborDay = Holiday('Labor Day', month=9, day=1, offset=pd.DateOffset(weekday=MO(1))) >>> July3rd = Holiday('July 3rd', month=7, day=3,) >>> NewYears = Holiday('New Years Day', month=1, day=1, observance=nearest_workday), >>> July3rd = Holiday('July 3rd', month=7, day=3, days_of_week=(0, 1, 2, 3)) """ if offset is not None and observance is not None: raise NotImplementedError("Cannot use both offset and observance.") self.name = name self.year = year self.month = month self.day = day self.offset = offset self.start_date = ( Timestamp(start_date) if start_date is not None else start_date ) self.end_date = Timestamp(end_date) if end_date is not None else end_date self.observance = observance assert days_of_week is None or type(days_of_week) == tuple self.days_of_week = days_of_week def __repr__(self): info = "" if self.year is not None: info += "year={year}, ".format(year=self.year) info += "month={mon}, day={day}, ".format(mon=self.month, day=self.day) if self.offset is not None: info += "offset={offset}".format(offset=self.offset) if self.observance is not None: info += "observance={obs}".format(obs=self.observance) repr = "Holiday: {name} ({info})".format(name=self.name, info=info) return repr def dates(self, start_date, end_date, return_name=False): """ Calculate holidays observed between start date and end date Parameters ---------- start_date : starting date, datetime-like, optional end_date : ending date, datetime-like, optional return_name : bool, optional, default=False If True, return a series that has dates and holiday names. False will only return dates. """ start_date = Timestamp(start_date) end_date = Timestamp(end_date) filter_start_date = start_date filter_end_date = end_date if self.year is not None: dt = Timestamp(datetime(self.year, self.month, self.day)) if return_name: return Series(self.name, index=[dt]) else: return [dt] dates = self._reference_dates(start_date, end_date) holiday_dates = self._apply_rule(dates) if self.days_of_week is not None: holiday_dates = holiday_dates[ np.in1d(holiday_dates.dayofweek, self.days_of_week) ] if self.start_date is not None: filter_start_date = max( self.start_date.tz_localize(filter_start_date.tz), filter_start_date ) if self.end_date is not None: filter_end_date = min( self.end_date.tz_localize(filter_end_date.tz), filter_end_date ) holiday_dates = holiday_dates[ (holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date) ] if return_name: return Series(self.name, index=holiday_dates) return holiday_dates def _reference_dates(self, start_date, end_date): """ Get reference dates for the holiday. Return reference dates for the holiday also returning the year prior to the start_date and year following the end_date. 
This ensures that any offsets to be applied will yield the holidays within the passed in dates. """ if self.start_date is not None: start_date = self.start_date.tz_localize(start_date.tz) if self.end_date is not None: end_date = self.end_date.tz_localize(start_date.tz) year_offset = DateOffset(years=1) reference_start_date = Timestamp( datetime(start_date.year - 1, self.month, self.day) ) reference_end_date = Timestamp( datetime(end_date.year + 1, self.month, self.day) ) # Don't process unnecessary holidays dates = date_range( start=reference_start_date, end=reference_end_date, freq=year_offset, tz=start_date.tz, ) return dates def _apply_rule(self, dates): """ Apply the given offset/observance to a DatetimeIndex of dates. Parameters ---------- dates : DatetimeIndex Dates to apply the given offset/observance rule Returns ------- Dates with rules applied """ if self.observance is not None: return dates.map(lambda d: self.observance(d)) if self.offset is not None: if not isinstance(self.offset, list): offsets = [self.offset] else: offsets = self.offset for offset in offsets: # if we are adding a non-vectorized value # ignore the PerformanceWarnings: with warnings.catch_warnings(): warnings.simplefilter("ignore", PerformanceWarning) dates += offset return dates holiday_calendars = {} def register(cls): try: name = cls.name except AttributeError: name = cls.__name__ holiday_calendars[name] = cls def get_calendar(name): """ Return an instance of a calendar based on its name. Parameters ---------- name : str Calendar name to return an instance of """ return holiday_calendars[name]() class HolidayCalendarMetaClass(type): def __new__(cls, clsname, bases, attrs): calendar_class = super().__new__(cls, clsname, bases, attrs) register(calendar_class) return calendar_class class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass): """ Abstract interface to create holidays following certain rules. """ rules = [] # type: List[Holiday] start_date = Timestamp(datetime(1970, 1, 1)) end_date = Timestamp(datetime(2030, 12, 31)) _cache = None def __init__(self, name=None, rules=None): """ Initializes holiday object with a given set a rules. Normally classes just have the rules defined within them. Parameters ---------- name : str Name of the holiday calendar, defaults to class name rules : array of Holiday objects A set of rules used to create the holidays. """ super().__init__() if name is None: name = self.__class__.__name__ self.name = name if rules is not None: self.rules = rules def rule_from_name(self, name): for rule in self.rules: if rule.name == name: return rule return None def holidays(self, start=None, end=None, return_name=False): """ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. 
Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception( "Holiday Calendar {name} does not have any " "rules specified".format(name=self.name) ) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if self._cache is None or start < self._cache[0] or end > self._cache[1]: for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays else: holidays = holidays.append(rule_holidays) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index @staticmethod def merge_class(base, other): """ Merge holiday calendars together. The base calendar will take precedence to other. The merge will be done based on each holiday's name. Parameters ---------- base : AbstractHolidayCalendar instance/subclass or array of Holiday objects other : AbstractHolidayCalendar instance/subclass or array of Holiday objects """ try: other = other.rules except AttributeError: pass if not isinstance(other, list): other = [other] other_holidays = {holiday.name: holiday for holiday in other} try: base = base.rules except AttributeError: pass if not isinstance(base, list): base = [base] base_holidays = {holiday.name: holiday for holiday in base} other_holidays.update(base_holidays) return list(other_holidays.values()) def merge(self, other, inplace=False): """ Merge holiday calendars together. The caller's class rules take precedence. The merge will be done based on each holiday's name. Parameters ---------- other : holiday calendar inplace : bool (default=False) If True set rule_table to holidays, else return array of Holidays """ holidays = self.merge_class(self, other) if inplace: self.rules = holidays else: return holidays USMemorialDay = Holiday( "Memorial Day", month=5, day=31, offset=DateOffset(weekday=MO(-1)) ) USLaborDay = Holiday("Labor Day", month=9, day=1, offset=DateOffset(weekday=MO(1))) USColumbusDay = Holiday( "Columbus Day", month=10, day=1, offset=DateOffset(weekday=MO(2)) ) USThanksgivingDay = Holiday( "Thanksgiving", month=11, day=1, offset=DateOffset(weekday=TH(4)) ) USMartinLutherKingJr = Holiday( "Martin Luther King Jr. 
Day", start_date=datetime(1986, 1, 1), month=1, day=1, offset=DateOffset(weekday=MO(3)), ) USPresidentsDay = Holiday( "Presidents Day", month=2, day=1, offset=DateOffset(weekday=MO(3)) ) GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)]) EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)]) class USFederalHolidayCalendar(AbstractHolidayCalendar): """ US Federal Government Holiday Calendar based on rules specified by: https://www.opm.gov/policy-data-oversight/ snow-dismissal-procedures/federal-holidays/ """ rules = [ Holiday("New Years Day", month=1, day=1, observance=nearest_workday), USMartinLutherKingJr, USPresidentsDay, USMemorialDay, Holiday("July 4th", month=7, day=4, observance=nearest_workday), USLaborDay, USColumbusDay, Holiday("Veterans Day", month=11, day=11, observance=nearest_workday), USThanksgivingDay, Holiday("Christmas", month=12, day=25, observance=nearest_workday), ] def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar): rules = AbstractHolidayCalendar.merge_class(base, other) calendar_class = type(name, (base_class,), {"rules": rules, "name": name}) return calendar_class
bsd-3-clause
-3,756,555,009,378,504,000
29.13271
85
0.586874
false
Unidata/MetPy
v0.10/_downloads/8591910a2b42dadcf3b05658ddd9c600/isentropic_example.py
4
7204
# Copyright (c) 2017,2018 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """ =================== Isentropic Analysis =================== The MetPy function `mpcalc.isentropic_interpolation` allows for isentropic analysis from model analysis data in isobaric coordinates. """ ######################################## import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib.pyplot as plt import numpy as np import xarray as xr import metpy.calc as mpcalc from metpy.cbook import get_test_data from metpy.plots import add_metpy_logo, add_timestamp from metpy.units import units ####################################### # **Getting the data** # # In this example, NARR reanalysis data for 18 UTC 04 April 1987 from the National Centers # for Environmental Information (https://www.ncdc.noaa.gov/data-access/model-data) # will be used. data = xr.open_dataset(get_test_data('narr_example.nc', False)) ########################## print(list(data.variables)) ############################# # We will reduce the dimensionality of the data as it is pulled in to remove an empty time # dimension. # Assign data to variable names lat = data['lat'] lon = data['lon'] lev = data['isobaric'] times = data['time'] tmp = data['Temperature'][0] uwnd = data['u_wind'][0] vwnd = data['v_wind'][0] spech = data['Specific_humidity'][0] # pint doesn't understand gpm data['Geopotential_height'].attrs['units'] = 'meter' hgt = data['Geopotential_height'][0] ############################# # To properly interpolate to isentropic coordinates, the function must know the desired output # isentropic levels. An array with these levels will be created below. isentlevs = [296.] * units.kelvin #################################### # **Conversion to Isentropic Coordinates** # # Once three dimensional data in isobaric coordinates has been pulled and the desired # isentropic levels created, the conversion to isentropic coordinates can begin. Data will be # passed to the function as below. The function requires that isentropic levels, isobaric # levels, and temperature be input. Any additional inputs (in this case relative humidity, u, # and v wind components) will be linearly interpolated to isentropic space. isent_anal = mpcalc.isentropic_interpolation(isentlevs, lev, tmp, spech, uwnd, vwnd, hgt, tmpk_out=True) ##################################### # The output is a list, so now we will separate the variables to different names before # plotting. isentprs, isenttmp, isentspech, isentu, isentv, isenthgt = isent_anal isentu.ito('kt') isentv.ito('kt') ######################################## # A quick look at the shape of these variables will show that the data is now in isentropic # coordinates, with the number of vertical levels as specified above. print(isentprs.shape) print(isentspech.shape) print(isentu.shape) print(isentv.shape) print(isenttmp.shape) print(isenthgt.shape) ################################# # **Converting to Relative Humidity** # # The NARR only gives specific humidity on isobaric vertical levels, so relative humidity will # have to be calculated after the interpolation to isentropic space. 
isentrh = 100 * mpcalc.relative_humidity_from_specific_humidity(isentspech, isenttmp, isentprs) ####################################### # **Plotting the Isentropic Analysis** # Set up our projection crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0) # Coordinates to limit map area bounds = [(-122., -75., 25., 50.)] # Choose a level to plot, in this case 296 K level = 0 fig = plt.figure(figsize=(17., 12.)) add_metpy_logo(fig, 120, 245, size='large') ax = fig.add_subplot(1, 1, 1, projection=crs) ax.set_extent(*bounds, crs=ccrs.PlateCarree()) ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75) ax.add_feature(cfeature.STATES, linewidth=0.5) # Plot the surface clevisent = np.arange(0, 1000, 25) cs = ax.contour(lon, lat, isentprs[level, :, :], clevisent, colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree()) ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True, use_clabeltext=True) # Plot RH cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5), cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree()) cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05, extendrect='True') cb.set_label('Relative Humidity', size='x-large') # Plot wind barbs ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6, regrid_shape=20, transform=ccrs.PlateCarree()) # Make some titles ax.set_title('{:.0f} K Isentropic Pressure (hPa), Wind (kt), Relative Humidity (percent)' .format(isentlevs[level].m), loc='left') add_timestamp(ax, times[0].dt, y=0.02, high_contrast=True) fig.tight_layout() ###################################### # **Montgomery Streamfunction** # # The Montgomery Streamfunction, :math:`{\psi} = gdz + CpT`, is often desired because its # gradient is proportional to the geostrophic wind in isentropic space. This can be easily # calculated with `mpcalc.montgomery_streamfunction`. # Calculate Montgomery Streamfunction and scale by 10^-2 for plotting msf = mpcalc.montgomery_streamfunction(isenthgt, isenttmp) / 100. # Choose a level to plot, in this case 296 K level = 0 fig = plt.figure(figsize=(17., 12.)) add_metpy_logo(fig, 120, 250, size='large') ax = plt.subplot(111, projection=crs) ax.set_extent(*bounds, crs=ccrs.PlateCarree()) ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75) ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.5) # Plot the surface clevmsf = np.arange(0, 4000, 5) cs = ax.contour(lon, lat, msf[level, :, :], clevmsf, colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree()) ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True, use_clabeltext=True) # Plot RH cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5), cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree()) cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05, extendrect='True') cb.set_label('Relative Humidity', size='x-large') # Plot wind barbs. ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6, regrid_shape=20, transform=ccrs.PlateCarree()) # Make some titles ax.set_title('{:.0f} K Montgomery Streamfunction '.format(isentlevs[level].m) + r'($10^{-2} m^2 s^{-2}$), Wind (kt), Relative Humidity (percent)', loc='left') add_timestamp(ax, times[0].dt, y=0.02, pretext='Valid: ', high_contrast=True) fig.tight_layout() plt.show()
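
######################################
# **A note on multiple levels**
#
# As an illustrative aside (not part of the original example), several
# isentropic levels can be interpolated in one call by passing a longer array
# of levels to the same function; the 300 and 304 K values below are arbitrary
# additional choices.

isentlevs_multi = np.array([296., 300., 304.]) * units.kelvin
isent_multi = mpcalc.isentropic_interpolation(isentlevs_multi, lev, tmp, spech,
                                              uwnd, vwnd, hgt, tmpk_out=True)
print([field.shape for field in isent_multi])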
bsd-3-clause
952,601,301,824,138,600
36.134021
95
0.642421
false
prisae/empymod
examples/time_domain/note_for_land_csem.py
1
4486
r""" Improve land CSEM computation ============================= The problem ----------- There exists a numerical singularity in the wavenumber-frequency domain. This singularity is always present, but it is usually neglectible except when the resistivity is very high (like air; hence conductivity goes to zero and therefore the real part of :math:`\eta_H`, :math:`\eta_V` goes to zero) and source and receiver are close to this boundary. So unfortunately exactly in the case of a land CSEM survey. This singularity leads to noise at very high frequencies and therefore at very early times because the Hankel transform cannot handle the singularity correctly (or, if you would choose a sufficiently precise quadrature, it would take literally forever to compute it). The "solution" -------------- Electric permittivity (:math:`\varepsilon_H`, :math:`\varepsilon_V`) are set to 1 by default. They are not important for the frequency range of CSEM. By setting the electric permittivity of the air-layer to 0, the singularity disapears, which subsquently improves a lot the time-domain result for land CSEM. It therefore uses the diffusive approximation for the air layer, but again, that doesn't matter for the frequencies required for CSEM. """ import empymod import numpy as np import matplotlib.pyplot as plt plt.style.use('ggplot') ############################################################################### # Define model # ------------ # Times (s) t = np.logspace(-2, 1, 301) # Model model = { 'src': [0, 0, 0.001], # Src at origin, slightly below interface 'rec': [6000, 0, 0.0001], # 6 km off., in-line, slightly bel. interf. 'depth': [0, 2000, 2100], # Target of 100 m at 2 km depth 'res': [2e14, 10, 100, 10], # Res: [air, overb., target, half-space] 'epermH': [1, 1, 1, 1], # El. permittivity: default values 'freqtime': t, # Times 'signal': 0, # 0: Impulse response 'ftarg': {'dlf': 'key_81_CosSin_2009'}, # Shorter filter then the default 'verb': 1, # Verbosity; set to 3 to see all parameters } ############################################################################### # Compute # ------- # Compute with default eperm_air = 1 res_1 = empymod.dipole(**model) # Set horizontal and vertical electric permittivity of air to 0 model['epermH'][0] = 0 # Note that for empymod < v2.0.0 you have to set `epermH` AND `epermV`. From # v2.0.0 onwards `eperm` is assumed isotropic if `epermV` is not provided, and # `epermV` is therefore internally a copy of `epermH`. # Compute with eperm_air = 0 res_0 = empymod.dipole(**model) ############################################################################### # Plot result # ----------- # # As it can be seen, setting :math:`\varepsilon_{air} =` 0 improves the land # CSEM result significantly for earlier times, where the signal should be zero. plt.figure() plt.title('Time domain impulse response') plt.semilogx(t, res_1, label=r'$\varepsilon_{air}=1$') plt.semilogx(t, res_0, label=r'$\varepsilon_{air}=0$') plt.xlabel(r'Time $[s]$') plt.ylabel(r'Amplitude $[V/(m\,s)]$') plt.legend() plt.show() ############################################################################### # Version 1.7.0 and older # ~~~~~~~~~~~~~~~~~~~~~~~ # # If you use a version of `empymod` that is smaller than `1.7.1` and set # :math:`\varepsilon_H`, :math:`\varepsilon_V` to zero you will see the # following warning, # # :: # # * WARNING :: Parameter epermH < 1e-20 are set to 1e-20 ! # * WARNING :: Parameter epermV < 1e-20 are set to 1e-20 ! 
#
# and the values will be re-set to the defined minimum value, which is by
# default 1e-20. Using a value of 1e-20 for `epermH`/`epermV` also works just
# fine for land CSEM.
#
# It is possible to change the minimum to zero for these old versions of
# `empymod`. However, there is a caveat: In `empymod v1.7.0` and older, the
# `min_param`-parameter also checks frequency and anisotropy. If you set
# `min_param=0` and provide `empymod` with resistivities or anisotropies equal
# to zero, `empymod` will crash due to division-by-zero errors (avoiding
# division by zero is the purpose behind the `min_param`-parameter).
#
# To change the `min_param`-parameter do:
#
# ::
#
#     import empymod
#     empymod.set_minimum(min_param=0)
#

###############################################################################

empymod.Report()
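
###############################################################################
# Illustrative aside (not part of the original example): a step response can
# be computed in the same way by changing ``signal`` in the model dictionary
# (1: switch-on, -1: switch-off).

model['signal'] = -1                  # Switch-off step response
res_0_step = empymod.dipole(**model)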
apache-2.0
-3,886,957,565,752,093,000
35.177419
79
0.613464
false
pkerpedjiev/ernwin
fess/motif/annotate.py
1
11791
from __future__ import print_function import os.path as op import os import subprocess as sp import pandas as pa import warnings from . import motif_atlas as ma import collections as clcs import fess.builder.config as cbc import forgi.utilities.debug as fud import forgi.threedee.model.coarse_grain as ftmc import sys import forgi.graph.bulge_graph as fgb import logging log = logging.getLogger(__name__) all = [ "annotate_structure" ] JARED_DIR = op.expanduser(cbc.Configuration.jar3d_dir) JARED_BIN = cbc.Configuration.jar3d_jar IL_FILE = cbc.Configuration.jar3d_IL #Download from http://rna.bgsu.edu/data/jar3d/models/ #Relative to JARED_DIR MOTIF_ATLAS_FILE = cbc.Configuration.jar3d_motif #Click Download at http://rna.bgsu.edu/rna3dhub/motifs/release/il/current# #Relative to JARED_DIR def annotate_structure(cg, temp_dir, exclude_structure=None, jared_file=None, il_file=None, atlas_file=None): ''' Get the motifs present in this structure. :param cg: A CoarseGrainRNA :param temp_dir: A directory to place the intermediate files :param exclude_structure: None or a string containing a pdb id. :param jared_file: path to the jared executable :param il_file: path to the interior loop motif atlas file. :return: A string containing the motifs. ''' temp_dir = op.expanduser(temp_dir) # enumerate the interior loops in the structure loop_file = op.join(temp_dir, 'loops') try: os.makedirs(op.dirname(loop_file)) except OSError: pass with open(loop_file, 'w') as f: loop_str = cg_to_jared_input(cg) f.write(loop_str) #fud.pv('jared_file') if jared_file is None: jared_file = op.expanduser(op.join(JARED_DIR,JARED_BIN)) if il_file is None: il_file = op.expanduser(op.join(JARED_DIR,IL_FILE)) # run the loops through JAR3D jared_output = op.join(temp_dir, 'jared_output') cmd = ['java', '-jar', jared_file, loop_file, il_file, op.join(temp_dir, 'IL_loop_results.txt'), op.join(temp_dir, 'IL_sequence_results.txt')] #fud.pv("cmd") #fud.pv('" ".join(cmd)') devnull = open('/dev/null', 'w') p = sp.Popen(cmd, stdout=devnull) out, err = p.communicate() return parse_jared_output(op.join(temp_dir, 'IL_sequence_results.txt'), atlas_file, exclude_structure=exclude_structure, cg=cg) def get_cg_from_pdb(pdb_file, chains, args, temp_dir=None, cg_filename=None): ''' Get a BulgeGraph from a pdb file. :param pdb_file: The filename of the pdb file :param chains: The chain ids within the file for which to load the BulgeGraph. If more than one chain is given, they must be connected. :param cg_filename: If given, write the cg to this file ''' if temp_dir is not None: temp_dir = op.join(temp_dir, 'cg_temp') log.info("Creating CG RNA for: %s", pdb_file) cg, = ftmc.CoarseGrainRNA.from_pdb(pdb_file, load_chains = chains, remove_pseudoknots = False, dissolve_length_one_stems=not args.keep_length_one_stems, annotation_tool=args.pdb_annotation_tool) if cg_filename is not None: cg.to_file(cg_filename) return cg def cgdirname_from_args(args): if args.pdb_annotation_tool: annot_tool=args.pdb_annotation_tool else: import forgi.config c = forgi.config.read_config() if "PDB_ANNOTATION_TOOL" in c: annot_tool = c["PDB_ANNOTATION_TOOL"] else: log.warning("No preferred PDB-Annotation-tool set. Inconcistencies due to cached data are possible.") annot_tool="?" # In this case, inconsistencies are possible. return "cgs_{}_{}".format(int(args.keep_length_one_stems), annot_tool) def get_coarse_grain_files(struct_name, chains, args, temp_dir=None): ''' Load all connected coarse-grain files for a structure. Download the corresponding pdb, if needed. 
:param struct_name: The name of the structure (i.e. '1Y26') :param chains: A sequence of chain_ids. If more than one chain_id is given, the chains have to be connected by at least one basepair. @return: A forgi.graph.bulge_graph structure describing this chain. ''' CG_DIR = op.join(JARED_DIR, cgdirname_from_args(args)) PDB_DIR = op.join(JARED_DIR, "pdbs") if not op.exists(PDB_DIR): os.makedirs(PDB_DIR) if not op.exists(CG_DIR): os.makedirs(CG_DIR) cg_filename = op.join(CG_DIR, struct_name+"_"+"-".join(sorted(chains))+".cg") # do we already have the cg representation if op.exists(cg_filename): return ftmc.CoarseGrainRNA.from_bg_file(cg_filename) else: pdb_filename = op.join(PDB_DIR, struct_name + ".pdb") #do we at least have a pdb file if op.exists(pdb_filename): return get_cg_from_pdb(pdb_filename, chains, temp_dir=temp_dir, cg_filename=cg_filename, args=args) else: log.info ("Downloading pdb for: %s", struct_name) import urllib2 response = urllib2.urlopen('http://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=pdb&compression=NO&structureId=%s' % (struct_name)) html = response.read() with open(pdb_filename, 'w') as f: f.write(html) f.flush() return get_cg_from_pdb(pdb_filename, chains, temp_dir=temp_dir, cg_filename=cg_filename, args=args) def print_stats_for_motifs(motifs, filename, args, temp_dir=None): ''' Convert all of the motif alignments to coarse-grain element names. This requires that the coarse grain representation of the pdb file from which the motif comes from be loaded and the element name be determined from the nucleotides within the motif. :param motifs: A dictionary indexed by an element name. The values are the json motif object from the BGSU motif atlas. :param filename: The filename where the stats will be written to. :param args: Tha argparse Namespace object. Needed, to use the correct PDB annotation tool. ''' new_motifs = clcs.defaultdict(list) i=0 with open(filename, "w") as file_: for key in motifs: for motif_entry in motifs[key]: log.info(motif_entry) for a in motif_entry['alignment']: alignment = ma.MotifAlignment(motif_entry['alignment'][a], motif_entry['chainbreak']) try: cg = get_coarse_grain_files(alignment.struct, temp_dir=temp_dir, chains = alignment.chains, args=args) except fgb.GraphConstructionError as e: log.warning("Skipping JAR3D entry for {}. Could not " "construct BulgeGraph because: {}".format(alignment, e)) continue elements = set() for r in alignment.residues: log.info(r) elements.add(cg.get_elem(r)) loop_elements = set() for e in elements: if e[0] != 's': loop_elements.add(e) try: element_id, = loop_elements except (TypeError, ValueError): log.debug("Skipping JAR3D entry for %s. Elements %s in cg do not match JAR3D.",alignment, elements) continue stats = cg.get_stats(element_id) for stat in stats: i+=1 # To ensure unique stat-ids, we use 'j' to identify JAR3D followed by an increasing integer. stat.pdb_name = motif_entry["motif_id"]+"_"+stat.pdb_name+":{}_j{}".format(element_id[0], i) print(stat, file = file_) def cg_to_jared_input(cg): ''' Take a coarse grain RNA and output all of the loop regions within it in a format that JAR3D can understand. 
:param cg: A CoarseGrainRNA structure :return: A string containing the interior loops for jared ''' bg = cg out_str = '' #iterate over the interior loops loops = False for il in bg.iloop_iterator(): # get a tuple containing the sequence on each strand seqs = bg.get_define_seq_str(il, adjacent=True) il_id = ">%s_%s" % (bg.name, "_".join(map(str, bg.defines[il]))) out_str += il_id + "\n" out_str += "*".join(seqs) + "\n" loops = True if not loops: raise ValueError("No interior loops found in structure") return out_str def parse_jared_output(sequence_results, motif_atlas_file=None, exclude_structure=None, cg=None): ''' Parse the output of the JAR3D file and return all of the motifs. :param sequence_results: The sequence results file from JAR3D. :param motif_atlas_file: The location of the motif atlas. ''' if motif_atlas_file is None: motif_atlas_file = op.join(JARED_DIR, MOTIF_ATLAS_FILE) motif_atlas_file = op.expanduser(motif_atlas_file) #print ("SEQ", sequence_results) data = pa.read_csv(sequence_results) atlas = ma.MotifAtlas(motif_atlas_file) found_motifs = clcs.defaultdict(list) for motif in set(data['identifier']): #In older versions of JAR3D, identifier was sequenceId subdata = data[data['identifier'] == motif] with warnings.catch_warnings(): #We do not care if subdata is a view or copy from data. #We assign to subdata, but never access the corresponding part of data later on! warnings.simplefilter("ignore") subdata['score'] = subdata['score'].astype(float) subdata = subdata.sort_values(by='score', ascending=False) for i, row in subdata.iterrows(): if not row["passedCutoff"]: continue motif_id = row['motifId'].split('.')[0] motif_entry = atlas.motifs[motif_id] res_num = int(motif.split('_')[-1]) if exclude_structure: if atlas.struct_in_motif(motif_id, exclude_structure): # this motif comes from the given structure so we'll exclude it # when reporting the results log.warning("Excluding JAR3D hit %s %s %s, because it is from the input structure.", cg.get_node_from_residue_num(res_num), motif_id, motif_entry['common_name']) continue if cg: #print '--------------------------------' element_name = cg.get_node_from_residue_num(res_num) #print element_name, motif, motif_id, motif_entry['common_name'] if motif_entry['alignment']: ''' for a in motif_entry['alignment']: # Print out where this motif comes from print ma.MotifAlignment(motif_entry['alignment'][a], motif_entry['chainbreak']) ''' found_motifs[element_name] += [motif_entry] else: print ('x', motif, motif_id, motif_entry['common_name'], motif_entry['alignment']) return found_motifs
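

def _example_annotate_usage():
    """
    Illustrative sketch (not part of the original module): 'example.cg' and
    the temporary directory are placeholders; JAR3D, the IL model file and the
    motif atlas must be configured via fess.builder.config for this to run.
    """
    cg = ftmc.CoarseGrainRNA.from_bg_file("example.cg")
    found = annotate_structure(cg, temp_dir="/tmp/jar3d_run")
    for element, entries in found.items():
        print(element, [entry["common_name"] for entry in entries])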
agpl-3.0
-5,359,215,946,972,601,000
40.083624
181
0.583072
false
laurentgo/arrow
python/pyarrow/tests/test_plasma.py
1
43831
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import multiprocessing import os import pytest import random import signal import struct import subprocess import sys import time import numpy as np import pyarrow as pa DEFAULT_PLASMA_STORE_MEMORY = 10 ** 8 USE_VALGRIND = os.getenv("PLASMA_VALGRIND") == "1" EXTERNAL_STORE = "hashtable://test" SMALL_OBJECT_SIZE = 9000 def random_name(): return str(random.randint(0, 99999999)) def random_object_id(): import pyarrow.plasma as plasma return plasma.ObjectID(np.random.bytes(20)) def generate_metadata(length): metadata = bytearray(length) if length > 0: metadata[0] = random.randint(0, 255) metadata[-1] = random.randint(0, 255) for _ in range(100): metadata[random.randint(0, length - 1)] = random.randint(0, 255) return metadata def write_to_data_buffer(buff, length): array = np.frombuffer(buff, dtype="uint8") if length > 0: array[0] = random.randint(0, 255) array[-1] = random.randint(0, 255) for _ in range(100): array[random.randint(0, length - 1)] = random.randint(0, 255) def create_object_with_id(client, object_id, data_size, metadata_size, seal=True): metadata = generate_metadata(metadata_size) memory_buffer = client.create(object_id, data_size, metadata) write_to_data_buffer(memory_buffer, data_size) if seal: client.seal(object_id) return memory_buffer, metadata def create_object(client, data_size, metadata_size=0, seal=True): object_id = random_object_id() memory_buffer, metadata = create_object_with_id(client, object_id, data_size, metadata_size, seal=seal) return object_id, memory_buffer, metadata @pytest.mark.plasma class TestPlasmaClient: def setup_method(self, test_method): import pyarrow.plasma as plasma # Start Plasma store. self.plasma_store_ctx = plasma.start_plasma_store( plasma_store_memory=DEFAULT_PLASMA_STORE_MEMORY, use_valgrind=USE_VALGRIND) self.plasma_store_name, self.p = self.plasma_store_ctx.__enter__() # Connect to Plasma. self.plasma_client = plasma.connect(self.plasma_store_name) self.plasma_client2 = plasma.connect(self.plasma_store_name) def teardown_method(self, test_method): try: # Check that the Plasma store is still alive. assert self.p.poll() is None # Ensure Valgrind and/or coverage have a clean exit # Valgrind misses SIGTERM if it is delivered before the # event loop is ready; this race condition is mitigated # but not solved by time.sleep(). if USE_VALGRIND: time.sleep(1.0) self.p.send_signal(signal.SIGTERM) self.p.wait(timeout=5) assert self.p.returncode == 0 finally: self.plasma_store_ctx.__exit__(None, None, None) def test_connection_failure_raises_exception(self): import pyarrow.plasma as plasma # ARROW-1264 with pytest.raises(IOError): plasma.connect('unknown-store-name', num_retries=1) def test_create(self): # Create an object id string. 
object_id = random_object_id() # Create a new buffer and write to it. length = 50 memory_buffer = np.frombuffer(self.plasma_client.create(object_id, length), dtype="uint8") for i in range(length): memory_buffer[i] = i % 256 # Seal the object. self.plasma_client.seal(object_id) # Get the object. memory_buffer = np.frombuffer( self.plasma_client.get_buffers([object_id])[0], dtype="uint8") for i in range(length): assert memory_buffer[i] == i % 256 def test_create_with_metadata(self): for length in range(0, 1000, 3): # Create an object id string. object_id = random_object_id() # Create a random metadata string. metadata = generate_metadata(length) # Create a new buffer and write to it. memory_buffer = np.frombuffer(self.plasma_client.create(object_id, length, metadata), dtype="uint8") for i in range(length): memory_buffer[i] = i % 256 # Seal the object. self.plasma_client.seal(object_id) # Get the object. memory_buffer = np.frombuffer( self.plasma_client.get_buffers([object_id])[0], dtype="uint8") for i in range(length): assert memory_buffer[i] == i % 256 # Get the metadata. metadata_buffer = np.frombuffer( self.plasma_client.get_metadata([object_id])[0], dtype="uint8") assert len(metadata) == len(metadata_buffer) for i in range(len(metadata)): assert metadata[i] == metadata_buffer[i] def test_create_existing(self): # This test is partially used to test the code path in which we create # an object with an ID that already exists length = 100 for _ in range(1000): object_id = random_object_id() self.plasma_client.create(object_id, length, generate_metadata(length)) try: self.plasma_client.create(object_id, length, generate_metadata(length)) # TODO(pcm): Introduce a more specific error type here. except pa.lib.ArrowException: pass else: assert False def test_create_and_seal(self): # Create a bunch of objects. object_ids = [] for i in range(1000): object_id = random_object_id() object_ids.append(object_id) self.plasma_client.create_and_seal(object_id, i * b'a', i * b'b') for i in range(1000): [data_tuple] = self.plasma_client.get_buffers([object_ids[i]], with_meta=True) assert data_tuple[1].to_pybytes() == i * b'a' assert (self.plasma_client.get_metadata( [object_ids[i]])[0].to_pybytes() == i * b'b') # Make sure that creating the same object twice raises an exception. object_id = random_object_id() self.plasma_client.create_and_seal(object_id, b'a', b'b') with pytest.raises(pa.plasma.PlasmaObjectExists): self.plasma_client.create_and_seal(object_id, b'a', b'b') # Make sure that these objects can be evicted. big_object = DEFAULT_PLASMA_STORE_MEMORY // 10 * b'a' object_ids = [] for _ in range(20): object_id = random_object_id() object_ids.append(object_id) self.plasma_client.create_and_seal(random_object_id(), big_object, big_object) for i in range(10): assert not self.plasma_client.contains(object_ids[i]) def test_get(self): num_object_ids = 60 # Test timing out of get with various timeouts. for timeout in [0, 10, 100, 1000]: object_ids = [random_object_id() for _ in range(num_object_ids)] results = self.plasma_client.get_buffers(object_ids, timeout_ms=timeout) assert results == num_object_ids * [None] data_buffers = [] metadata_buffers = [] for i in range(num_object_ids): if i % 2 == 0: data_buffer, metadata_buffer = create_object_with_id( self.plasma_client, object_ids[i], 2000, 2000) data_buffers.append(data_buffer) metadata_buffers.append(metadata_buffer) # Test timing out from some but not all get calls with various # timeouts. 
for timeout in [0, 10, 100, 1000]: data_results = self.plasma_client.get_buffers(object_ids, timeout_ms=timeout) # metadata_results = self.plasma_client.get_metadata( # object_ids, timeout_ms=timeout) for i in range(num_object_ids): if i % 2 == 0: array1 = np.frombuffer(data_buffers[i // 2], dtype="uint8") array2 = np.frombuffer(data_results[i], dtype="uint8") np.testing.assert_equal(array1, array2) # TODO(rkn): We should compare the metadata as well. But # currently the types are different (e.g., memoryview # versus bytearray). # assert plasma.buffers_equal( # metadata_buffers[i // 2], metadata_results[i]) else: assert results[i] is None # Test trying to get an object that was created by the same client but # not sealed. object_id = random_object_id() self.plasma_client.create(object_id, 10, b"metadata") assert self.plasma_client.get_buffers( [object_id], timeout_ms=0, with_meta=True)[0][1] is None assert self.plasma_client.get_buffers( [object_id], timeout_ms=1, with_meta=True)[0][1] is None self.plasma_client.seal(object_id) assert self.plasma_client.get_buffers( [object_id], timeout_ms=0, with_meta=True)[0][1] is not None def test_buffer_lifetime(self): # ARROW-2195 arr = pa.array([1, 12, 23, 3, 34], pa.int32()) batch = pa.RecordBatch.from_arrays([arr], ['field1']) # Serialize RecordBatch into Plasma store sink = pa.MockOutputStream() writer = pa.RecordBatchStreamWriter(sink, batch.schema) writer.write_batch(batch) writer.close() object_id = random_object_id() data_buffer = self.plasma_client.create(object_id, sink.size()) stream = pa.FixedSizeBufferWriter(data_buffer) writer = pa.RecordBatchStreamWriter(stream, batch.schema) writer.write_batch(batch) writer.close() self.plasma_client.seal(object_id) del data_buffer # Unserialize RecordBatch from Plasma store [data_buffer] = self.plasma_client2.get_buffers([object_id]) reader = pa.RecordBatchStreamReader(data_buffer) read_batch = reader.read_next_batch() # Lose reference to returned buffer. The RecordBatch must still # be backed by valid memory. 
del data_buffer, reader assert read_batch.equals(batch) def test_put_and_get(self): for value in [["hello", "world", 3, 1.0], None, "hello"]: object_id = self.plasma_client.put(value) [result] = self.plasma_client.get([object_id]) assert result == value result = self.plasma_client.get(object_id) assert result == value object_id = random_object_id() [result] = self.plasma_client.get([object_id], timeout_ms=0) assert result == pa.plasma.ObjectNotAvailable def test_put_and_get_raw_buffer(self): temp_id = random_object_id() use_meta = b"RAW" def deserialize_or_output(data_tuple): if data_tuple[0] == use_meta: return data_tuple[1].to_pybytes() else: if data_tuple[1] is None: return pa.plasma.ObjectNotAvailable else: return pa.deserialize(data_tuple[1]) for value in [b"Bytes Test", temp_id.binary(), 10 * b"\x00", 123]: if isinstance(value, bytes): object_id = self.plasma_client.put_raw_buffer( value, metadata=use_meta) else: object_id = self.plasma_client.put(value) [result] = self.plasma_client.get_buffers([object_id], with_meta=True) result = deserialize_or_output(result) assert result == value object_id = random_object_id() [result] = self.plasma_client.get_buffers([object_id], timeout_ms=0, with_meta=True) result = deserialize_or_output(result) assert result == pa.plasma.ObjectNotAvailable def test_put_and_get_serialization_context(self): class CustomType: def __init__(self, val): self.val = val val = CustomType(42) with pytest.raises(pa.ArrowSerializationError): self.plasma_client.put(val) serialization_context = pa.SerializationContext() serialization_context.register_type(CustomType, 20*"\x00") object_id = self.plasma_client.put( val, None, serialization_context=serialization_context) with pytest.raises(pa.ArrowSerializationError): result = self.plasma_client.get(object_id) result = self.plasma_client.get( object_id, -1, serialization_context=serialization_context) assert result.val == val.val def test_store_arrow_objects(self): data = np.random.randn(10, 4) # Write an arrow object. object_id = random_object_id() tensor = pa.Tensor.from_numpy(data) data_size = pa.ipc.get_tensor_size(tensor) buf = self.plasma_client.create(object_id, data_size) stream = pa.FixedSizeBufferWriter(buf) pa.ipc.write_tensor(tensor, stream) self.plasma_client.seal(object_id) # Read the arrow object. [tensor] = self.plasma_client.get_buffers([object_id]) reader = pa.BufferReader(tensor) array = pa.ipc.read_tensor(reader).to_numpy() # Assert that they are equal. np.testing.assert_equal(data, array) @pytest.mark.pandas def test_store_pandas_dataframe(self): import pandas as pd import pyarrow.plasma as plasma d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']), 'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])} df = pd.DataFrame(d) # Write the DataFrame. record_batch = pa.RecordBatch.from_pandas(df) # Determine the size. s = pa.MockOutputStream() stream_writer = pa.RecordBatchStreamWriter(s, record_batch.schema) stream_writer.write_batch(record_batch) data_size = s.size() object_id = plasma.ObjectID(np.random.bytes(20)) buf = self.plasma_client.create(object_id, data_size) stream = pa.FixedSizeBufferWriter(buf) stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema) stream_writer.write_batch(record_batch) self.plasma_client.seal(object_id) # Read the DataFrame. 
[data] = self.plasma_client.get_buffers([object_id]) reader = pa.RecordBatchStreamReader(pa.BufferReader(data)) result = reader.read_next_batch().to_pandas() pd.testing.assert_frame_equal(df, result) def test_pickle_object_ids(self): # This can be used for sharing object IDs between processes. import pickle object_id = random_object_id() data = pickle.dumps(object_id) object_id2 = pickle.loads(data) assert object_id == object_id2 def test_store_full(self): # The store is started with 1GB, so make sure that create throws an # exception when it is full. def assert_create_raises_plasma_full(unit_test, size): partial_size = np.random.randint(size) try: _, memory_buffer, _ = create_object(unit_test.plasma_client, partial_size, size - partial_size) # TODO(pcm): More specific error here. except pa.lib.ArrowException: pass else: # For some reason the above didn't throw an exception, so fail. assert False PERCENT = DEFAULT_PLASMA_STORE_MEMORY // 100 # Create a list to keep some of the buffers in scope. memory_buffers = [] _, memory_buffer, _ = create_object(self.plasma_client, 50 * PERCENT) memory_buffers.append(memory_buffer) # Remaining space is 50%. Make sure that we can't create an # object of size 50% + 1, but we can create one of size 20%. assert_create_raises_plasma_full( self, 50 * PERCENT + SMALL_OBJECT_SIZE) _, memory_buffer, _ = create_object(self.plasma_client, 20 * PERCENT) del memory_buffer _, memory_buffer, _ = create_object(self.plasma_client, 20 * PERCENT) del memory_buffer assert_create_raises_plasma_full( self, 50 * PERCENT + SMALL_OBJECT_SIZE) _, memory_buffer, _ = create_object(self.plasma_client, 20 * PERCENT) memory_buffers.append(memory_buffer) # Remaining space is 30%. assert_create_raises_plasma_full( self, 30 * PERCENT + SMALL_OBJECT_SIZE) _, memory_buffer, _ = create_object(self.plasma_client, 10 * PERCENT) memory_buffers.append(memory_buffer) # Remaining space is 20%. assert_create_raises_plasma_full( self, 20 * PERCENT + SMALL_OBJECT_SIZE) def test_contains(self): fake_object_ids = [random_object_id() for _ in range(100)] real_object_ids = [random_object_id() for _ in range(100)] for object_id in real_object_ids: assert self.plasma_client.contains(object_id) is False self.plasma_client.create(object_id, 100) self.plasma_client.seal(object_id) assert self.plasma_client.contains(object_id) for object_id in fake_object_ids: assert not self.plasma_client.contains(object_id) for object_id in real_object_ids: assert self.plasma_client.contains(object_id) def test_hash(self): # Check the hash of an object that doesn't exist. object_id1 = random_object_id() try: self.plasma_client.hash(object_id1) # TODO(pcm): Introduce a more specific error type here except pa.lib.ArrowException: pass else: assert False length = 1000 # Create a random object, and check that the hash function always # returns the same value. metadata = generate_metadata(length) memory_buffer = np.frombuffer(self.plasma_client.create(object_id1, length, metadata), dtype="uint8") for i in range(length): memory_buffer[i] = i % 256 self.plasma_client.seal(object_id1) assert (self.plasma_client.hash(object_id1) == self.plasma_client.hash(object_id1)) # Create a second object with the same value as the first, and check # that their hashes are equal. 
object_id2 = random_object_id() memory_buffer = np.frombuffer(self.plasma_client.create(object_id2, length, metadata), dtype="uint8") for i in range(length): memory_buffer[i] = i % 256 self.plasma_client.seal(object_id2) assert (self.plasma_client.hash(object_id1) == self.plasma_client.hash(object_id2)) # Create a third object with a different value from the first two, and # check that its hash is different. object_id3 = random_object_id() metadata = generate_metadata(length) memory_buffer = np.frombuffer(self.plasma_client.create(object_id3, length, metadata), dtype="uint8") for i in range(length): memory_buffer[i] = (i + 1) % 256 self.plasma_client.seal(object_id3) assert (self.plasma_client.hash(object_id1) != self.plasma_client.hash(object_id3)) # Create a fourth object with the same value as the third, but # different metadata. Check that its hash is different from any of the # previous three. object_id4 = random_object_id() metadata4 = generate_metadata(length) memory_buffer = np.frombuffer(self.plasma_client.create(object_id4, length, metadata4), dtype="uint8") for i in range(length): memory_buffer[i] = (i + 1) % 256 self.plasma_client.seal(object_id4) assert (self.plasma_client.hash(object_id1) != self.plasma_client.hash(object_id4)) assert (self.plasma_client.hash(object_id3) != self.plasma_client.hash(object_id4)) def test_many_hashes(self): hashes = [] length = 2 ** 10 for i in range(256): object_id = random_object_id() memory_buffer = np.frombuffer(self.plasma_client.create(object_id, length), dtype="uint8") for j in range(length): memory_buffer[j] = i self.plasma_client.seal(object_id) hashes.append(self.plasma_client.hash(object_id)) # Create objects of varying length. Each pair has two bits different. for i in range(length): object_id = random_object_id() memory_buffer = np.frombuffer(self.plasma_client.create(object_id, length), dtype="uint8") for j in range(length): memory_buffer[j] = 0 memory_buffer[i] = 1 self.plasma_client.seal(object_id) hashes.append(self.plasma_client.hash(object_id)) # Create objects of varying length, all with value 0. for i in range(length): object_id = random_object_id() memory_buffer = np.frombuffer(self.plasma_client.create(object_id, i), dtype="uint8") for j in range(i): memory_buffer[j] = 0 self.plasma_client.seal(object_id) hashes.append(self.plasma_client.hash(object_id)) # Check that all hashes were unique. assert len(set(hashes)) == 256 + length + length # def test_individual_delete(self): # length = 100 # # Create an object id string. # object_id = random_object_id() # # Create a random metadata string. # metadata = generate_metadata(100) # # Create a new buffer and write to it. # memory_buffer = self.plasma_client.create(object_id, length, # metadata) # for i in range(length): # memory_buffer[i] = chr(i % 256) # # Seal the object. # self.plasma_client.seal(object_id) # # Check that the object is present. # assert self.plasma_client.contains(object_id) # # Delete the object. # self.plasma_client.delete(object_id) # # Make sure the object is no longer present. # self.assertFalse(self.plasma_client.contains(object_id)) # # def test_delete(self): # # Create some objects. # object_ids = [random_object_id() for _ in range(100)] # for object_id in object_ids: # length = 100 # # Create a random metadata string. # metadata = generate_metadata(100) # # Create a new buffer and write to it. # memory_buffer = self.plasma_client.create(object_id, length, # metadata) # for i in range(length): # memory_buffer[i] = chr(i % 256) # # Seal the object. 
# self.plasma_client.seal(object_id) # # Check that the object is present. # assert self.plasma_client.contains(object_id) # # # Delete the objects and make sure they are no longer present. # for object_id in object_ids: # # Delete the object. # self.plasma_client.delete(object_id) # # Make sure the object is no longer present. # self.assertFalse(self.plasma_client.contains(object_id)) def test_illegal_functionality(self): # Create an object id string. object_id = random_object_id() # Create a new buffer and write to it. length = 1000 memory_buffer = self.plasma_client.create(object_id, length) # Make sure we cannot access memory out of bounds. with pytest.raises(Exception): memory_buffer[length] # Seal the object. self.plasma_client.seal(object_id) # This test is commented out because it currently fails. # # Make sure the object is ready only now. # def illegal_assignment(): # memory_buffer[0] = chr(0) # with pytest.raises(Exception): # illegal_assignment() # Get the object. memory_buffer = self.plasma_client.get_buffers([object_id])[0] # Make sure the object is read only. def illegal_assignment(): memory_buffer[0] = chr(0) with pytest.raises(Exception): illegal_assignment() def test_evict(self): client = self.plasma_client2 object_id1 = random_object_id() b1 = client.create(object_id1, 1000) client.seal(object_id1) del b1 assert client.evict(1) == 1000 object_id2 = random_object_id() object_id3 = random_object_id() b2 = client.create(object_id2, 999) b3 = client.create(object_id3, 998) client.seal(object_id3) del b3 assert client.evict(1000) == 998 object_id4 = random_object_id() b4 = client.create(object_id4, 997) client.seal(object_id4) del b4 client.seal(object_id2) del b2 assert client.evict(1) == 997 assert client.evict(1) == 999 object_id5 = random_object_id() object_id6 = random_object_id() object_id7 = random_object_id() b5 = client.create(object_id5, 996) b6 = client.create(object_id6, 995) b7 = client.create(object_id7, 994) client.seal(object_id5) client.seal(object_id6) client.seal(object_id7) del b5 del b6 del b7 assert client.evict(2000) == 996 + 995 + 994 # Mitigate valgrind-induced slowness SUBSCRIBE_TEST_SIZES = ([1, 10, 100, 1000] if USE_VALGRIND else [1, 10, 100, 1000, 10000]) def test_subscribe(self): # Subscribe to notifications from the Plasma Store. self.plasma_client.subscribe() for i in self.SUBSCRIBE_TEST_SIZES: object_ids = [random_object_id() for _ in range(i)] metadata_sizes = [np.random.randint(1000) for _ in range(i)] data_sizes = [np.random.randint(1000) for _ in range(i)] for j in range(i): self.plasma_client.create( object_ids[j], data_sizes[j], metadata=bytearray(np.random.bytes(metadata_sizes[j]))) self.plasma_client.seal(object_ids[j]) # Check that we received notifications for all of the objects. for j in range(i): notification_info = self.plasma_client.get_next_notification() recv_objid, recv_dsize, recv_msize = notification_info assert object_ids[j] == recv_objid assert data_sizes[j] == recv_dsize assert metadata_sizes[j] == recv_msize def test_subscribe_socket(self): # Subscribe to notifications from the Plasma Store. self.plasma_client.subscribe() rsock = self.plasma_client.get_notification_socket() for i in self.SUBSCRIBE_TEST_SIZES: # Get notification from socket. 
object_ids = [random_object_id() for _ in range(i)] metadata_sizes = [np.random.randint(1000) for _ in range(i)] data_sizes = [np.random.randint(1000) for _ in range(i)] for j in range(i): self.plasma_client.create( object_ids[j], data_sizes[j], metadata=bytearray(np.random.bytes(metadata_sizes[j]))) self.plasma_client.seal(object_ids[j]) # Check that we received notifications for all of the objects. for j in range(i): # Assume the plasma store will not be full, # so we always get the data size instead of -1. msg_len, = struct.unpack('L', rsock.recv(8)) content = rsock.recv(msg_len) recv_objids, recv_dsizes, recv_msizes = ( self.plasma_client.decode_notifications(content)) assert object_ids[j] == recv_objids[0] assert data_sizes[j] == recv_dsizes[0] assert metadata_sizes[j] == recv_msizes[0] def test_subscribe_deletions(self): # Subscribe to notifications from the Plasma Store. We use # plasma_client2 to make sure that all used objects will get evicted # properly. self.plasma_client2.subscribe() for i in self.SUBSCRIBE_TEST_SIZES: object_ids = [random_object_id() for _ in range(i)] # Add 1 to the sizes to make sure we have nonzero object sizes. metadata_sizes = [np.random.randint(1000) + 1 for _ in range(i)] data_sizes = [np.random.randint(1000) + 1 for _ in range(i)] for j in range(i): x = self.plasma_client2.create( object_ids[j], data_sizes[j], metadata=bytearray(np.random.bytes(metadata_sizes[j]))) self.plasma_client2.seal(object_ids[j]) del x # Check that we received notifications for creating all of the # objects. for j in range(i): notification_info = self.plasma_client2.get_next_notification() recv_objid, recv_dsize, recv_msize = notification_info assert object_ids[j] == recv_objid assert data_sizes[j] == recv_dsize assert metadata_sizes[j] == recv_msize # Check that we receive notifications for deleting all objects, as # we evict them. for j in range(i): assert (self.plasma_client2.evict(1) == data_sizes[j] + metadata_sizes[j]) notification_info = self.plasma_client2.get_next_notification() recv_objid, recv_dsize, recv_msize = notification_info assert object_ids[j] == recv_objid assert -1 == recv_dsize assert -1 == recv_msize # Test multiple deletion notifications. The first 9 object IDs have # size 0, and the last has a nonzero size. When Plasma evicts 1 byte, # it will evict all objects, so we should receive deletion # notifications for each. num_object_ids = 10 object_ids = [random_object_id() for _ in range(num_object_ids)] metadata_sizes = [0] * (num_object_ids - 1) data_sizes = [0] * (num_object_ids - 1) metadata_sizes.append(np.random.randint(1000)) data_sizes.append(np.random.randint(1000)) for i in range(num_object_ids): x = self.plasma_client2.create( object_ids[i], data_sizes[i], metadata=bytearray(np.random.bytes(metadata_sizes[i]))) self.plasma_client2.seal(object_ids[i]) del x for i in range(num_object_ids): notification_info = self.plasma_client2.get_next_notification() recv_objid, recv_dsize, recv_msize = notification_info assert object_ids[i] == recv_objid assert data_sizes[i] == recv_dsize assert metadata_sizes[i] == recv_msize assert (self.plasma_client2.evict(1) == data_sizes[-1] + metadata_sizes[-1]) for i in range(num_object_ids): notification_info = self.plasma_client2.get_next_notification() recv_objid, recv_dsize, recv_msize = notification_info assert object_ids[i] == recv_objid assert -1 == recv_dsize assert -1 == recv_msize def test_use_full_memory(self): # Fill the object store up with a large number of small objects and let # them go out of scope. 
for _ in range(100): create_object( self.plasma_client2, np.random.randint(1, DEFAULT_PLASMA_STORE_MEMORY // 20), 0) # Create large objects that require the full object store size, and # verify that they fit. for _ in range(2): create_object(self.plasma_client2, DEFAULT_PLASMA_STORE_MEMORY, 0) # Verify that an object that is too large does not fit. # Also verifies that the right error is thrown, and does not # create the object ID prematurely. object_id = random_object_id() for i in range(3): with pytest.raises(pa.plasma.PlasmaStoreFull): self.plasma_client2.create( object_id, DEFAULT_PLASMA_STORE_MEMORY + SMALL_OBJECT_SIZE) @staticmethod def _client_blocked_in_get(plasma_store_name, object_id): import pyarrow.plasma as plasma client = plasma.connect(plasma_store_name) # Try to get an object ID that doesn't exist. This should block. client.get([object_id]) def test_client_death_during_get(self): object_id = random_object_id() p = multiprocessing.Process(target=self._client_blocked_in_get, args=(self.plasma_store_name, object_id)) p.start() # Make sure the process is running. time.sleep(0.2) assert p.is_alive() # Kill the client process. p.terminate() # Wait a little for the store to process the disconnect event. time.sleep(0.1) # Create the object. self.plasma_client.put(1, object_id=object_id) # Check that the store is still alive. This will raise an exception if # the store is dead. self.plasma_client.contains(random_object_id()) @staticmethod def _client_get_multiple(plasma_store_name, object_ids): import pyarrow.plasma as plasma client = plasma.connect(plasma_store_name) # Try to get an object ID that doesn't exist. This should block. client.get(object_ids) def test_client_getting_multiple_objects(self): object_ids = [random_object_id() for _ in range(10)] p = multiprocessing.Process(target=self._client_get_multiple, args=(self.plasma_store_name, object_ids)) p.start() # Make sure the process is running. time.sleep(0.2) assert p.is_alive() # Create the objects one by one. for object_id in object_ids: self.plasma_client.put(1, object_id=object_id) # Check that the store is still alive. This will raise an exception if # the store is dead. self.plasma_client.contains(random_object_id()) # Make sure that the blocked client finishes. start_time = time.time() while True: if time.time() - start_time > 5: raise Exception("Timing out while waiting for blocked client " "to finish.") if not p.is_alive(): break @pytest.mark.plasma class TestEvictionToExternalStore: def setup_method(self, test_method): import pyarrow.plasma as plasma # Start Plasma store. self.plasma_store_ctx = plasma.start_plasma_store( plasma_store_memory=1000 * 1024, use_valgrind=USE_VALGRIND, external_store=EXTERNAL_STORE) self.plasma_store_name, self.p = self.plasma_store_ctx.__enter__() # Connect to Plasma. self.plasma_client = plasma.connect(self.plasma_store_name) def teardown_method(self, test_method): try: # Check that the Plasma store is still alive. assert self.p.poll() is None self.p.send_signal(signal.SIGTERM) self.p.wait(timeout=5) finally: self.plasma_store_ctx.__exit__(None, None, None) def test_eviction(self): client = self.plasma_client object_ids = [random_object_id() for _ in range(0, 20)] data = b'x' * 100 * 1024 metadata = b'' for i in range(0, 20): # Test for object non-existence. assert not client.contains(object_ids[i]) # Create and seal the object. client.create_and_seal(object_ids[i], data, metadata) # Test that the client can get the object. 
assert client.contains(object_ids[i]) for i in range(0, 20): # Since we are accessing objects sequentially, every object we # access would be a cache "miss" owing to LRU eviction. # Try and access the object from the plasma store first, and then # try external store on failure. This should succeed to fetch the # object. However, it may evict the next few objects. [result] = client.get_buffers([object_ids[i]]) assert result.to_pybytes() == data # Make sure we still cannot fetch objects that do not exist [result] = client.get_buffers([random_object_id()], timeout_ms=100) assert result is None @pytest.mark.plasma def test_object_id_size(): import pyarrow.plasma as plasma with pytest.raises(ValueError): plasma.ObjectID("hello") plasma.ObjectID(20 * b"0") @pytest.mark.plasma def test_object_id_equality_operators(): import pyarrow.plasma as plasma oid1 = plasma.ObjectID(20 * b'0') oid2 = plasma.ObjectID(20 * b'0') oid3 = plasma.ObjectID(19 * b'0' + b'1') assert oid1 == oid2 assert oid2 != oid3 assert oid1 != 'foo' @pytest.mark.xfail(reason="often fails on travis") @pytest.mark.skipif(not os.path.exists("/mnt/hugepages"), reason="requires hugepage support") def test_use_huge_pages(): import pyarrow.plasma as plasma with plasma.start_plasma_store( plasma_store_memory=2*10**9, plasma_directory="/mnt/hugepages", use_hugepages=True) as (plasma_store_name, p): plasma_client = plasma.connect(plasma_store_name) create_object(plasma_client, 10**8) # This is checking to make sure plasma_clients cannot be destroyed # before all the PlasmaBuffers that have handles to them are # destroyed, see ARROW-2448. @pytest.mark.plasma def test_plasma_client_sharing(): import pyarrow.plasma as plasma with plasma.start_plasma_store( plasma_store_memory=DEFAULT_PLASMA_STORE_MEMORY) \ as (plasma_store_name, p): plasma_client = plasma.connect(plasma_store_name) object_id = plasma_client.put(np.zeros(3)) buf = plasma_client.get(object_id) del plasma_client assert (buf == np.zeros(3)).all() del buf # This segfaulted pre ARROW-2448. 
@pytest.mark.plasma def test_plasma_list(): import pyarrow.plasma as plasma with plasma.start_plasma_store( plasma_store_memory=DEFAULT_PLASMA_STORE_MEMORY) \ as (plasma_store_name, p): plasma_client = plasma.connect(plasma_store_name) # Test sizes u, _, _ = create_object(plasma_client, 11, metadata_size=7, seal=False) l1 = plasma_client.list() assert l1[u]["data_size"] == 11 assert l1[u]["metadata_size"] == 7 # Test ref_count v = plasma_client.put(np.zeros(3)) # Ref count has already been released # XXX flaky test, disabled (ARROW-3344) # l2 = plasma_client.list() # assert l2[v]["ref_count"] == 0 a = plasma_client.get(v) l3 = plasma_client.list() assert l3[v]["ref_count"] == 1 del a # Test state w, _, _ = create_object(plasma_client, 3, metadata_size=0, seal=False) l4 = plasma_client.list() assert l4[w]["state"] == "created" plasma_client.seal(w) l5 = plasma_client.list() assert l5[w]["state"] == "sealed" # Test timestamps slack = 1.5 # seconds t1 = time.time() x, _, _ = create_object(plasma_client, 3, metadata_size=0, seal=False) t2 = time.time() l6 = plasma_client.list() assert t1 - slack <= l6[x]["create_time"] <= t2 + slack time.sleep(2.0) t3 = time.time() plasma_client.seal(x) t4 = time.time() l7 = plasma_client.list() assert t3 - t2 - slack <= l7[x]["construct_duration"] assert l7[x]["construct_duration"] <= t4 - t1 + slack @pytest.mark.plasma def test_object_id_randomness(): cmd = "from pyarrow import plasma; print(plasma.ObjectID.from_random())" first_object_id = subprocess.check_output([sys.executable, "-c", cmd]) second_object_id = subprocess.check_output([sys.executable, "-c", cmd]) assert first_object_id != second_object_id @pytest.mark.plasma def test_store_capacity(): import pyarrow.plasma as plasma with plasma.start_plasma_store(plasma_store_memory=10000) as (name, p): plasma_client = plasma.connect(name) assert plasma_client.store_capacity() == 10000
apache-2.0
-3,870,818,677,396,803,600
40.001871
79
0.567658
false
fegonda/icon_demo
code/model/deleteme/cnn_model.py
1
4305
#---------------------------------------------------------------------------
# Utility.py
#
# Author  : Felix Gonda
# Date    : July 10, 2015
# School  : Harvard University
#
# Project : Master Thesis
#           An Interactive Deep Learning Toolkit for
#           Automatic Segmentation of Images
#
# Summary : This file contains utility functions for reading, writing, and
#           processing images.
#---------------------------------------------------------------------------

import os
import sys
import time
import ConfigParser
import pandas as pd
import numpy as np
import theano
import theano.tensor as T
import cPickle

theano.config.floatX = 'float32'

base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../external'))
sys.path.insert(2,os.path.join(base_path, '../common'))
sys.path

from logistic_sgd import LogisticRegression
from mlp import HiddenLayer
from mlp_model import MLP_Model
from lenet import LeNetConvPoolLayer
from activation_functions import rectified_linear


class CNN_Model(object):

    def __init__(self, input, batch_size, patchSize, rng, nkerns, kernelSizes,
                 hiddenSizes, fileName=None, activation=rectified_linear):
        self.convLayers = []
        self.trainingCost = []
        self.validationError = []
        self.nkerns = nkerns
        self.kernelSizes = kernelSizes
        self.hiddenSizes = hiddenSizes
        self.patchSize = patchSize
        self.batch_size = batch_size

        input = input.reshape((self.batch_size, 1, self.patchSize, self.patchSize))
        self.layer0_input = input
        self.params = []

        input_next = input
        numberOfFeatureMaps = 1
        featureMapSize = patchSize

        for i in range(len(nkerns)):
            layer = LeNetConvPoolLayer(
                rng,
                input=input_next,
                image_shape=(batch_size, numberOfFeatureMaps, featureMapSize, featureMapSize),
                filter_shape=(nkerns[i], numberOfFeatureMaps, kernelSizes[i], kernelSizes[i]),
                poolsize=(2, 2)
            )
            input_next = layer.output
            numberOfFeatureMaps = nkerns[i]
            featureMapSize = np.int16(np.floor((featureMapSize - kernelSizes[i]+1) / 2))

            self.params += layer.params
            self.convLayers.append(layer)

        # the 2 is there to preserve the batchSize
        mlp_input = self.convLayers[-1].output.flatten(2)

        self.mlp = MLP_Model(
            rng=rng,
            input=mlp_input,
            n_in=nkerns[-1] * (featureMapSize ** 2),
            n_hidden=hiddenSizes,
            n_out=2,
            activation=rectified_linear
        )
        self.params += self.mlp.params

        self.cost = self.mlp.negative_log_likelihood
        self.errors = self.mlp.errors
        self.p_y_given_x = self.mlp.p_y_given_x
        self.y_pred = self.mlp.y_pred
        self.debug_x = self.p_y_given_x

        if not fileName is None:
            with open(fileName, 'r') as file:
                saved_convLayers, saved_hiddenLayers, saved_logRegressionLayer, self.trainingCost, self.validationError, saved_nkerns, saved_kernelSizes, saved_batch_size, saved_patchSize, saved_hiddenSizes = cPickle.load(file)

            for s_cl, cl in zip(saved_convLayers, self.convLayers):
                cl.W.set_value(s_cl.W.get_value())
                cl.b.set_value(s_cl.b.get_value())

            for s_hl, hl in zip(saved_hiddenLayers, self.mlp.hiddenLayers):
                hl.W.set_value(np.float32(s_hl.W.eval()))
                hl.b.set_value(s_hl.b.get_value())

            self.mlp.logRegressionLayer.W.set_value(np.float32(saved_logRegressionLayer.W.eval()))
            self.mlp.logRegressionLayer.b.set_value(saved_logRegressionLayer.b.get_value())

    def save(self, filename):
        with open(filename, 'wb') as file:
            cPickle.dump((self.convLayers,
                          self.mlp.hiddenLayers,
                          self.mlp.logRegressionLayer,
                          self.trainingCost,
                          self.validationError,
                          self.nkerns,
                          self.kernelSizes,
                          self.batch_size,
                          self.patchSize,
                          self.hiddenSizes), file)
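
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original file): shows how the
# constructor arguments fit together. The patch size, kernel counts and the
# symbolic input below are illustrative assumptions only.
#
#   rng = np.random.RandomState(1234)
#   x = T.matrix('x')
#   model = CNN_Model(input=x, batch_size=128, patchSize=39, rng=rng,
#                     nkerns=[48, 48], kernelSizes=[5, 5], hiddenSizes=[200])
#   model.save('cnn_model.pkl')
# ---------------------------------------------------------------------------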
mit
6,086,488,391,834,939,000
29.104895
98
0.581417
false
wathen/PhD
MHD/FEniCS/FieldSplit/LSC/3D/NSpicard.py
1
11853
#!/opt/local/bin/python from dolfin import * import petsc4py import sys petsc4py.init(sys.argv) from petsc4py import PETSc # from MatrixOperations import * import numpy as np import matplotlib.pylab as plt import os import scipy.io #from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos #from scipy2Trilinos import scipy_csr_matrix2CrsMatrix import PETScIO as IO import time import common import CheckPetsc4py as CP import NSprecond from scipy.sparse import spdiags import MatrixOperations as MO import ExactSol parameters["form_compiler"]["optimize"] = True parameters["form_compiler"]["cpp_optimize"] = True #MO.SwapBackend('epetra') #os.system("echo $PATH") m = 5 errL2u =np.zeros((m-1,1)) errH1u =np.zeros((m-1,1)) errL2p =np.zeros((m-1,1)) l2uorder = np.zeros((m-1,1)) H1uorder =np.zeros((m-1,1)) l2porder = np.zeros((m-1,1)) NN = np.zeros((m-1,1)) DoF = np.zeros((m-1,1)) Vdim = np.zeros((m-1,1)) Qdim = np.zeros((m-1,1)) Wdim = np.zeros((m-1,1)) l2uorder = np.zeros((m-1,1)) l2porder = np.zeros((m-1,1)) nonlinear = np.zeros((m-1,1)) SolTime = np.zeros((m-1,1)) AvIt = np.zeros((m-1,1)) nn = 2 dim = 2 Solver = 'PCD' Saving = 'no' case = 1 # parameters['linear_algebra_backend'] = 'uBLAS' # parameters = CP.ParameterSetup() def LOG(arg): if INFO: print(arg) for xx in xrange(1,m): print xx nn = 2**(xx) # Create mesh and define function space nn = int(nn) NN[xx-1] = nn mesh = BoxMesh(-1, -1,-1, 1, 1, 1, nn, nn,nn) # tic() parameters['reorder_dofs_serial'] = False V = VectorFunctionSpace(mesh, "CG", 2) Q = FunctionSpace(mesh, "CG", 1) # QQ = VectorFunctionSpace(mesh,"B",3) # V = V+QQ parameters['reorder_dofs_serial'] = False # print 'time to create function spaces', toc(),'\n\n' W = V*Q Vdim[xx-1] = V.dim() Qdim[xx-1] = Q.dim() Wdim[xx-1] = W.dim() print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n" def boundary(x, on_boundary): return on_boundary u0, p0, Laplacian, Advection, gradPres = ExactSol.NS3D(case) R = 100 MU = Constant(1e0) # MU = 2/R bcc = DirichletBC(W.sub(0),u0, boundary) bcs = [bcc] (u, p) = TrialFunctions(W) (v, q) = TestFunctions(W) f = -MU*Laplacian+Advection+gradPres n = FacetNormal(mesh) h = CellSize(mesh) h_avg =avg(h) d = 0 u_k,p_k = common.Stokes(V,Q,u0,Laplacian+gradPres,[1,1,MU]) # p_k.vector()[:] = p_k.vector().array() # u_k = Function(V) # p_k = Function(Q) uOld = np.concatenate((u_k.vector().array(),p_k.vector().array()), axis=0) r = IO.arrayToVec(uOld) a11 = MU*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1/2)*div(u_k)*inner(u,v)*dx- (1/2)*inner(u_k,n)*inner(u,v)*ds a12 = div(v)*p*dx a21 = div(u)*q*dx L1 = inner(v, f)*dx a = a11-a12-a21 r11 = MU*inner(grad(v), grad(u_k))*dx + inner((grad(u_k)*u_k),v)*dx + (1/2)*div(u_k)*inner(u_k,v)*dx- (1/2)*inner(u_k,n)*inner(u_k,v)*ds r12 = div(v)*p_k*dx r21 = div(u_k)*q*dx RHSform = r11-r12-r21 p11 = MU*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1/2)*div(u_k)*inner(u,v)*dx- (1/2)*inner(u_k,n)*inner(u,v)*ds p12 = div(v)*p*dx prec = p11 -p12 bc = DirichletBC(W.sub(0),Expression(("0","0","0")), boundary) bcs = [bc] eps = 1.0 # error measure ||u-u_k|| tol = 1.0E-5 # tolerance iter = 0 # iteration counter maxiter = 10 # max no of iterations allowed # parameters = CP.ParameterSetup() outerit = 0 if Solver == "LSC": parameters['linear_algebra_backend'] = 'uBLAS' BQB = assemble(inner(u,v)*dx- div(v)*p*dx-div(u)*q*dx) bc.apply(BQB) BQB = BQB.sparray() X = BQB[0:V.dim(),0:V.dim()] Xdiag = X.diagonal() # Xdiag = X.sum(1).A # print Xdiag B = BQB[V.dim():W.dim(),0:V.dim()] Bt = 
BQB[0:V.dim(),V.dim():W.dim()] d = spdiags(1.0/Xdiag, 0, len(Xdiag), len(Xdiag)) L = B*d*Bt Bd = B*d dBt = d*Bt L = PETSc.Mat().createAIJ(size=L.shape,csr=(L.indptr, L.indices, L.data)) Bd = PETSc.Mat().createAIJ(size=Bd.shape,csr=(Bd.indptr, Bd.indices, Bd.data)) dBt = PETSc.Mat().createAIJ(size=dBt.shape,csr=(dBt.indptr, dBt.indices, dBt.data)) parameters['linear_algebra_backend'] = 'PETSc' elif Solver == "PCD": (pQ) = TrialFunction(Q) (qQ) = TestFunction(Q) Mass = assemble(inner(pQ,qQ)*dx) L = assemble(inner(grad(pQ),grad(qQ))*dx) fp = MU*inner(grad(qQ), grad(pQ))*dx+inner((u_k[0]*grad(pQ)[0]+u_k[1]*grad(pQ)[1]+u_k[2]*grad(pQ)[2]),qQ)*dx + (1/2)*div(u_k)*inner(pQ,qQ)*dx - (1/2)*(u_k[0]*n[0]+u_k[1]*n[1]+u_k[2]*n[2])*inner(pQ,qQ)*ds # print "hi" L = CP.Assemble(L) Mass = CP.Assemble(Mass) # print L SolutionTime = 0 while eps > tol and iter < maxiter: iter += 1 x = Function(W) uu = Function(W) tic() AA, bb = assemble_system(a, L1-RHSform, bcs) A,b = CP.Assemble(AA,bb) print toc() print A # b = b.getSubVector(t_is) PP = assemble(prec) bcc.apply(PP) P = CP.Assemble(PP) b = bb.array() zeros = 0*b bb = IO.arrayToVec(b) x = IO.arrayToVec(zeros) ksp = PETSc.KSP() ksp.create(comm=PETSc.COMM_WORLD) ksp.setTolerances(1e-5) ksp.setType('grmes') pc = ksp.getPC() ksp.setOperators(A,P) del A, P pc.setType(PETSc.PC.Type.PYTHON) if Solver == "LSC": pc.setPythonContext(NSprecond.LSCnew(W,A,L,Bd,dBt)) elif Solver == "PCD": F = assemble(fp) F = CP.Assemble(F) pc.setPythonContext(NSprecond.PCD(W, Mass, F, L)) # OptDB = PETSc.Options() # OptDB['pc_factor_shift_amount'] = 1 # OptDB['pc_factor_mat_ordering_type'] = 'rcm' # OptDB['pc_factor_mat_solver_package'] = 'mumps' ksp.setFromOptions() toc() ksp.solve(bb, x) time = toc() print time SolutionTime = SolutionTime +time print ksp.its outerit += ksp.its # r = bb.duplicate() # A.MUlt(x, r) # r.aypx(-1, bb) # rnorm = r.norm() # PETSc.Sys.Print('error norm = %g' % rnorm,comm=PETSc.COMM_WORLD) uu = IO.vecToArray(x) UU = uu[0:Vdim[xx-1][0]] # time = time+toc() u1 = Function(V) u1.vector()[:] = u1.vector()[:] + UU pp = uu[Vdim[xx-1][0]:] # time = time+toc() p1 = Function(Q) n = pp.shape p1.vector()[:] = p1.vector()[:] + pp diff = u1.vector().array() eps = np.linalg.norm(diff, ord=np.Inf) print '\n\n\niter=%d: norm=%g' % (iter, eps) print np.linalg.norm(p1.vector().array(),ord=np.inf) u2 = Function(V) u2.vector()[:] = u1.vector().array() + u_k.vector().array() p2 = Function(Q) p2.vector()[:] = p1.vector().array() + p_k.vector().array() u_k.assign(u2) p_k.assign(p2) uOld = np.concatenate((u_k.vector().array(),p_k.vector().array()), axis=0) r = IO.arrayToVec(uOld) SolTime[xx-1] = SolutionTime/iter if case == 1: ue = u0 pe = p0 elif case == 2: ue = u0 pe = p0 AvIt[xx-1] = np.ceil(outerit/iter) u = interpolate(ue,V) p = interpolate(pe,Q) ua = Function(V) ua.vector()[:] = u_k.vector().array() # nonlinear[xx-1] = assemble(inner((grad(ua)*ua),ua)*dx+(1/2)*div(ua)*inner(ua,ua)*dx- (1/2)*inner(ua,n)*inner(ua,ua)*ds) VelocityE = VectorFunctionSpace(mesh,"CG",4) u = interpolate(ue,VelocityE) PressureE = FunctionSpace(mesh,"CG",3) Nv = ua.vector().array().shape X = IO.vecToArray(r) xu = X[0:V.dim()] ua = Function(V) ua.vector()[:] = xu pp = X[V.dim():V.dim()+Q.dim()] n = pp.shape pa = Function(Q) pa.vector()[:] = pp pend = assemble(pa*dx) ones = Function(Q) ones.vector()[:]=(0*pp+1) pp = Function(Q) pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx) pInterp = interpolate(pe,PressureE) pe = Function(PressureE) pe.vector()[:] = pInterp.vector().array() const = - 
assemble(pe*dx)/assemble(ones*dx) pe.vector()[:] = pe.vector()[:]+const ErrorU = Function(V) ErrorP = Function(Q) ErrorU = ue-ua ErrorP = pe-pp errL2u[xx-1]= sqrt(abs(assemble(inner(ErrorU, ErrorU)*dx))) errH1u[xx-1]= errornorm(ue, ua, norm_type='H10', degree_rise=1) errL2p[xx-1]= sqrt(abs(assemble(inner(ErrorP, ErrorP)*dx))) if xx == 1: l2uorder[xx-1] = 0 l2porder[xx-1] = 0 else: l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])) H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1])) l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])) print errL2u[xx-1] print errL2p[xx-1] # del solver print nonlinear print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1]))) print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1]))) import pandas as pd # tableTitles = ["Total DoF","V DoF","Q DoF","AvIt","V-L2","V-order","P-L2","P-order"] # tableValues = np.concatenate((Wdim,Vdim,Qdim,AvIt,errL2u,l2uorder,errL2p,l2porder),axis=1) # df = pd.DataFrame(tableValues, columns = tableTitles) # pd.set_option('precision',3) # print df # print df.to_latex() # print "\n\n Velocity convergence" # VelocityTitles = ["Total DoF","V DoF","Soln Time","AvIt","V-L2","L2-order","V-H1","H1-order"] # VelocityValues = np.concatenate((Wdim,Vdim,SolTime,AvIt,errL2u,l2uorder,errH1u,H1uorder),axis=1) # VelocityTable= pd.DataFrame(VelocityValues, columns = VelocityTitles) # pd.set_option('precision',3) # VelocityTable = MO.PandasFormat(VelocityTable,"V-L2","%2.4e") # VelocityTable = MO.PandasFormat(VelocityTable,'V-H1',"%2.4e") # VelocityTable = MO.PandasFormat(VelocityTable,"H1-order","%1.2f") # VelocityTable = MO.PandasFormat(VelocityTable,'L2-order',"%1.2f") # print VelocityTable print "\n\n Pressure convergence" PressureTitles = ["Total DoF","P DoF","Soln Time","AvIt","P-L2","L2-order"] PressureValues = np.concatenate((Wdim,Qdim,SolTime,AvIt,errL2p,l2porder),axis=1) PressureTable= pd.DataFrame(PressureValues, columns = PressureTitles) pd.set_option('precision',3) PressureTable = MO.PandasFormat(PressureTable,"P-L2","%2.4e") PressureTable = MO.PandasFormat(PressureTable,'L2-order',"%1.2f") print PressureTable LatexTitles = ["DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"] LatexValues = np.concatenate((Vdim,Qdim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1) LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles) pd.set_option('precision',3) LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e") LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e") LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f") LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f") LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e") LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f") print LatexTable.to_latex() # plt.loglog(N,erru) # plt.title('Error plot for P2 elements - convergence = %f' % np.log2(np.average((erru[0:m-2]/erru[1:m-1])))) # plt.xlabel('N') # plt.ylabel('L2 error') # plt.figure() # plt.loglog(N,errp) # plt.title('Error plot for P1 elements - convergence = %f' % np.log2(np.average((errp[0:m-2]/errp[1:m-1])))) # plt.xlabel('N') # plt.ylabel('L2 error') # plot(ua) # plot(interpolate(ue,V)) # plot(pp) # plot(interpolate(pe,Q)) # interactive() # plt.show()
mit
7,153,257,545,973,754,000
27.839416
211
0.585759
false
agartland/utils
ics/merge_gatingsets.py
1
26203
import itertools import pandas as pd import numpy as np import re import feather from os.path import join as opj import os from glob import glob from functools import partial import time import sys import tempfile import traceback from .loading import applyResponseCriteria, generateGzAPerfExceptions __all__ = ['matchSamples', 'mergeSamples', 'extractFunctions', 'extractFunctionsMarkers', 'extractFunctionsGBY', 'extractFunctionsMarkersGBY', 'extractRawFunctions', 'parseSubsets', 'mergeFeathers'] """ if sys.platform == 'win32': _dataPrefix = 'X:/' _homePrefix = 'A:/' GIT_PATH = 'A:/gitrepo/' else: _dataPrefix = '/fh' _homePrefix = '/home/agartlan' GIT_PATH = '/home/agartlan/gitrepo/' sys.path.append(opj(GIT_PATH, 'utils')) from ics import * dataFolder = opj(_dataPrefix, 'fast/gilbert_p/grp/compass_hvtn602_aw/tmpdata') batchFolder = opj(dataFolder, '773-1') metaFn = 'metadata.csv' featherFn = 'gs_1_sample_61918.fcs_333821.feather' f = feather.read_dataframe(opj(batchFolder, featherFn)) mDf = pd.read_csv(opj(batchFolder, metaFn)) subsetsFn = opj(_homePrefix, 'gitrepo/utils/ics/sample_subsets3.csv') subsets, markers, functions, exclude = parseSubsets(subsetsFn) cdf = extractFunctions(f, subsets, functions, compressions=[('ALL', 2)]) cmdf = extractFunctionsMarkers(f, subsets, functions, markers, compressions=[('ALL', 2)]) mDf = pd.read_csv(opj(batchFolder, 'metadata.csv')) featherList = glob(opj(batchFolder, '*.feather')) exKwargs = dict(subsets=subsets, functions=functions, compressions=[('ALL', 1), ('ALL', 2), (['IFNg','IL2', 'TNFa'], 1), (['IFNg','IL2', 'TNFa'], 2), (['IFNg','IL2'], 1)]) outDf = mergeSamples(batchFolder, extractionFunc=extractFunctions, extractionKwargs=exKwargs) """ def mergeFeathers(files, mergedFilename, writeCSV, deleteSource=True): data = [feather.read_dataframe(f) for f in files if not f == ''] if len(data) > 0: df = pd.concat(data, sort=False, axis=0, ignore_index=True, copy=False) else: print('mergeFeathers: No files to merge!') return '' if writeCSV: df.to_csv(mergedFilename) else: try: feather.write_dataframe(df, mergedFilename) except: print('Error writing merged feather: Trying CSV') print(df.shape) traceback.print_exc() try: df.to_csv(mergedFilename.replace('.feather', '.csv')) except: print('Error writing merged CSV: Writing list of unmerged temp files.') with open(mergedFilename.replace('.feather', '.csv'), 'w') as fh: for f in files: fh.write(f + '\n') deleteSource = False if deleteSource: for f in files: if not f == '': try: os.remove(f) except: print('Could not delete merged temp file: %s' % f) return mergedFilename def matchSamples(batchFolder, matchStr='*.feather', test=False): """Match each row of the metadata with each feather file (sample) in the batch folder""" mDf = pd.read_csv(opj(batchFolder, 'metadata.csv')) featherList = glob(opj(batchFolder, matchStr)) if len(featherList) == 0: print('No feather files matching "%s" in "%s"' % (matchStr, batchFolder)) return {} featherLU = {sample_name:[fn for fn in featherList if sample_name in fn] for sample_name in mDf.sample_name} fallback = False if not len(featherLU) == mDf.shape[0]: print('Could not match all samples in the metadata.') fallback = True L = pd.Series({k:len(v) for k,v in featherLU.items()}) if not (L == 1).all(): print('Some samples in metadata matched to >1 feather file:') for k,v in featherLU.items(): if len(v) > 1: print('\t%s: %s' % (k, v[:2])) fallback = True if fallback: featherLU = {} print('Attempting to use sample order with check on total event count.') for i,sample_name in 
enumerate(mDf.sample_name): events = int(sample_name.split('_')[-1]) fn = [f for f in featherList if 'gs_%d_' % (i + 1) in f][0] f = feather.read_dataframe(opj(batchFolder, fn)) if events == f.shape[0]: featherLU.update({sample_name:fn}) print('Matched %s to %s. (%d of %d)' % (sample_name, fn, i+1, mDf.shape[0])) if test and (i + 1) >= 2: break else: print('Sample order strategy not working.') break else: featherLU = {k:v[0] for k,v in featherLU.items()} if not len(featherLU) == mDf.shape[0]: print('Could not match all samples in the metadata.') if test: out = {} i = 0 for k,v in featherLU.items(): out.update({k:v}) i += 1 if i >= 2: break featherLU = out return featherLU def mergeSamples(batchFolder, extractionFunc, extractionKwargs, matchStr='*.feather', test=False, metaCols=None, filters=None): """Go through each feather file (sample) in a batch folder, apply the analysis function, and merge together.""" mDf = pd.read_csv(opj(batchFolder, 'metadata.csv')) print(batchFolder, matchStr) featherList = glob(opj(batchFolder, matchStr)) featherLU = matchSamples(batchFolder, matchStr=matchStr, test=test) if not metaCols is None: if not 'sample_name' in metaCols: metaCols.append('sample_name') try: mDf = mDf[metaCols] except KeyError: print(metaCols) print(mDf.head()) print(mDf.columns) mDf = mDf.set_index('sample_name') feathers = [] i = 1 print('Extracting from batch %s (%s)' % (batchFolder, time.ctime())) sttime = time.time() for sample_name, fn in featherLU.items(): filterOut = False if not filters is None: """Keep only samples whose meta data matches all of the filters""" filterOut = False for col, valList in filters.items(): if not mDf.loc[sample_name, col] in valList: filterOut = True break if not filterOut: f = feather.read_dataframe(fn) # print('Extracting from sample %s (%d of %d)' % (sample_name, i, len(featherLU))) try: x = extractionFunc(f, **extractionKwargs) x.loc[:, 'sample_name'] = sample_name except: print('Error extracting from batch %s, sample %s (%d)' % (batchFolder, sample_name, i)) print(x.shape) print(x.head()) traceback.print_exc() feathers.append(x) i += 1 if len(feathers) > 0: outDf = pd.merge(pd.concat(feathers, axis=0), mDf.reset_index(), how='left', left_on='sample_name', right_on='sample_name') print('Finished batch %s (%1.0f minutes)' % (batchFolder, (time.time() - sttime) / 60), flush=True) """Write to a temporary merge file and return filename""" with tempfile.NamedTemporaryFile(mode='w', suffix='.feather', prefix='merged_tmp_', dir=batchFolder, delete=False) as fh: tmpFilename = fh.name feather.write_dataframe(outDf, tmpFilename) else: tmpFilename = '' return tmpFilename def subset2vec(cy): """Convert: "IFNg+IL2-TNFa+" To: (1, 0, 1)""" vec = np.array([1 if i == '+' else 0 for i in re.findall(r'[\+-]', cy)]) return vec def vec2subset(vec, cytokines): """Convert: (1, 0, 1) To: "IFNg+IL2-TNFa+" """ s = '' for i,cy in enumerate(cytokines): s += cy s += '+' if vec[i] == 1 else '-' return s def parseSubsets(subsetsFn): """Read on lists of subsets and functions from a config file""" df = pd.read_csv(subsetsFn) emptyName = df.name.isnull() df.loc[emptyName, 'name'] = df.loc[emptyName, 'value'] subsets = df.loc[df.type == 'subset'].set_index('name')['value'].to_dict() markers = df.loc[df.type == 'marker'].set_index('name')['value'].to_dict() functions = df.loc[df.type == 'function'].set_index('name')['value'].to_dict() exclude = df.loc[df.type == 'exclude'].set_index('name')['value'].to_dict() return subsets, markers, functions, exclude def extractFunctions(f, subsets, 
functions, compressions=None): """Extract functions from the GatingSet DataFrame and optionally apply response criteria. Parameters ---------- subsets : dict From a config file, with names of subsets (keys) and column names (values) functions : dict From a config file, with names of functions (keys) and column name subset post-fixes (values) compression : list Optionally, provide list of cytokine subsets and ANYs for response calls. Use 'ALL' for all functions. Returns ------- df : pd.DataFrame Data with columns: subset, cytokine, cytnum, nsub""" j = '/' cols = f.columns.tolist() newCols = {} """Subsets with functions""" for ssName, ssVal in subsets.items(): fdict = {fk:fv for fk,fv in functions.items() if j.join([ssVal, fv]) in cols} fkeys = [fk for fk in fdict.keys()] fvals = [fdict[fk] for fk in fkeys] fkeys_stripped = [fk.replace('+','').replace('-','') for fk in fkeys] for vals in itertools.product(*[(0,1)]*len(fkeys)): """Go through each column name thats part of the subset function and parse into positive vs neg boolean comb.""" neg = [] pos = [] for v,fv in zip(vals, fvals): nc = j.join([ssVal, fv]) if v: pos.append(nc) else: neg.append(nc) ncName = (ssName, vec2subset(vals, fkeys_stripped)) newCols[ncName] = {'pos':pos, 'neg':neg, 'subset':ssVal} out = [] for ssName, cytokine in newCols: """Create the composite variable for each combination of variables/columns""" tmpk = (ssName, cytokine) nsub = f[newCols[tmpk]['subset']].values.sum(axis=0) """Start with the boolean index that is True for all cells in the subset""" posCols = [cols.index(c) for c in [newCols[tmpk]['subset']] + newCols[tmpk]['pos']] negCols = [cols.index(c) for c in newCols[tmpk]['neg']] """By doing this on two large matrices in numpy it is much faster""" ind = f.values[:, posCols].all(axis=1) & (~(f.values[:, negCols])).all(axis=1) cytnum = ind.sum(axis=0) tmp = {'subset':ssName, 'cytokine':cytokine, 'nsub':nsub, 'cytnum':cytnum} out.append(tmp) cdf = pd.DataFrame(out) out = [] if not compressions is None: """Apply response criteria to in "compressions""" for cytList, ANY in compressions: for ss in cdf.subset.unique(): ssdf = cdf.loc[cdf.subset == ss] if not type(cytList) == list and cytList == 'ALL': cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] else: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] cytokines = [c for c in cytokines if c in cytList] if len(cytokines) == 0: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] ssdf = applyResponseCriteria(ssdf, subset=cytokines, ANY=ANY, indexCols=['subset'], exceptions=generateGzAPerfExceptions(cytokines)) out.append(ssdf) cdf = pd.concat(out, axis=0) cdf.index = np.arange(cdf.shape[0]) return cdf def extractFunctionsMarkers(f, subsets, functions, markers, compressions=[('ALL', 2)]): """Extract functions from the GatingSet DataFrame, then apply a response criteria before analyzing the proportion of positive/negative cells that express a combination of activation/phenotypic markers Parameters ---------- subsets : dict From a config file, with names of subsets (keys) and column names (values) functions : dict From a config file, with names of functions (keys) and column name subset post-fixes (values) markers : dict From a config file, with names of functions (keys) and column name subset post-fixes (values) compression : list Optionally, provide list of cytokine subsets and ANYs for response calls. Use 'ALL' for all functions. 
Returns ------- df : pd.DataFrame Data with columns: subset, cytokine, marker, cytnum, nsub""" def _prepKeys(d): fdict = {fk:fv for fk,fv in d.items() if j.join([ssVal, fv]) in cols} fkeys = [fk for fk in fdict.keys()] fvals = [fdict[fk] for fk in fkeys] fkeys_stripped = [fk.replace('+','').replace('-','') for fk in fkeys] return fkeys, fvals, fkeys_stripped j = '/' cols = f.columns.tolist() newCols = {} """Subsets with each function:marker combination""" for ssName, ssVal in subsets.items(): fkeys, fvals, fkeys_stripped = _prepKeys(functions) mkeys, mvals, mkeys_stripped = _prepKeys(markers) for vals in itertools.product(*[(0,1)]*len(fkeys + mkeys)): """Go through each column name thats part of the subset function and parse into positive vs neg boolean comb.""" fneg, mneg = [], [] fpos, mpos = [], [] for v,mfv in zip(vals, fvals + mvals): nc = j.join([ssVal, mfv]) if v: if mfv in fvals: fpos.append(nc) else: mpos.append(nc) else: if mfv in fvals: fneg.append(nc) else: mneg.append(nc) ncName = (ssName, vec2subset(vals[:len(fkeys)], fkeys_stripped), vec2subset(vals[len(fkeys):], mkeys_stripped)) newCols[ncName] = {'fpos':fpos, 'fneg':fneg, 'mpos':mpos, 'mneg':mneg, 'subset':ssVal} out = [] for ssName, cytokine, marker in newCols: """Create the composite variable for each combination of variables/columns""" tmpk = (ssName, cytokine, marker) nsub = f[newCols[tmpk]['subset']].values.sum(axis=0) """Start with the boolean index that is True for all cells in the subset""" posCols = [newCols[tmpk]['subset']] + newCols[tmpk]['fpos'] + newCols[tmpk]['mpos'] posCols = [cols.index(c) for c in posCols] negCols = newCols[tmpk]['fneg'] + newCols[tmpk]['mneg'] negCols = [cols.index(c) for c in negCols] """By doing this on two large matrices in numpy it is much faster""" ind = f.values[:, posCols].all(axis=1) & (~(f.values[:, negCols])).all(axis=1) cytnum = ind.sum(axis=0) tmp = {'subset':ssName, 'cytokine':cytokine, 'marker':marker, 'nsub':nsub, 'cytnum':cytnum} out.append(tmp) cdf = pd.DataFrame(out) out = [] """Apply response criteria to in "compressions""" for cytList, ANY in compressions: for ss in cdf.subset.unique(): ssdf = cdf.loc[cdf.subset == ss] if not type(cytList) == list and cytList == 'ALL': cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] else: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] cytokines = [c for c in cytokines if c in cytList] if len(cytokines) == 0: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] ssdf = applyResponseCriteria(ssdf, subset=cytokines, ANY=ANY, indexCols=['subset', 'marker'], exceptions=generateGzAPerfExceptions(cytokines)) """Now marginalize across markers to get the nsub_cyt column for function positive cells""" tmp = ssdf.groupby(['subset', 'cytokine'])['cytnum'].agg(np.sum) tmp.name = 'nsub_cyt' ssdf = ssdf.set_index(['subset', 'cytokine']).join(tmp).reset_index() out.append(ssdf) cdf = pd.concat(out, axis=0) cdf.index = np.arange(cdf.shape[0]) return cdf def extractFunctionsGBY(f, subsets, functions, compressions=None, mincells=0): """Extract functions from the GatingSet DataFrame and optionally apply response criteria. Parameters ---------- subsets : dict From a config file, with names of subsets (keys) and column names (values) functions : dict From a config file, with names of functions (keys) and column name subset post-fixes (values) compression : list Optionally, provide list of cytokine subsets and ANYs for response calls. Use 'ALL' for all functions. 
mincells : int Do not include function combinations with less than mincells. Returns ------- df : pd.DataFrame Data with columns: subset, cytokine, cytnum, nsub""" def _prepKeys(d): fdict = {fk:fv for fk,fv in d.items() if j.join([ssVal, fv]) in f.columns} fkeys = [fk for fk in fdict.keys()] fvals = [fdict[fk] for fk in fkeys] fkeys_stripped = [fk.replace('+','').replace('-','') for fk in fkeys] return fkeys, fvals, fkeys_stripped j = '/' out = [] for ssName, ssVal in subsets.items(): fkeys, fvals, fkeys_stripped = _prepKeys(functions) ssDf = f.loc[f[ssVal]] gbyCols = [j.join([ssVal, v]) for v in fvals] if len(gbyCols) == 0: continue nsub = ssDf.shape[0] cytnums = ssDf.groupby(gbyCols, sort=False)[ssVal].count() cytnums.index.names = [n.split('/')[-1][:-1] for n in cytnums.index.names] for vals in itertools.product(*[(0,1)]*len(fkeys)): if vals in cytnums.index: cytnum = cytnums.loc[vals] if cytnum >= mincells: tmp = {'subset':ssName, 'cytokine':vec2subset(vals[:len(fkeys)], fkeys_stripped), 'nsub':nsub, 'cytnum':cytnum} elif mincells > 0: tmp = {'subset':ssName, 'cytokine':vec2subset(vals[:len(fkeys)], fkeys_stripped), 'nsub':nsub, 'cytnum':0} out.append(tmp) cdf = pd.DataFrame(out) out = [] if not compressions is None: """Apply response criteria to in "compressions""" for cytList, ANY in compressions: for ss in cdf.subset.unique(): ssdf = cdf.loc[cdf.subset == ss] if not type(cytList) == list and cytList == 'ALL': cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] else: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] cytokines = [c for c in cytokines if c in cytList] if len(cytokines) == 0: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] ssdf = applyResponseCriteria(ssdf, subset=cytokines, ANY=ANY, indexCols=['subset'], exceptions=generateGzAPerfExceptions(cytokines)) out.append(ssdf) cdf = pd.concat(out, axis=0) cdf.index = np.arange(cdf.shape[0]) return cdf def extractFunctionsMarkersGBY(f, subsets, functions, markers, compressions=[('ALL', 2)]): """Extract functions from the GatingSet DataFrame, then apply a response criteria before analyzing the proportion of positive/negative cells that express a combination of activation/phenotypic markers Parameters ---------- subsets : dict From a config file, with names of subsets (keys) and column names (values) functions : dict From a config file, with names of functions (keys) and column name subset post-fixes (values) markers : dict From a config file, with names of functions (keys) and column name subset post-fixes (values) compression : list Optionally, provide list of cytokine subsets and ANYs for response calls. Use 'ALL' for all functions. 
Returns ------- df : pd.DataFrame Data with columns: subset, cytokine, marker, cytnum, nsub""" def _prepKeys(d): fdict = {fk:fv for fk,fv in d.items() if j.join([ssVal, fv]) in f.columns} fkeys = [fk for fk in fdict.keys()] fvals = [fdict[fk] for fk in fkeys] fkeys_stripped = [fk.replace('+','').replace('-','') for fk in fkeys] return fkeys, fvals, fkeys_stripped j = '/' """Subsets with each function:marker combination""" out = [] for ssName, ssVal in subsets.items(): fkeys, fvals, fkeys_stripped = _prepKeys(functions) mkeys, mvals, mkeys_stripped = _prepKeys(markers) ssDf = f.loc[f[ssVal]] gbyCols = [j.join([ssVal, v]) for v in fvals+mvals] if len(gbyCols) == 0: continue nsub = ssDf.shape[0] cytnums = ssDf.groupby(gbyCols, sort=False)[ssVal].count() # cytnum.index = cytnum.index.reorder_levels(gbyCols) cytnums.index.names = [n.split('/')[-1][:-1] for n in cytnums.index.names] for vals in itertools.product(*[(0,1)]*len(fkeys + mkeys)): if vals in cytnums.index: tmp = {'subset':ssName, 'cytokine':vec2subset(vals[:len(fkeys)], fkeys_stripped), 'marker':vec2subset(vals[len(fkeys):], mkeys_stripped), 'nsub':nsub, 'cytnum':cytnums.loc[vals]} else: tmp = {'subset':ssName, 'cytokine':vec2subset(vals[:len(fkeys)], fkeys_stripped), 'marker':vec2subset(vals[len(fkeys):], mkeys_stripped), 'nsub':nsub, 'cytnum':0} out.append(tmp) cdf = pd.DataFrame(out) out = [] """Apply response criteria to in "compressions""" for cytList, ANY in compressions: for ss in cdf.subset.unique(): ssdf = cdf.loc[cdf.subset == ss] if not type(cytList) == list and cytList == 'ALL': cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] else: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] cytokines = [c for c in cytokines if c in cytList] if len(cytokines) == 0: cytokines = ssdf.cytokine.iloc[0].replace('-','+').split('+')[:-1] ssdf = applyResponseCriteria(ssdf, subset=cytokines, ANY=ANY, indexCols=['subset', 'marker'], exceptions=generateGzAPerfExceptions(cytokines)) """Now marginalize across markers to get the nsub_cyt column for function positive cells""" tmp = ssdf.groupby(['subset', 'cytokine'])['cytnum'].agg(np.sum) tmp.name = 'nsub_cyt' ssdf = ssdf.set_index(['subset', 'cytokine']).join(tmp).reset_index() out.append(ssdf) cdf = pd.concat(out, axis=0) cdf.index = np.arange(cdf.shape[0]) return cdf def extractRawFunctions(f, subsets, functions, downsample=1.): """Extract all cells from the GatingSet DataFrame and keep only the relevant columns for subsets and functions Parameters ---------- subsets : dict From a config file, with names of subsets (keys) and column names (values) functions : dict From a config file, with names of functions (keys) and column name subset post-fixes (values) downsample : float Fraction of samples to keep. 
Returns ------- df : pd.DataFrame Data with one row per cell and columns for each marker""" def _prepKeys(d): fdict = {fk:fv for fk,fv in d.items() if j.join([ssVal, fv]) in f.columns} fkeys = [fk for fk in fdict.keys()] fvals = [fdict[fk] for fk in fkeys] fkeys_stripped = [fk.replace('+','').replace('-','') for fk in fkeys] return fkeys, fvals, fkeys_stripped j = '/' keepCols = [] ssCols = [] for ssName, ssVal in subsets.items(): fkeys, fvals, fkeys_stripped = _prepKeys(functions) keepCols.extend([j.join([ssVal, v]) for v in fvals]) ssCols.append(ssVal) """Only keep the cells that are positive for one of the subsets""" f = f.loc[f[ssCols].any(axis=1)] if downsample < 1: ind = np.random.rand(f.shape[0]) < downsample f = f.loc[ind] f = f[keepCols + ssCols] return f
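
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): round-trips a
# boolean cytokine-combination label through subset2vec/vec2subset, matching
# the examples given in their docstrings.
#
#   vec = subset2vec('IFNg+IL2-TNFa+')               # -> array([1, 0, 1])
#   name = vec2subset(vec, ['IFNg', 'IL2', 'TNFa'])  # -> 'IFNg+IL2-TNFa+'
#   assert name == 'IFNg+IL2-TNFa+'
# ---------------------------------------------------------------------------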
mit
-2,208,608,108,407,013,400
40.135008
131
0.54925
false
anaandresarroyo/Python-GarminDataAnalyser
database/plot.py
1
1348
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt


def generate_colours(df, column, cmap_name):
    # TODO: they get generated a little different than what pandas does automatically
    labels = np.sort(df[column].unique())
    cmap = plt.get_cmap(cmap_name)
    colours = cmap(np.linspace(0,1,len(labels)+1))
    colour_dict = dict(zip(labels,colours))
    return colour_dict


def populate_plot_options(kind, alpha, cmap_name, df=pd.DataFrame(),
                          index=False, legend=False, stacked=True):
    plot_options = dict()
    plot_options['kind'] = kind
    plot_options['alpha'] = alpha

    if not df.empty:
        colour_dict = generate_colours(df, legend, cmap_name)
        label = df.loc[index,legend]
        plot_options['c'] = colour_dict[label]
        plot_options['label'] = str(label)
    else:
        plot_options['colormap'] = cmap_name

    if kind == 'line':
        plot_options['linewidth'] = 2
        # plot_options['marker'] = '.'
        # plot_options['markersize'] = 12
        # TODO: move default marker size to MatplotlibSettings.py
    elif kind == 'scatter':
        plot_options['edgecolors'] = 'face'
        plot_options['s'] = 12
    elif 'bar' in kind:
        plot_options['stacked'] = stacked
        plot_options['edgecolor'] = 'none'

    return plot_options
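
# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): builds the
# keyword dictionary for a pandas plot call. The DataFrame columns used here
# ('distance', 'sport') are illustrative assumptions only.
#
#   records = pd.DataFrame({'distance': [5.2, 10.1], 'sport': ['run', 'bike']})
#   options = populate_plot_options(kind='scatter', alpha=0.5,
#                                   cmap_name='viridis', df=records,
#                                   index=0, legend='sport')
#   records.plot(x='distance', y='distance', **options)
# ---------------------------------------------------------------------------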
mit
2,650,399,088,792,729,000
30.372093
85
0.620178
false
JackKelly/neuralnilm_prototype
scripts/e273.py
2
5910
from __future__ import print_function, division import matplotlib matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer from neuralnilm.net import BidirectionalRecurrentLayer from lasagne.nonlinearities import sigmoid, rectify, tanh from lasagne.objectives import crossentropy, mse from lasagne.init import Uniform, Normal from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer from lasagne.updates import nesterov_momentum from functools import partial import os from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff from neuralnilm.experiment import run_experiment from neuralnilm.net import TrainingError import __main__ from copy import deepcopy from math import sqrt NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0] PATH = "/homes/dk3810/workspace/python/neuralnilm/figures" SAVE_PLOT_INTERVAL = 500 GRADIENT_STEPS = 100 """ e233 based on e131c but with: * lag=32 * pool e234 * init final layer and conv layer 235 no lag 236 should be exactly as 131c: no pool, no lag, no init for final and conv layer 237 putting the pool back 238 seems pooling hurts us! disable pooling. enable lag = 32 239 BLSTM lag = 20 240 LSTM not BLSTM various lags 241 output is prediction 260 standardise inputs and outputs. 261 trying just 3 appliances. Standardisation 263 conv1d between layers ideas for next TODO: * 3 LSTM layers with smaller conv between them * why does pooling hurt us? """ from theano.ifelse import ifelse import theano.tensor as T THRESHOLD = 0 def scaled_cost(x, t): sq_error = (x - t) ** 2 def mask_and_mean_sq_error(mask): masked_sq_error = sq_error[mask.nonzero()] mean = masked_sq_error.mean() mean = ifelse(T.isnan(mean), 0.0, mean) return mean above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD) below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD) return (above_thresh_mean + below_thresh_mean) / 2.0 source_dict = dict( filename='/data/dk3810/ukdale.h5', appliances=[ ['fridge freezer', 'fridge', 'freezer'], 'hair straighteners', 'television' #'dish washer', #['washer dryer', 'washing machine'] ], max_appliance_powers=[300, 500, 200, 2500, 2400], on_power_thresholds=[5] * 5, max_input_power=5900, min_on_durations=[60, 60, 60, 1800, 1800], min_off_durations=[12, 12, 12, 1800, 600], window=("2013-06-01", "2014-07-01"), seq_length=1500, output_one_appliance=False, boolean_targets=False, train_buildings=[1], validation_buildings=[1], # skip_probability=0.0, n_seq_per_batch=50, # subsample_target=5, include_diff=False, clip_appliance_power=True, target_is_prediction=False, standardise_input=True, standardise_targets=True, input_padding=0, lag=0 ) def change_learning_rate(net, epoch): net.updates = partial(nesterov_momentum, learning_rate=0.001) net.compile() def change_subsample(net, epoch): net.source.subsample_target = 5 net.generate_validation_data_and_set_shapes() net_dict = dict( save_plot_interval=SAVE_PLOT_INTERVAL, loss_function=scaled_cost, updates=partial(nesterov_momentum, learning_rate=0.01), do_save_activations=True, epoch_callbacks={500: change_learning_rate} ) def exp_a(name): global source # source_dict_copy = deepcopy(source_dict) # source = RealApplianceSource(**source_dict_copy) source.subsample_target = 4 net_dict_copy = deepcopy(net_dict) net_dict_copy.update(dict(experiment_name=name, source=source)) net_dict_copy['layers_config'] = [ { 'type': BidirectionalRecurrentLayer, 'num_units': 
25, 'gradient_steps': GRADIENT_STEPS, 'W_in_to_hid': Normal(std=1.), 'nonlinearity': tanh }, { 'type': FeaturePoolLayer, 'ds': 2, # number of feature maps to be pooled together 'axis': 1, # pool over the time axis 'pool_function': T.mean }, { 'type': BidirectionalRecurrentLayer, 'num_units': 25, 'gradient_steps': GRADIENT_STEPS, 'W_in_to_hid': Normal(std=1/sqrt(25)), 'nonlinearity': tanh }, { 'type': FeaturePoolLayer, 'ds': 2, # number of feature maps to be pooled together 'axis': 1, # pool over the time axis 'pool_function': T.mean }, { 'type': BidirectionalRecurrentLayer, 'num_units': 25, 'gradient_steps': GRADIENT_STEPS, 'W_in_to_hid': Normal(std=1/sqrt(25)), 'nonlinearity': tanh }, { 'type': DenseLayer, 'num_units': source.n_outputs, 'nonlinearity': None, 'W': Normal(std=(1/sqrt(25))) } ] net = Net(**net_dict_copy) return net def init_experiment(experiment): full_exp_name = NAME + experiment func_call = 'exp_{:s}(full_exp_name)'.format(experiment) print("***********************************") print("Preparing", full_exp_name, "...") net = eval(func_call) return net def main(): for experiment in list('a'): full_exp_name = NAME + experiment path = os.path.join(PATH, full_exp_name) try: net = init_experiment(experiment) run_experiment(net, path, epochs=None) except KeyboardInterrupt: break except TrainingError as exception: print("EXCEPTION:", exception) except Exception as exception: print("EXCEPTION:", exception) raise if __name__ == "__main__": main()
mit
1,456,759,378,619,718,700
26.235023
109
0.631472
false
arthur-gouveia/DAT210x
Module2/assignment4.py
1
2175
import pandas as pd

# TODO: Load up the table, and extract the dataset
# out of it. If you're having issues with this, look
# carefully at the sample code provided in the reading
#
url = 'http://www.espn.com/nhl/statistics/player/_/stat/points/\
sort/points/year/2015/seasontype/2'
table = pd.read_html(url, skiprows=0, header=1)

# TODO: Rename the columns so that they match the
# column definitions provided to you on the website
#
df = table[0].copy()
df.columns = ['RK', 'Player', 'Team', 'Games_Played', 'Goals', 'Assists',
              'Points', 'Plus_Minus_Rating', 'Penalty_Minutes',
              'Points_Per_Game', 'Shots_on_Goal', 'Shooting_Percentage',
              'Game_Winning_Goals', 'Power_Play_Goals', 'Power_Play_Assists',
              'Short_Handed_Goals', 'Short_Handed_Assists']

# TODO: Get rid of any row that has at least 4 NANs in it
#
df = df.dropna(axis=0, thresh=(len(df.columns)-3))

# TODO: At this point, look through your dataset by printing
# it. There probably still are some erroneous rows in there.
# What indexing command(s) can you use to select all rows
# EXCEPT those rows?
#
# I'd use df.ix[df.RK != 'RK', :]
removable_indexes = df.ix[df.RK == 'RK', :].index
df.drop(labels=removable_indexes, axis=0, inplace=True)

# TODO: Get rid of the 'RK' column
#
df.drop(labels='RK', axis=1, inplace=True)

# TODO: Ensure there are no holes in your index by resetting
# it. By the way, don't store the original index
#
df.reset_index(drop=True, inplace=True)

# TODO: Check the data type of all columns, and ensure those
# that should be numeric are numeric
numeric_columns = list(df.columns[2:])
df[numeric_columns] = df[numeric_columns].apply(pd.to_numeric,
                                                args=('coerce',))

# TODO: Your dataframe is now ready! Use the appropriate
# commands to answer the questions on the course lab page.
print('The dataframe has {} rows,'.format(len(df)))
print('{} unique PCT values in the table'.format(len(
    df.Shooting_Percentage.unique())))
print('and we get {} by adding GP[15] and GP[16]'.format(
    df.Games_Played[15] + df.Games_Played[16]))
mit
666,916,966,561,414,700
33.52381
77
0.662529
false
pombredanne/pdftables
pdftables/TableFinder.py
1
3830
#!/usr/bin/env python
# ScraperWiki Limited
# Ian Hopkinson, 2013-06-14
# -*- coding: utf-8 -*-
"""
Code to find tables in PDF files
"""

import os
# import requests
import scraperwiki  # pdftoxml does not work on Windows
import lxml.html
import glob
import matplotlib.pyplot as plt
import collections
from counter import Counter

# TODO - Use pdfminer
# TODO
def pdftoxml(filename,options):
    ConverterPath = unicode(r'C:\Users\Ian\BitBucketRepos\0939-AgraInforma\bin\pdftohtml.exe')
    directory = os.path.split(filename)[0]
    tmpxml = os.path.join(directory,"temph.xml")
    if tmpxml in os.listdir('.'):
        os.remove(tmpxml)
    cmd = '%s -xml %s "%s" %s' % (ConverterPath, options, filename, os.path.splitext(tmpxml)[0])
    os.system(cmd)
    f = open(tmpxml,'rb')
    content = f.read()
    f.close()
    return content

def processpage(page):
    left=[]
    width=[]
    top=[]
    right=[]
    for textchunk in (page is not None and page.xpath('text')):
        thisleft = int(textchunk.attrib.get('left'))
        thiswidth = int(textchunk.attrib.get('width'))
        left.append(thisleft)
        width.append(thiswidth)
        top.append(pageheight - int(textchunk.attrib.get('top')))
        right.append(thisleft + thiswidth)
    return pageheight,pagewidth,left,top,right

def plotpage(pageheight,pagewidth,pagenumber,SelectedPDF,left,top,right):
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.axis('equal')
    ax1.plot([0,pagewidth,pagewidth,0,0],[0,0,pageheight,pageheight,0])
    ax1.scatter(left, top, s=10, c='b', marker="s")
    ax1.scatter(right, top, s=10, c='r', marker="o")
    fig.suptitle('%s : Page %d' % (SelectedPDF,pagenumber), fontsize=15)
    plt.show()
    return fig

PDF_TEST_FILES = unicode(r'C:\Users\Ian\BitBucketRepos\0939-AgraInforma\fixtures')
# PDFList = glob.glob(os.path.join(PDF_TEST_FILES,'*.pdf'))
# SelectedPDF = 6 # 6 = cit0613.pdf - table is actually an image
# r = requests.get(os.path.join(PDF_TEST_FILES,PDFList[SelectedPDF]))

options = ""
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,PDFList[SelectedPDF]),options)
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"cit0613.pdf"),options) # Works but first page is an image
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"2012.01.PosRpt.pdf"),options) # PDF to HTML does not like
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"COPAWEEKLYJUNE52013.pdf"),options)
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"COPAMONTHLYMay2013.pdf"),options) # lxml doesn't like this one, interleaved <b> and <i> tags
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"13_06_12_10_36_58_boletim_ingles_junho_2013.pdf"),options) # Long document with many tables
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"1359397366Final_Coceral grain estimate_2012_December.pdf"),options)
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"ClinicalResearchDisclosureReport2012Q2.pdf"),options) # throws not allowed
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"argentina_diputados_voting_record.pdf"),options)
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"bo_page24.pdf"),options) # Multi-column text and tables mixed on the page
# xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,"tabla_subsidios.pdf"),options) # Multi-column text and tables mixed on the page

SelectedPDF = "argentina_diputados_voting_record.pdf"
xmldata = pdftoxml(os.path.join(PDF_TEST_FILES,SelectedPDF),options)

root = lxml.etree.fromstring(xmldata)
pages = list(root)  # This is ok but

for page in pages:
    pagenumber = int(page.attrib.get("number"))
    pagewidth = int(page.attrib.get("width"))
    pageheight = int(page.attrib.get("height"))
    pageheight,pagewidth,left,top,right = processpage(page)
    fig = plotpage(pageheight,pagewidth,pagenumber,SelectedPDF,left,top,right)
    # counter=Counter(left)
bsd-2-clause
2,174,444,690,923,890,400
35.826923
142
0.707572
false
eoinmurray/icarus
Icarus/Classes/Channel.py
1
4803
import numpy as np
import matplotlib.pyplot as plt
from utils.EventEmitter import EventEmitter


class Channel(EventEmitter):
    """
        Photon counting modules channel.
    """

    def __init__(self, bin_width, detector1, detector2, name, mode = 'non_HBT'):
        """
            Initializes bins, time tags and calculates the channels
            matrix (for entangled photon measurements only).
        """
        self.bin_width = bin_width
        self.detector1 = detector1
        self.detector2 = detector2
        self.mode = mode  # operating mode, referenced by the processing methods below

        self._starts = np.array([])
        self._stops = np.array([])

        self.start = None
        self.stop = None

        self.matrix = np.kron(detector2.matrix, detector1.matrix)
        self.name = name

        self.initializeBins()
        self.initializeDetectorEvents()

        self.g2 = 0
        self.sidepeaks_avg = 0

    def resetTimeTags(self):
        """
            Resets the starts and stops
        """
        self._stops = np.array([])
        self._starts = np.array([])

    def getStarts(self):
        """
            Returns the start tags.
        """
        return self._starts

    def getStops(self):
        """
            Returns the stop tags.
        """
        return self._stops

    def get_g2(self):
        """
            Returns g2.
        """
        return self.g2

    def get_sidepeaks_avg(self):
        """
            Returns sidepeaks_avg.
        """
        return self.sidepeaks_avg

    def initializeBins(self):
        """
            Calculates the bins.
        """
        self.bins = np.linspace(0.04,400, num=400/(self.bin_width*27./1000.))
        self.counts = np.zeros(self.bins.size-1)
        self.previous_counts = np.zeros(self.bins.size-1)
        self.bin_edges = np.histogram(np.array([0]), self.bins)[1][0:self.bins.size-1]

    def initializeDetectorEvents(self):
        """
            Sets up the detection hit events.
        """
        def setStarts(detector):
            if self.start is None:
                self.start = detector.last_time
            self._starts = detector.time_tags
            self.real_time_process()

        def setStops(detector):
            if self.stop is None:
                self.stop = detector.last_time
            self._stops = detector.time_tags
            self.real_time_process()

        self.detector1.on('change', setStarts)
        self.detector2.on('change', setStops)

    def calculate_probability(self, state):
        """
            Calculates the probability of a state hitting this channel.
        """
        probability = ((np.abs( np.transpose(self.matrix)*state )**2)*4)[0,0]
        return probability

    def real_time_process(self):
        """
            HBT needs to be processed in real time.
        """
        if self.mode is 'HBT':
            if self.start is not None and self.stop is not None:
                temp_counts, self.bin_edges = np.histogram(self.stop - self.start, self.bins)
                self.counts += temp_counts
                self.start = None
                self.stop = None

    def processTimeTags(self):
        """
            If both the start and stop arrays have non-zero length,
            then it bins the counts.
        """
        if self.mode is not 'HBT':
            if (self._starts.size > 0) and (self._stops.size > 0):
                self.counts += self.processTimeTagsAlgorithm(self.bins, self._starts, self._stops)
        self.trigger('change')

    def processTimeTagsAlgorithm(self, bins, _starts, _stops):
        """
            Calculates the correlation between the starts and stops.
        """
        counts = np.zeros(bins.size - 1)
        for stop in _stops:
            diff = stop - _starts
            diff = diff[(diff > bins.min()) & (diff < bins.max())]
            counts = counts + np.histogram(diff, bins)[0]
        return counts

    def normalize(self, pulse_width):
        """
            Integrates the counts in each peak, and normalizes
            by the average of the counts in the sidepeaks.
        """
        hold_int = []
        hold_max = []
        x = self.bin_edges
        y = self.counts

        for j in xrange(int(x.max()/pulse_width)):
            minIdx = np.abs(x - pulse_width*j).argmin()
            maxIdx = np.abs(x - pulse_width*(j+1)).argmin()
            peakX = x[minIdx: maxIdx]
            peakY = y[minIdx:maxIdx]

            if j != 6:
                hold_max.append( np.max(peakY) )
                hold_int.append( np.sum(peakY) )
            else:
                delay_peak = np.sum(peakY)

        y = y/np.mean(hold_max)
        self.previous_counts = self.counts
        self.counts = y
        self.g2 = self.calculate_g2(delay_peak, hold_int)

    def unnormalize(self):
        """
            Return the unnormalized counts.
        """
        if not self.previous_counts.all():
            self.counts = self.previous_counts

    def plotPeaks(self, pulse_width):
        """
            Plots the individual peaks to check if the processing
            algorithm accurately selects the peaks
        """
        x = self.bin_edges
        y = self.counts

        for j in xrange(int(x.max()/pulse_width)):
            minIdx = np.abs(x - pulse_width*j).argmin()
            maxIdx = np.abs(x - pulse_width*(j+1)).argmin()
            peakX = x[minIdx: maxIdx]
            peakY = y[minIdx:maxIdx]
            plt.plot(peakX, peakY)

        plt.show()

    def calculate_g2(self, delay_peak, hold_int):
        """
            Divide the delay peak by the average of the sidepeaks.
        """
        if np.array(hold_int).mean() > 0:
            g2 = delay_peak/np.array(hold_int).mean()
            g2 = np.around(g2, decimals=3)
            return g2
        else:
            return 0
mit
3,459,324,685,873,773,600
18.604082
87
0.644597
false
BigDataforYou/movie_recommendation_workshop_1
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/parser/c_parser_only.py
1
18902
# -*- coding: utf-8 -*- """ Tests that apply specifically to the CParser. Unless specifically stated as a CParser-specific issue, the goal is to eventually move as many of these tests out of this module as soon as the Python parser can accept further arguments when parsing. """ import nose import numpy as np import pandas as pd import pandas.util.testing as tm from pandas import DataFrame, Series, Index, MultiIndex from pandas import compat from pandas.compat import StringIO, range, lrange class CParserTests(object): def test_buffer_overflow(self): # see gh-9205: test certain malformed input files that cause # buffer overflows in tokenizer.c malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer cperr = 'Buffer overflow caught - possible malformed input file.' for malf in (malfw, malfs, malfl): try: self.read_table(StringIO(malf)) except Exception as err: self.assertIn(cperr, str(err)) def test_buffer_rd_bytes(self): # see gh-12098: src->buffer in the C parser can be freed twice leading # to a segfault if a corrupt gzip file is read with 'read_csv' and the # buffer is filled more than once before gzip throws an exception data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \ '\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \ '\xA6\x4D' + '\x55' * 267 + \ '\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \ '\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO' for i in range(100): try: self.read_csv(StringIO(data), compression='gzip', delim_whitespace=True) except Exception: pass def test_delim_whitespace_custom_terminator(self): # See gh-12912 data = """a b c~1 2 3~4 5 6~7 8 9""" df = self.read_csv(StringIO(data), lineterminator='~', delim_whitespace=True) expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=['a', 'b', 'c']) tm.assert_frame_equal(df, expected) def test_parse_dates_empty_string(self): # see gh-2263 s = StringIO("Date, test\n2012-01-01, 1\n,2") result = self.read_csv(s, parse_dates=["Date"], na_filter=False) self.assertTrue(result['Date'].isnull()[1]) def test_dtype_and_names_error(self): # see gh-8833: passing both dtype and names # resulting in an error reporting issue data = """ 1.0 1 2.0 2 3.0 3 """ # base cases result = self.read_csv(StringIO(data), sep='\s+', header=None) expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]]) tm.assert_frame_equal(result, expected) result = self.read_csv(StringIO(data), sep='\s+', header=None, names=['a', 'b']) expected = DataFrame( [[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b']) tm.assert_frame_equal(result, expected) # fallback casting result = self.read_csv(StringIO( data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32}) expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b']) expected['a'] = expected['a'].astype(np.int32) tm.assert_frame_equal(result, expected) data = """ 1.0 1 nan 2 3.0 3 """ # fallback casting, but not castable with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'): self.read_csv(StringIO(data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32}) def test_passing_dtype(self): # see gh-6607 df = DataFrame(np.random.rand(5, 2), columns=list( 'AB'), index=['1A', '1B', '1C', '1D', '1E']) with tm.ensure_clean('__passing_str_as_dtype__.csv') as path: df.to_csv(path) # see gh-3795: passing 'str' as the dtype result = self.read_csv(path, dtype=str, index_col=0) tm.assert_series_equal(result.dtypes, Series( 
{'A': 'object', 'B': 'object'})) # we expect all object columns, so need to # convert to test for equivalence result = result.astype(float) tm.assert_frame_equal(result, df) # invalid dtype self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'}, index_col=0) # valid but we don't support it (date) self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'}, index_col=0) self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'}, index_col=0, parse_dates=['B']) # valid but we don't support it self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'}, index_col=0) # see gh-12048: empty frame actual = self.read_csv(StringIO('A,B'), dtype=str) expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str) tm.assert_frame_equal(actual, expected) def test_precise_conversion(self): # see gh-8002 tm._skip_if_32bit() from decimal import Decimal normal_errors = [] precise_errors = [] # test numbers between 1 and 2 for num in np.linspace(1., 2., num=500): # 25 decimal digits of precision text = 'a\n{0:.25}'.format(num) normal_val = float(self.read_csv(StringIO(text))['a'][0]) precise_val = float(self.read_csv( StringIO(text), float_precision='high')['a'][0]) roundtrip_val = float(self.read_csv( StringIO(text), float_precision='round_trip')['a'][0]) actual_val = Decimal(text[2:]) def error(val): return abs(Decimal('{0:.100}'.format(val)) - actual_val) normal_errors.append(error(normal_val)) precise_errors.append(error(precise_val)) # round-trip should match float() self.assertEqual(roundtrip_val, float(text[2:])) self.assertTrue(sum(precise_errors) <= sum(normal_errors)) self.assertTrue(max(precise_errors) <= max(normal_errors)) def test_compact_ints(self): if compat.is_platform_windows() and not self.low_memory: raise nose.SkipTest( "segfaults on win-64, only when all tests are run") data = ('0,1,0,0\n' '1,1,0,0\n' '0,1,0,1') result = self.read_csv(StringIO(data), delimiter=',', header=None, compact_ints=True, as_recarray=True) ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)]) self.assertEqual(result.dtype, ex_dtype) result = self.read_csv(StringIO(data), delimiter=',', header=None, as_recarray=True, compact_ints=True, use_unsigned=True) ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)]) self.assertEqual(result.dtype, ex_dtype) def test_compact_ints_as_recarray(self): if compat.is_platform_windows() and self.low_memory: raise nose.SkipTest( "segfaults on win-64, only when all tests are run") data = ('0,1,0,0\n' '1,1,0,0\n' '0,1,0,1') result = self.read_csv(StringIO(data), delimiter=',', header=None, compact_ints=True, as_recarray=True) ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)]) self.assertEqual(result.dtype, ex_dtype) result = self.read_csv(StringIO(data), delimiter=',', header=None, as_recarray=True, compact_ints=True, use_unsigned=True) ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)]) self.assertEqual(result.dtype, ex_dtype) def test_pass_dtype(self): data = """\ one,two 1,2.5 2,3.5 3,4.5 4,5.5""" result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'}) self.assertEqual(result['one'].dtype, 'u1') self.assertEqual(result['two'].dtype, 'object') def test_pass_dtype_as_recarray(self): if compat.is_platform_windows() and self.low_memory: raise nose.SkipTest( "segfaults on win-64, only when all tests are run") data = """\ one,two 1,2.5 2,3.5 3,4.5 4,5.5""" result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'}, 
as_recarray=True) self.assertEqual(result['one'].dtype, 'u1') self.assertEqual(result['two'].dtype, 'S1') def test_empty_pass_dtype(self): data = 'one,two' result = self.read_csv(StringIO(data), dtype={'one': 'u1'}) expected = DataFrame({'one': np.empty(0, dtype='u1'), 'two': np.empty(0, dtype=np.object)}) tm.assert_frame_equal(result, expected, check_index_type=False) def test_empty_with_index_pass_dtype(self): data = 'one,two' result = self.read_csv(StringIO(data), index_col=['one'], dtype={'one': 'u1', 1: 'f'}) expected = DataFrame({'two': np.empty(0, dtype='f')}, index=Index([], dtype='u1', name='one')) tm.assert_frame_equal(result, expected, check_index_type=False) def test_empty_with_multiindex_pass_dtype(self): data = 'one,two,three' result = self.read_csv(StringIO(data), index_col=['one', 'two'], dtype={'one': 'u1', 1: 'f8'}) exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')], names=['one', 'two']) expected = DataFrame( {'three': np.empty(0, dtype=np.object)}, index=exp_idx) tm.assert_frame_equal(result, expected, check_index_type=False) def test_empty_with_mangled_column_pass_dtype_by_names(self): data = 'one,one' result = self.read_csv(StringIO(data), dtype={ 'one': 'u1', 'one.1': 'f'}) expected = DataFrame( {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')}) tm.assert_frame_equal(result, expected, check_index_type=False) def test_empty_with_mangled_column_pass_dtype_by_indexes(self): data = 'one,one' result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'}) expected = DataFrame( {'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')}) tm.assert_frame_equal(result, expected, check_index_type=False) def test_empty_with_dup_column_pass_dtype_by_names(self): data = 'one,one' result = self.read_csv( StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'}) expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1) tm.assert_frame_equal(result, expected, check_index_type=False) def test_empty_with_dup_column_pass_dtype_by_indexes(self): # FIXME in gh-9424 raise nose.SkipTest( "gh-9424; known failure read_csv with duplicate columns") data = 'one,one' result = self.read_csv( StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'}) expected = pd.concat([Series([], name='one', dtype='u1'), Series([], name='one', dtype='f')], axis=1) tm.assert_frame_equal(result, expected, check_index_type=False) def test_usecols_dtypes(self): data = """\ 1,2,3 4,5,6 7,8,9 10,11,12""" result = self.read_csv(StringIO(data), usecols=(0, 1, 2), names=('a', 'b', 'c'), header=None, converters={'a': str}, dtype={'b': int, 'c': float}, ) result2 = self.read_csv(StringIO(data), usecols=(0, 2), names=('a', 'b', 'c'), header=None, converters={'a': str}, dtype={'b': int, 'c': float}, ) self.assertTrue((result.dtypes == [object, np.int, np.float]).all()) self.assertTrue((result2.dtypes == [object, np.float]).all()) def test_memory_map(self): # it works! 
self.read_csv(self.csv1, memory_map=True) def test_disable_bool_parsing(self): # #2090 data = """A,B,C Yes,No,Yes No,Yes,Yes Yes,,Yes No,No,No""" result = self.read_csv(StringIO(data), dtype=object) self.assertTrue((result.dtypes == object).all()) result = self.read_csv(StringIO(data), dtype=object, na_filter=False) self.assertEqual(result['B'][2], '') def test_euro_decimal_format(self): data = """Id;Number1;Number2;Text1;Text2;Number3 1;1521,1541;187101,9543;ABC;poi;4,738797819 2;121,12;14897,76;DEF;uyt;0,377320872 3;878,158;108013,434;GHI;rez;2,735694704""" df2 = self.read_csv(StringIO(data), sep=';', decimal=',') self.assertEqual(df2['Number1'].dtype, float) self.assertEqual(df2['Number2'].dtype, float) self.assertEqual(df2['Number3'].dtype, float) def test_custom_lineterminator(self): data = 'a,b,c~1,2,3~4,5,6' result = self.read_csv(StringIO(data), lineterminator='~') expected = self.read_csv(StringIO(data.replace('~', '\n'))) tm.assert_frame_equal(result, expected) def test_raise_on_passed_int_dtype_with_nas(self): # see gh-2631 data = """YEAR, DOY, a 2001,106380451,10 2001,,11 2001,106380451,67""" self.assertRaises(ValueError, self.read_csv, StringIO(data), sep=",", skipinitialspace=True, dtype={'DOY': np.int64}) def test_na_trailing_columns(self): data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax 2012-03-14,USD,AAPL,BUY,1000 2012-05-12,USD,SBUX,SELL,500""" result = self.read_csv(StringIO(data)) self.assertEqual(result['Date'][1], '2012-05-12') self.assertTrue(result['UnitPrice'].isnull().all()) def test_parse_ragged_csv(self): data = """1,2,3 1,2,3,4 1,2,3,4,5 1,2 1,2,3,4""" nice_data = """1,2,3,, 1,2,3,4, 1,2,3,4,5 1,2,,, 1,2,3,4,""" result = self.read_csv(StringIO(data), header=None, names=['a', 'b', 'c', 'd', 'e']) expected = self.read_csv(StringIO(nice_data), header=None, names=['a', 'b', 'c', 'd', 'e']) tm.assert_frame_equal(result, expected) # too many columns, cause segfault if not careful data = "1,2\n3,4,5" result = self.read_csv(StringIO(data), header=None, names=lrange(50)) expected = self.read_csv(StringIO(data), header=None, names=lrange(3)).reindex(columns=lrange(50)) tm.assert_frame_equal(result, expected) def test_tokenize_CR_with_quoting(self): # see gh-3453 data = ' a,b,c\r"a,b","e,d","f,f"' result = self.read_csv(StringIO(data), header=None) expected = self.read_csv(StringIO(data.replace('\r', '\n')), header=None) tm.assert_frame_equal(result, expected) result = self.read_csv(StringIO(data)) expected = self.read_csv(StringIO(data.replace('\r', '\n'))) tm.assert_frame_equal(result, expected) def test_raise_on_no_columns(self): # single newline data = "\n" self.assertRaises(ValueError, self.read_csv, StringIO(data)) # test with more than a single newline data = "\n\n\n" self.assertRaises(ValueError, self.read_csv, StringIO(data)) def test_1000_sep_with_decimal(self): data = """A|B|C 1|2,334.01|5 10|13|10. """ expected = DataFrame({ 'A': [1, 10], 'B': [2334.01, 13], 'C': [5, 10.] 
}) tm.assert_equal(expected.A.dtype, 'int64') tm.assert_equal(expected.B.dtype, 'float') tm.assert_equal(expected.C.dtype, 'float') df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data), sep='|', thousands=',', decimal='.') tm.assert_frame_equal(df, expected) data_with_odd_sep = """A|B|C 1|2.334,01|5 10|13|10, """ df = self.read_csv(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',') tm.assert_frame_equal(df, expected) def test_grow_boundary_at_cap(self): # See gh-12494 # # Cause of error was that the C parser # was not increasing the buffer size when # the desired space would fill the buffer # to capacity, which would later cause a # buffer overflow error when checking the # EOF terminator of the CSV stream def test_empty_header_read(count): s = StringIO(',' * count) expected = DataFrame(columns=[ 'Unnamed: {i}'.format(i=i) for i in range(count + 1)]) df = self.read_csv(s) tm.assert_frame_equal(df, expected) for count in range(1, 101): test_empty_header_read(count) def test_inf_parsing(self): data = """\ ,A a,inf b,-inf c,Inf d,-Inf e,INF f,-INF g,INf h,-INf i,inF j,-inF""" inf = float('inf') expected = Series([inf, -inf] * 5) df = self.read_csv(StringIO(data), index_col=0) tm.assert_almost_equal(df['A'].values, expected.values) df = self.read_csv(StringIO(data), index_col=0, na_filter=False) tm.assert_almost_equal(df['A'].values, expected.values)
mit
8,874,371,931,012,196,000
35.28023
79
0.535287
false
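The pandas CParser test file above (test_dtype_and_names_error in particular) asserts that read_csv falls back to safe casting when both names and an integer dtype are supplied for a float-formatted column. A minimal, self-contained sketch of that behaviour follows; it is not part of the dataset record, and the input string and column names are invented for illustration.

from io import StringIO
import numpy as np
import pandas as pd

data = "1.0 1\n2.0 2\n3.0 3\n"
df = pd.read_csv(StringIO(data), sep=r'\s+', header=None,
                 names=['a', 'b'], dtype={'a': np.int32})
# Column 'a' ends up as int32 because 1.0, 2.0, 3.0 cast safely; replacing a
# value with 'nan' would instead raise "cannot safely convert", which is the
# failure mode the test above checks for.
print(df.dtypes)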
aemerick/galaxy_analysis
plot/plot_mass_volume_fractions.py
1
4149
from galaxy_analysis.plot.plot_styles import *
import matplotlib.pyplot as plt
"-----------------------------------------"
import deepdish as dd

from galaxy_analysis.utilities import utilities
from galaxy_analysis.plot.plot_styles import color_dict

import numpy as np
import sys

TMAX = 500.0  # maximum (normalized) plot time


def get_fractions(ftype, data = None, tmin = None, tmax = None,
                  average = False,
                  phases = ['Molecular','CNM','WNM','WIM','HIM']):

    if (not (ftype == 'mass')) and (not (ftype == 'volume')):
        print("ftype must be either 'mass' or 'volume'")
        raise ValueError

    if data is None:
        if tmin is None or tmax is None:
            print("need to specify data set or time range")
            raise ValueError

        if average:
            # compute averages for each field
            fractions = {}
            min, max, std = {}, {}, {}
            for k in phases:
                # key path is assumed to be ['gas_meta_data', <ftype>_fractions, <phase>];
                # compute_time_average is expected to be provided by the wildcard
                # plot_styles / utilities imports above
                x, fractions[k], min[k], max[k], std[k] =\
                    compute_time_average(['gas_meta_data', ftype + '_fractions', k],
                                         tmin = tmin, tmax = tmax, xbins = None)

            return fractions, min, max, std

        else:
            data_list, times = utilities.select_data_by_time(tmin = tmin, tmax = tmax)
            all_fractions = [None]*len(data_list)
            for i, dname in enumerate(data_list):
                all_fractions[i] = dd.io.load(dname,
                                              '/gas_meta_data/' + ftype + '_fractions/')

            # transfer to plotable format
            combined_fractions = {}
            for k in phases:
                combined_fractions[k] = [all_fractions[i][k] for i in np.arange(len(data_list))]

            return times[(times > tmin)*(times < tmax)], combined_fractions

    else:
        fractions = data['gas_meta_data'][ftype + '_fractions']

    return fractions


def plot_mass_fraction(t, y, std = None, outdir = './',
                       phases = ['Molecular','CNM','WNM','WIM','HIM']):

    fig, ax = plt.subplots()

    for k in phases:
        ax.plot(t - t[0], y[k], lw = line_width, label = k, color = color_dict[k])

    ax.set_xlabel(r'Time (Myr)')
    ax.set_ylabel(r'ISM Mass Fraction')
    ax.legend(loc = 'upper right', ncol = 2)
    plt.minorticks_on()

    fig.set_size_inches(8, 8)
    plt.tight_layout()

    ax.set_ylim(0.0, 0.9)
    ax.set_xlim(0.0, np.min([TMAX, np.max(t - t[0])]))
    fig.savefig('phase_mass_fraction_evolution.png')

    ax.set_ylim(1.0E-5, 1.0)
    ax.semilogy()
    ax.legend(loc = 'lower right', ncol = 2)
    fig.savefig(outdir + 'phase_mass_fraction_evolution_log.png')

    plt.close()

    return


def plot_volume_fraction(t, y, std = None, outdir = './',
                         phases = ['Molecular','CNM','WNM','WIM','HIM']):

    fig, ax = plt.subplots()

    for k in phases:
        ax.plot(t - t[0], y[k], lw = line_width, label = k, color = color_dict[k])

    ax.set_xlabel(r'Time (Myr)')
    ax.set_ylabel(r'ISM Volume Fraction')
    ax.legend(loc = 'upper right', ncol = 2)
    ax.set_xlim(0.0, np.min([TMAX, np.max(t - t[0])]))
    plt.minorticks_on()

    fig.set_size_inches(8, 8)
    ax.set_ylim(0.0, 0.9)
    plt.tight_layout()

    fig.savefig('phase_volume_fraction_evolution.png')

    ax.set_ylim(1.0E-5, 1.0)
    ax.semilogy()
    ax.legend(loc = 'lower right', ncol = 2)
    fig.savefig(outdir + 'phase_volume_fraction_evolution_log.png')

    plt.close()

    return


def plot_fractions(outdir = './', phases = ['Molecular','CNM','WNM','WIM','HIM']):

    times, mass = get_fractions(tmin = 50, tmax = 1260, ftype = 'mass', phases = phases)
    plot_mass_fraction(times, mass, outdir = outdir, phases = phases)

    times, volume = get_fractions(tmin = 50, tmax = 1260, ftype = 'volume', phases = phases)
    plot_volume_fraction(times, volume, outdir = outdir, phases = phases)

    return


if __name__ == '__main__':

    outdir = './'
    if len(sys.argv) > 1:
        outdir = sys.argv[1]

    phases = ['CNM','WNM','WIM','HIM']

    plot_fractions(outdir = outdir, phases = phases)
mit
1,797,107,613,218,390,300
31.928571
96
0.555556
false
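The plotting helpers in the record above take a time array plus a dict mapping ISM phase names to fraction time series, and rely on line_width and color_dict from galaxy_analysis.plot.plot_styles. A standalone sketch of the same plot pattern follows, using only matplotlib and synthetic data; the phase names are kept from the record, while the values and styling are invented.

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0.0, 500.0, 200)                      # Myr
phases = ['CNM', 'WNM', 'WIM', 'HIM']
fractions = {k: 0.25 + 0.1 * np.sin(t / 50.0 + i)     # fake ISM fractions
             for i, k in enumerate(phases)}

fig, ax = plt.subplots(figsize=(8, 8))
for k in phases:
    ax.plot(t - t[0], fractions[k], lw=3, label=k)
ax.set_xlabel('Time (Myr)')
ax.set_ylabel('ISM Mass Fraction')
ax.set_xlim(0.0, t[-1] - t[0])
ax.set_ylim(0.0, 0.9)
ax.legend(loc='upper right', ncol=2)
fig.savefig('phase_mass_fraction_evolution_demo.png')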
axeven/fd-tools
compute_uct_jumps.py
1
7770
#!/usr/bin/env python3 import argparse import math import matplotlib.pyplot as plt import numpy as np import os import re from common import get_file_list from PIL import Image, ImageDraw class Node: def __init__(self, vars): self.vars = vars self.initial_reward = 0 self.reward = 0 self.children = set() self.parents = set() self.duplicate = 0 self.simulation_count = 0 def __eq__(self, other): return self.vars == other.vars def __hash__(self): return hash(self.vars) def add_child(self, node): self.children.add(node) node.parents.add(self) def update_reward(self, reward): if self.simulation_count == 0: self.initial_reward = reward self.reward = self.reward * self.simulation_count + reward self.simulation_count += 1 self.reward /= self.simulation_count # if self.reward > 0: # print(self.vars, self.reward) for par in self.parents: par.update_reward(reward) class MinMaxColor: def __init__(self, min_value, max_value, min_color, max_color): self.min_value = min_value self.max_value = max_value self.min_color = min_color self.max_color = max_color def get_color(self, value): if self.min_value < self.max_value: intp_value = (value - self.min_value) / (self.max_value - self.min_value) else: intp_value = 0 intp_color = [0] * len(self.min_color) for i in range(len(self.min_color)): intp_color[i] = int(self.min_color[i] + intp_value * (self.max_color[i] - self.min_color[i])) return tuple(intp_color) def get_parents(existing_nodes, child_id): parents = [] for var in child_id: parent_id = set(child_id) parent_id.remove(var) frozen_id = frozenset(parent_id) if frozen_id in existing_nodes: parents.append(existing_nodes[frozen_id]) return parents def construct_tree_from_log_file(location): id_pattern = re.compile(r'merged variables \{([\d\s]*)\}') reward_pattern = re.compile(r'Reward for this simulation: (\d+\.*\d*)') root = Node(frozenset()) existing_nodes = {frozenset([0]): root} min_reward = float('inf') max_reward = 0 reward_match = False last_node_id = None jump_count = 0 with open(location, 'r') as f: for line in f: if not reward_match: reward_match = reward_pattern.search(line) else: id_match = id_pattern.search(line) if id_match: ids = id_match.group(1).strip().split(' ') id_set = set([]) for i in ids: id_set.add(int(i)) frozen_id = frozenset(id_set) if frozen_id not in existing_nodes: reward = float(reward_match.group(1)) min_reward = min(min_reward, reward) max_reward = max(max_reward, reward) if len(frozen_id) == 2: parents = [root] else: parents = get_parents(existing_nodes, frozen_id) child = Node(frozen_id) existing_nodes[frozen_id] = child for par in parents: par.add_child(child) # if reward > 1: # print(line, reward) # return child.update_reward(reward) # computing jumps if len(frozen_id) > 2: if last_node_id is not None and len(frozen_id.intersection(last_node_id)) < 2: # print('from', last_node_id, 'to', frozen_id) jump_count += 1 last_node_id = frozen_id reward_match = False return root, min_reward, max_reward, jump_count def print_node(node, prefix=''): print(prefix + str(node.vars), str(node.reward), str(node.simulation_count)) for child in node.children: print_node(child, prefix + ' ') def visualize_reward_distribution(node, min_reward, max_reward, bin_count, max_y, output_folder='./'): node_at_depth = [] queue = [(node, 0)] while len(queue) != 0: next_queue = [] for cur_node, depth in queue: if depth == len(node_at_depth): node_at_depth.append(set()) node_at_depth[depth].add(cur_node) if len(cur_node.children) != 0: for child_i in cur_node.children: next_queue.append((child_i, depth + 
1)) queue = next_queue for i in range(len(node_at_depth)): plt.clf() bin_size = (max_reward - min_reward) / bin_count bins = [min_reward + bin_size * i for i in range(bin_count)] n = [0] * bin_count for v in node_at_depth[i]: n[max(0, int(math.ceil((v.reward - min_reward) / bin_size)))] += 1 bin_size = bins[1] - bins[0] if i > 0: child_hist = [0] * 50 child_x = [bins[0] + i * bin_size for i in range(50)] for nd in node_at_depth[i]: if bin_size == 0: idx = 0 else: idx = max(int(math.ceil(((nd.reward - bins[0]) / bin_size))), 0) child_hist[idx] += nd.simulation_count plt.bar(child_x, child_hist, bin_size, color='yellow', alpha=1) plt.bar(bins, n, bin_size, color='blue', alpha=0.5) plt.xlim([min_reward, max_reward]) plt.ylim([0, max_y]) # plt.hist(rewards, 50, facecolor='blue', alpha=0.5) plt.grid(True) plt.savefig(output_folder + 'hist_at_' + str(i) + '.png') def main(): parser = argparse.ArgumentParser() parser.add_argument("input_file", help="file or folder containing the log data") # parser.add_argument('output_folder', help="output folder to save the images", default='./') # parser.add_argument('--min_reward', '-miny', help='minimum in x axis', default=0, type=float) # parser.add_argument('--max_reward', '-maxx', help='maximum in x axis', default=1, type=float) # parser.add_argument('--bin_count', '-bin', help='histogram bin count', default=50, type=int) # parser.add_argument('--max_y', '-maxy', help='maximum in y axis', default=100, type=int) args = parser.parse_args() input_files = [] if os.path.isfile(args.input_file): input_files = [args.input_file] elif os.path.exists(args.input_file): input_files = get_file_list(args.input_file, ext='.log') if len(input_files) == 0: print(args.input_file, 'does not exist') return """ output_folder = args.output_folder if not output_folder.endswith('/'): output_folder += '/' """ for file in input_files: # print('processing', file, '...') tree_root, min_reward, max_reward, jump_count = construct_tree_from_log_file(file) print(file, jump_count) # print_node(tree_root) """ if len(input_files) == 1: out = output_folder elif len(input_files) > 1: out = output_folder + '/' + file[len(args.input_file):] + '/' if not os.path.exists(out): os.makedirs(out) """ # visualize_reward_distribution(tree_root, args.min_reward, args.max_reward, args.bin_count, args.max_y, output_folder=out) if __name__ == '__main__': main()
gpl-3.0
3,594,872,216,282,004,500
36.177033
131
0.532819
false
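compute_uct_jumps.py above counts a "jump" whenever two consecutively visited merge sets of more than two variables share fewer than two variables. That rule can be isolated as a small function, sketched below; the example visit sequence is made up.

def count_jumps(visited_sets):
    # Mirrors the jump bookkeeping in construct_tree_from_log_file:
    # only sets with more than two variables participate, and a jump is a
    # transition whose overlap with the previous set is smaller than two.
    jump_count = 0
    last = None
    for frozen_id in visited_sets:
        if len(frozen_id) > 2:
            if last is not None and len(frozen_id & last) < 2:
                jump_count += 1
            last = frozen_id
    return jump_count

visits = [frozenset({1, 2}), frozenset({1, 2, 3}),
          frozenset({1, 2, 3, 4}), frozenset({5, 6, 7})]
print(count_jumps(visits))   # -> 1 (the move to {5, 6, 7})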
smdabdoub/phylotoast
bin/diversity.py
2
9652
#!/usr/bin/env python """ Calculate and plot alpha diversity for two or more sample categories. """ import sys import csv import argparse import os.path as osp from itertools import izip_longest from collections import defaultdict from phylotoast import graph_util as gu, util as putil importerrors = [] try: import biom except ImportError as ie: importerrors.append(ie) try: import scipy.stats as stats except ImportError as ie: importerrors.append(ie) try: from skbio.diversity import alpha except ImportError as ie: importerrors.append(ie) try: from matplotlib import pyplot as plt, gridspec except ImportError as ie: importerrors.append(ie) if len(importerrors) != 0: for item in importerrors: print "Import Error. Please install missing module:", item sys.exit() def gather_samples(biomT): return {sid: biomT.data(sid).astype(int) for sid in biomT.ids()} def calc_diversity(method, parsed_mapf, biom, cats, cats_index): counts = {cat: [] for cat in cats} sample_ids = [] for sid, sample_counts in gather_samples(biom).items(): sample_ids.append(sid) if sid in parsed_mapf: counts[parsed_mapf[sid][cats_index]].append((sid, sample_counts)) div_calc = {cat: {count[0]: method(count[1]) for count in counts} for cat, counts in counts.items()} return div_calc, sample_ids def print_MannWhitneyU(div_calc): """ Compute the Mann-Whitney U test for unequal group sample sizes. """ try: x = div_calc.values()[0].values() y = div_calc.values()[1].values() except: return "Error setting up input arrays for Mann-Whitney U Test. Skipping "\ "significance testing." T, p = stats.mannwhitneyu(x, y) print "\nMann-Whitney U test statistic:", T print "Two-tailed p-value: {}".format(2 * p) def print_KruskalWallisH(div_calc): """ Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that each group must have at least 5 measurements. """ calc = defaultdict(list) try: for k1, v1 in div_calc.iteritems(): for k2, v2 in v1.iteritems(): calc[k1].append(v2) except: return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\ "significance testing." h, p = stats.kruskal(*calc.values()) print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h) print "p-value: {}".format(p) def plot_group_diversity(diversities, grp_colors, title, diversity_type, out_dir, plot_ext): fig_div = plt.figure(figsize=(21, 7)) grid = gridspec.GridSpec(1, 2) # Disease States Shannon Diversity plots ax_div = fig_div.add_subplot(grid[0, 0]) for i, grp in enumerate(diversities): gu.plot_kde(diversities[grp].values(), ax_div, title, grp_colors[grp]) ax_div.set_xlabel(diversity_type.title()) ax_div.set_ylabel("Density") ax_div.legend([plt.Rectangle((0, 0), 1, 1, fc=color) for color in grp_colors.values()], grp_colors.keys(), loc="best") fig_div.savefig(osp.join(out_dir, diversity_type+"."+plot_ext), facecolor="white", edgecolor="none", bbox_inches="tight", pad_inches=0.2) def write_diversity_metrics(data, sample_ids, fp=None): """ Given a dictionary of diversity calculations (keyed by method) write out the data to a file. 
""" if fp is None: fp = "./diversity_data.txt" with open(fp, "w") as outf: out = csv.writer(outf, delimiter="\t") out.writerow(["SampleID", "Group", "Calculation"]) for group, d in data.iteritems(): for sid, value in d.iteritems(): out.writerow([sid, group, value]) def handle_program_options(): """Parses the given options passed in at the command line.""" parser = argparse.ArgumentParser(description="Calculate the alpha diversity\ of a set of samples using one or more \ metrics and output a kernal density \ estimator-smoothed histogram of the \ results.") parser.add_argument("-m", "--map_file", help="QIIME mapping file.") parser.add_argument("-i", "--biom_fp", help="Path to the BIOM table") parser.add_argument("-c", "--category", help="Specific category from the mapping file.") parser.add_argument("-d", "--diversity", default=["shannon"], nargs="+", help="The alpha diversity metric. Default \ value is 'shannon', which will calculate the Shannon\ entropy. Multiple metrics can be specified (space separated).\ The full list of metrics is available at:\ http://scikit-bio.org/docs/latest/generated/skbio.diversity.alpha.html.\ Beta diversity metrics will be supported in the future.") parser.add_argument("--x_label", default=[None], nargs="+", help="The name of the diversity metric to be displayed on the\ plot as the X-axis label. If multiple metrics are specified,\ then multiple entries for the X-axis label should be given.") parser.add_argument("--color_by", help="A column name in the mapping file containing\ hexadecimal (#FF0000) color values that will\ be used to color the groups. Each sample ID must\ have a color entry.") parser.add_argument("--plot_title", default="", help="A descriptive title that will appear at the top \ of the output plot. Surround with quotes if there are\ spaces in the title.") parser.add_argument("-o", "--output_dir", default=".", help="The directory plots will be saved to.") parser.add_argument("--image_type", default="png", help="The type of image to save: png, svg, pdf, eps, etc...") parser.add_argument("--save_calculations", help="Path and name of text file to store the calculated " "diversity metrics.") parser.add_argument("--suppress_stats", action="store_true", help="Do not display " "significance testing results which are shown by default.") parser.add_argument("--show_available_metrics", action="store_true", help="Supply this parameter to see which alpha diversity metrics " " are available for usage. 
No calculations will be performed" " if this parameter is provided.") return parser.parse_args() def main(): args = handle_program_options() metrics = [m for m in alpha.__all__ if "_ci" not in m] try: metrics.remove("faith_pd") except ValueError: pass if args.show_available_metrics: print "\nAvailable alpha diversity metrics:" return "\n".join(metrics) # check that the output dir exists, create it if not msg = putil.ensure_dir(args.output_dir) # if an error occurs, print and exit if msg: sys.exit(msg) # parse mapping file try: header, sample_map = putil.parse_map_file(args.map_file) except Exception as ioe: err_msg = "\nError while processing the mapping file: {}\n" sys.exit(err_msg.format(ioe)) # parse BIOM table try: biom_tbl = biom.load_table(args.biom_fp) except Exception as ioe: err_msg = "\nError loading BIOM table file: {}\n" sys.exit(err_msg.format(ioe)) # group samples by category if args.category not in header: sys.exit("Category '{}' not found".format(args.category)) cat_idx = header.index(args.category) cat_vals = {entry[cat_idx] for entry in sample_map.values()} plot_title = args.plot_title colors = putil.color_mapping(sample_map, header, args.category, args.color_by) # Perform diversity calculations and density plotting for method, x_label in izip_longest(args.diversity, args.x_label): if x_label is None: x_label = method.title() if method not in alpha.__all__: sys.exit("ERROR: Diversity metric not found: {}.".format(method)) elif method in alpha.__all__ and method not in metrics: sys.exit("Currently, PhyloToAST does not support {} metric.".format(method)) metric = eval("alpha."+method) div_calc, sample_ids = calc_diversity(metric, sample_map, biom_tbl, cat_vals, cat_idx) if args.save_calculations: write_diversity_metrics(div_calc, sample_ids, args.save_calculations) plot_group_diversity(div_calc, colors, plot_title, x_label, args.output_dir, args.image_type) # calculate and print significance testing results if not args.suppress_stats: print "Diversity significance testing: {}".format(x_label) if len(cat_vals) == 2: print_MannWhitneyU(div_calc) elif len(cat_vals) > 2: print_KruskalWallisH(div_calc) print else: continue if __name__ == "__main__": sys.exit(main())
mit
-6,323,100,033,863,713,000
38.557377
101
0.593452
false
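diversity.py above defers its significance testing to SciPy: Mann-Whitney U for two groups and Kruskal-Wallis H for three or more. A self-contained sketch on invented alpha-diversity values follows; note that the script doubles the Mann-Whitney p-value because older SciPy releases reported a one-sided p by default, whereas newer releases expose an alternative= argument instead.

import scipy.stats as stats

group_a = [2.1, 2.4, 2.2, 2.8, 2.6]      # e.g. Shannon entropy per sample
group_b = [1.7, 1.9, 2.0, 1.8, 2.1]
group_c = [2.9, 3.1, 2.7, 3.0, 2.8]

# Two groups: Mann-Whitney U, as in print_MannWhitneyU
u_stat, p_two_groups = stats.mannwhitneyu(group_a, group_b)

# Three or more groups: Kruskal-Wallis H, as in print_KruskalWallisH
h_stat, p_three_groups = stats.kruskal(group_a, group_b, group_c)

print(u_stat, p_two_groups)
print(h_stat, p_three_groups)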
chrishavlin/nyc_taxi_viz
src/pl_load_and_map.py
1
2391
"""
pl_load_and_map.py

a script for loading and plotting NYC yellowcab data already
processed and binned in lat/lon

Copyright (C) 2016  Chris Havlin, <https://chrishavlin.wordpress.com>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

The database is NOT distributed with the code here.

Data source: NYC Taxi & Limousine Commision, TLC Trip Record Data
<http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml>
"""
import taxi_plotmod as tpm
import taxi_main as tm
import numpy as np
import matplotlib.pyplot as plt

# select directory and variable
data_dir='../data_products/sub_sampled'
varname='dist_mi'

# output figure names
fig_name='dist_map'

# load binned data
VarMean,VarCount,Varx,Vary=tm.read_gridded_file(data_dir,varname)
VarMean=VarMean * (VarCount > 5)

# plot settings
log_plot=False   # take log10 of the data?
minval=0.1       # min value for color scale
maxval=10        # max value for color scale
x_dim=5          # figure x dimension [inches]
y_dim=7          # figure y dimension [inches]
ShowFig=True     # display the figure?
SaveFig=True     # save the figure?
figdpi=1200      # dpi resolution for save
savename=data_dir+'/'+fig_name   # name of figure for save

# plot it
tpm.plt_map(VarMean,minval,maxval,Varx,Vary,log_plot,ShowFig,SaveFig,savename,x_dim,y_dim,figdpi)

# calculate and plot mean trip distance vs latitude
Mean_vs_y = np.zeros((Vary.size,1))
for iLat in range(0,len(Vary)-1):
    x_var=VarMean[iLat,:]
    x_var=x_var[np.where(x_var>0.1)]
    if len(x_var)!=0:
        Mean_vs_y[iLat] = np.mean(x_var)

fig=plt.figure()
plt.plot(Mean_vs_y,Vary)
plt.xlabel('Mean trip distance in miles')
plt.ylabel('Degree latitude')
axes = plt.gca()
axes.set_ylim([Vary.min(),Vary.max()])
axes.set_xlim([2,4.5])

print '...'
print '... close figure to continue ...'
plt.show()
gpl-3.0
-8,654,509,315,482,538,000
30.051948
97
0.724383
false
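The tail of pl_load_and_map.py above reduces the binned grid to a mean trip distance per latitude row, ignoring bins at or below 0.1. The same reduction is sketched below on a tiny synthetic grid; the values are invented.

import numpy as np

VarMean = np.array([[0.0, 2.5, 3.0],
                    [0.0, 0.0, 0.0],
                    [4.0, 0.0, 5.0]])

mean_vs_lat = np.zeros(VarMean.shape[0])
for i in range(VarMean.shape[0]):
    row = VarMean[i, :]
    row = row[row > 0.1]          # drop empty / sparsely sampled bins
    if row.size:
        mean_vs_lat[i] = row.mean()

print(mean_vs_lat)                # rows give 2.75, 0.0, 4.5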
tyarkoni/featureX
pliers/extractors/text.py
1
36443
''' Extractors that operate primarily or exclusively on Text stimuli. ''' import sys import itertools import logging import numpy as np import pandas as pd import scipy import nltk from nltk.sentiment.vader import SentimentIntensityAnalyzer from pliers.stimuli.text import TextStim, ComplexTextStim from pliers.extractors.base import Extractor, ExtractorResult from pliers.support.exceptions import PliersError from pliers.support.decorators import requires_nltk_corpus from pliers.datasets.text import fetch_dictionary from pliers.transformers import BatchTransformerMixin from pliers.utils import (attempt_to_import, verify_dependencies, flatten, listify) keyedvectors = attempt_to_import('gensim.models.keyedvectors', 'keyedvectors', ['KeyedVectors']) sklearn_text = attempt_to_import('sklearn.feature_extraction.text', 'sklearn_text', ['CountVectorizer']) spacy = attempt_to_import('spacy') transformers = attempt_to_import('transformers') class TextExtractor(Extractor): ''' Base Text Extractor class; all subclasses can only be applied to text. ''' _input_type = TextStim class ComplexTextExtractor(Extractor): ''' Base ComplexTextStim Extractor class; all subclasses can only be applied to ComplexTextStim instance. ''' _input_type = ComplexTextStim def _extract(self, stim): ''' Returns all words. ''' props = [(e.text, e.onset, e.duration) for e in stim.elements] vals, onsets, durations = map(list, zip(*props)) return ExtractorResult(vals, stim, self, ['word'], onsets, durations) class DictionaryExtractor(TextExtractor): ''' A generic dictionary-based extractor that supports extraction of arbitrary features contained in a lookup table. Args: dictionary (str, DataFrame): The dictionary containing the feature values. Either a string giving the path to the dictionary file, or a pandas DF. Format must be tab-delimited, with the first column containing the text key used for lookup. Subsequent columns each represent a single feature that can be used in extraction. variables (list): Optional subset of columns to keep from the dictionary. missing: Value to insert if no lookup value is found for a text token. Defaults to numpy's NaN. ''' _log_attributes = ('dictionary', 'variables', 'missing') VERSION = '1.0' def __init__(self, dictionary, variables=None, missing=np.nan): if isinstance(dictionary, str): self.dictionary = dictionary # for TranformationHistory logging dictionary = pd.read_csv(dictionary, sep='\t', index_col=0) else: self.dictionary = None self.data = dictionary if variables is None: variables = list(self.data.columns) else: self.data = self.data[variables] self.variables = variables # Set up response when key is missing self.missing = missing super().__init__() def _extract(self, stim): if stim.text not in self.data.index: vals = pd.Series(self.missing, self.variables) else: vals = self.data.loc[stim.text].fillna(self.missing) vals = vals.to_dict() return ExtractorResult(np.array([list(vals.values())]), stim, self, features=list(vals.keys())) class PredefinedDictionaryExtractor(DictionaryExtractor): ''' A generic Extractor that maps words onto values via one or more pre-defined dictionaries accessed via the web. Args: variables (list or dict): A specification of the dictionaries and column names to map the input TextStims onto. If a list, each element must be a string with the format 'dict/column', where the value before the slash gives the name of the dictionary, and the value after the slash gives the name of the column in that dictionary. 
These names can be found in the dictionaries.json specification file under the datasets submodule. Examples of valid values are 'affect/V.Mean.Sum' and 'subtlexusfrequency/Lg10WF'. If a dict, the keys are the names of the dictionary files (e.g., 'affect'), and the values are lists of columns to use (e.g., ['V.Mean.Sum', 'V.SD.Sum']). missing (object): Value to use when an entry for a word is missing in a dictionary (defaults to numpy's NaN). case_sensitive (bool): If True, entries in the dictionary are treated as case-sensitive (e.g., 'John' and 'john' are different words). force_retrieve (bool): If True, the source dictionary will always be retrieved/download, even if it exists locally. If False, a cached local version will be used if it exists. ''' _log_attributes = ('variables', 'missing', 'case_sensitive') VERSION = '1.0' def __init__(self, variables, missing=np.nan, case_sensitive=False, force_retrieve=False): self.case_sensitive = case_sensitive if isinstance(variables, (list, tuple)): _vars = {} for v in variables: v = v.split('/') if v[0] not in _vars: _vars[v[0]] = [] if len(v) == 2: _vars[v[0]].append(v[1]) variables = _vars dicts = [] for k, v in variables.items(): d = fetch_dictionary(k, force_retrieve=force_retrieve) if not case_sensitive: d.index = d.index.str.lower() if v: d = d[v] d.columns = ['{}_{}'.format(k, c) for c in d.columns] dicts.append(d) # Make sure none of the dictionaries have duplicate indices drop_dups = lambda d: d[~d.index.duplicated(keep='first')] dicts = [d if d.index.is_unique else drop_dups(d) for d in dicts] dictionary = pd.concat(dicts, axis=1, join='outer', sort=False) super().__init__( dictionary, missing=missing) class LengthExtractor(TextExtractor): ''' Extracts the length of the text in characters. ''' VERSION = '1.0' def _extract(self, stim): return ExtractorResult(np.array([[len(stim.text.strip())]]), stim, self, features=['text_length']) class NumUniqueWordsExtractor(TextExtractor): ''' Extracts the number of unique words used in the text. ''' _log_attributes = ('tokenizer',) VERSION = '1.0' def __init__(self, tokenizer=None): super().__init__() self.tokenizer = tokenizer @requires_nltk_corpus def _extract(self, stim): text = stim.text if self.tokenizer is None: if nltk is None: num_words = len(set(text.split())) else: num_words = len(set(nltk.word_tokenize(text))) else: num_words = len(set(self.tokenizer.tokenize(text))) return ExtractorResult(np.array([[num_words]]), stim, self, features=['num_unique_words']) class PartOfSpeechExtractor(BatchTransformerMixin, TextExtractor): ''' Tags parts of speech in text with nltk. ''' _batch_size = sys.maxsize VERSION = '1.0' @requires_nltk_corpus def _extract(self, stims): words = [w.text for w in stims] pos = nltk.pos_tag(words) if len(words) != len(pos): raise PliersError( "The number of words does not match the number of tagged words" "returned by nltk's part-of-speech tagger.") results = [] tagset = nltk.data.load('help/tagsets/upenn_tagset.pickle').keys() for i, s in enumerate(stims): pos_vector = dict.fromkeys(tagset, 0) pos_vector[pos[i][1]] = 1 values = [list(pos_vector.values())] results.append(ExtractorResult(values, s, self, features=list(pos_vector.keys()))) return results class WordEmbeddingExtractor(TextExtractor): ''' An extractor that uses a word embedding file to look up embedding vectors for text. Args: embedding_file (str): Path to a word embedding file. Assumed to be in word2vec format compatible with gensim. 
binary (bool): Flag indicating whether embedding file is saved in a binary format. prefix (str): Prefix for feature names in the ExtractorResult. unk_vector (numpy array or str): Default vector to use for texts not found in the embedding file. If None is specified, uses a vector with all zeros. If 'random' is specified, uses a vector with random values between -1.0 and 1.0. Must have the same dimensions as the embeddings. ''' _log_attributes = ('wvModel', 'prefix') def __init__(self, embedding_file, binary=False, prefix='embedding_dim', unk_vector=None): verify_dependencies(['keyedvectors']) self.wvModel = keyedvectors.KeyedVectors.load_word2vec_format( embedding_file, binary=binary) self.prefix = prefix self.unk_vector = unk_vector super().__init__() def _extract(self, stim): num_dims = self.wvModel.vector_size if stim.text in self.wvModel: embedding_vector = self.wvModel[stim.text] else: unk = self.unk_vector if hasattr(unk, 'shape') and unk.shape[0] == num_dims: embedding_vector = unk elif unk == 'random': embedding_vector = 2.0 * np.random.random(num_dims) - 1.0 else: # By default, UNKs will have zeroed-out vectors embedding_vector = np.zeros(num_dims) features = ['%s%d' % (self.prefix, i) for i in range(num_dims)] return ExtractorResult([embedding_vector], stim, self, features=features) class TextVectorizerExtractor(BatchTransformerMixin, TextExtractor): ''' Uses a scikit-learn Vectorizer to extract bag-of-features from text. Args: vectorizer (sklearn Vectorizer or str): a scikit-learn Vectorizer (or the name in a string) to extract with. Will use the CountVectorizer by default. Uses supporting *args and **kwargs. ''' _log_attributes = ('vectorizer',) _batch_size = sys.maxsize def __init__(self, vectorizer=None, *vectorizer_args, **vectorizer_kwargs): verify_dependencies(['sklearn_text']) if isinstance(vectorizer, sklearn_text.CountVectorizer): self.vectorizer = vectorizer elif isinstance(vectorizer, str): vec = getattr(sklearn_text, vectorizer) self.vectorizer = vec(*vectorizer_args, **vectorizer_kwargs) else: self.vectorizer = sklearn_text.CountVectorizer(*vectorizer_args, **vectorizer_kwargs) super().__init__() def _extract(self, stims): mat = self.vectorizer.fit_transform([s.text for s in stims]).toarray() results = [] for i, row in enumerate(mat): results.append( ExtractorResult([row], stims[i], self, features=self.vectorizer.get_feature_names())) return results class VADERSentimentExtractor(TextExtractor): ''' Uses nltk's VADER lexicon to extract (0.0-1.0) values for the positve, neutral, and negative sentiment of a TextStim. Also returns a compound score ranging from -1 (very negative) to +1 (very positive). ''' _log_attributes = ('analyzer',) VERSION = '1.0' def __init__(self): self.analyzer = SentimentIntensityAnalyzer() super().__init__() @requires_nltk_corpus def _extract(self, stim): scores = self.analyzer.polarity_scores(stim.text) features = ['sentiment_' + k for k in scores.keys()] return ExtractorResult([list(scores.values())], stim, self, features=features) class SpaCyExtractor(TextExtractor): ''' A generic class for Spacy Text extractors Uses SpaCy to extract features from text. Extracts features for every word (token) in a sentence. Args: extractor_type(str): The type of feature to extract. Must be one of 'doc' (analyze an entire sentence/document) or 'token' (analyze each word). features(list): A list of strings giving the names of spaCy features to extract. See SpacY documentation for details. 
By default, returns all available features for the given extractor type. model (str): The name of the language model to use. ''' def __init__(self, extractor_type='token', features=None, model='en_core_web_sm'): verify_dependencies(['spacy']) try: self.model = spacy.load(model) except (ImportError, OSError) as e: logging.warning("Spacy Models ('{}') not found. Downloading and" "installing".format(model)) spacy.cli.download(model) self.model = spacy.load(model) logging.info('Loaded model: {}'.format(self.model)) self.features = features self.extractor_type = extractor_type.lower() super().__init__() def _extract(self, stim): features_list = [] elements = self.model(stim.text) order_list = [] if self.extractor_type == 'token': if self.features is None: self.features = ['text', 'lemma_', 'pos_', 'tag_', 'dep_', 'shape_', 'is_alpha', 'is_stop', 'is_punct', 'sentiment', 'is_ascii', 'is_digit'] elif self.extractor_type == 'doc': elements = [elem.as_doc() for elem in list(elements.sents)] if self.features is None: self.features = ['text', 'is_tagged', 'is_parsed', 'is_sentenced', 'sentiment'] else: raise(ValueError("Invalid extractor_type; must be one of 'token'" " or 'doc'.")) features_list = [] for elem in elements: arr = [] for feat in self.features: arr.append(getattr(elem, feat)) features_list.append(arr) order_list = list(range(1, len(elements) + 1)) return ExtractorResult(features_list, stim, self, features=self.features, orders=order_list) class BertExtractor(ComplexTextExtractor): ''' Returns encodings from the last hidden layer of BERT or similar models (ALBERT, DistilBERT, RoBERTa, CamemBERT). Excludes special tokens. Base class for other Bert extractors. Args: pretrained_model (str): A string specifying which transformer model to use. Can be any pretrained BERT or BERT-derived (ALBERT, DistilBERT, RoBERTa, CamemBERT etc.) models listed at https://huggingface.co/transformers/pretrained_models.html or path to custom model. tokenizer (str): Type of tokenization used in the tokenization step. If different from model, out-of-vocabulary tokens may be treated as unknown tokens. model_class (str): Specifies model type. Must be one of 'AutoModel' (encoding extractor) or 'AutoModelWithLMHead' (language model). These are generic model classes, which use the value of pretrained_model to infer the model-specific transformers class (e.g. BertModel or BertForMaskedLM for BERT, RobertaModel or RobertaForMaskedLM for RoBERTa). Fixed by each subclass. framework (str): name deep learning framework to use. Must be 'pt' (PyTorch) or 'tf' (tensorflow). Defaults to 'pt'. return_input (bool): if True, the extractor returns encoded token and encoded word as features. model_kwargs (dict): Named arguments for transformer model. See https://huggingface.co/transformers/main_classes/model.html tokenizer_kwargs (dict): Named arguments for tokenizer. 
See https://huggingface.co/transformers/main_classes/tokenizer.html ''' _log_attributes = ('pretrained_model', 'framework', 'tokenizer_type', 'model_class', 'return_input', 'model_kwargs', 'tokenizer_kwargs') _model_attributes = ('pretrained_model', 'framework', 'model_class', 'tokenizer_type') def __init__(self, pretrained_model='bert-base-uncased', tokenizer='bert-base-uncased', model_class='AutoModel', framework='pt', return_input=False, model_kwargs=None, tokenizer_kwargs=None): verify_dependencies(['transformers']) if framework not in ['pt', 'tf']: raise(ValueError('''Invalid framework; must be one of 'pt' (pytorch) or 'tf' (tensorflow)''')) self.pretrained_model = pretrained_model self.tokenizer_type = tokenizer self.model_class = model_class self.framework = framework self.return_input = return_input self.model_kwargs = model_kwargs if model_kwargs else {} self.tokenizer_kwargs = tokenizer_kwargs if tokenizer_kwargs else {} model = model_class if self.framework == 'pt' else 'TF' + model_class self.model = getattr(transformers, model).from_pretrained( pretrained_model, **self.model_kwargs) self.tokenizer = transformers.BertTokenizer.from_pretrained( tokenizer, **self.tokenizer_kwargs) super().__init__() def _mask_words(self, wds): ''' Called by _preprocess method. Takes list of words in the Stim as input (i.e. the .text attribute for each TextStim in the ComplexTextStim). If class has mask attribute, replaces word in the input sequence with [MASK] token based on the value of mask (either index in the sequence, or word to replace). Here, returns list of words (without masking) ''' return wds def _preprocess(self, stims): ''' Extracts text, onset, duration from ComplexTextStim, masks target words (if relevant), tokenizes the input, and casts words, onsets, and durations to token-level lists. Called within _extract method to prepare input for the model. ''' els = [(e.text, e.onset, e.duration) for e in stims.elements] wds, ons, dur = map(list, zip(*els)) tok = [self.tokenizer.tokenize(w) for w in self._mask_words(wds)] n_tok = [len(t) for t in tok] stims.name = ' '.join(wds) if stims.name == '' else stims.name wds, ons, dur = map(lambda x: np.repeat(x, n_tok), [wds, ons, dur]) tok = list(flatten(tok)) idx = self.tokenizer.encode(tok, return_tensors=self.framework) return wds, ons, dur, tok, idx def _extract(self, stims): ''' Takes stim as input, preprocesses it, feeds it to Bert model, then postprocesses the output ''' wds, ons, dur, tok, idx = self._preprocess(stims) preds = self.model(idx) data, feat, ons, dur = self._postprocess(stims, preds, tok, wds, ons, dur) return ExtractorResult(data, stims, self, features=feat, onsets=ons, durations=dur) def _postprocess(self, stims, preds, tok, wds, ons, dur): ''' Postprocesses model output (subsets relevant information, transforms it where relevant, adds model metadata). Takes prediction array, token list, word list, onsets and durations and input. Here, returns token-level encodings (excluding special tokens). 
''' out = preds.last_hidden_state[:, 1:-1, :] if self.framework == 'pt': out = out.detach() out = out.numpy().squeeze() data = [out.tolist()] feat = ['encoding'] if self.return_input: data += [tok, wds] feat += ['token', 'word'] return data, feat, ons, dur def _to_df(self, result): res_df = pd.DataFrame(dict(zip(result.features, result._data))) res_df['object_id'] = range(res_df.shape[0]) return res_df class BertSequenceEncodingExtractor(BertExtractor): ''' Extract contextualized sequence encodings using pretrained BERT (or similar models, e.g. DistilBERT). Args: pretrained_model (str): A string specifying which transformer model to use. Can be any pretrained BERT or BERT-derived (ALBERT, DistilBERT, RoBERTa, CamemBERT etc.) models listed at https://huggingface.co/transformers/pretrained_models.html or path to custom model. tokenizer (str): Type of tokenization used in the tokenization step. If different from model, out-of-vocabulary tokens may be treated as unknown tokens. framework (str): name deep learning framework to use. Must be 'pt' (PyTorch) or 'tf' (tensorflow). Defaults to 'pt'. pooling (str): defines numpy function to use to pool token-level encodings (excludes special tokens). return_special (str): defines whether to return encoding for special sequence tokens ('[CLS]' or '[SEP]'), instead of pooling of other tokens. Must be '[CLS]', '[SEP]', or 'pooler_output'. The latter option returns last layer hidden-state of [CLS] token further processed by a linear layer and tanh activation function, with linear weights trained on the next sentence classification task. Note that some Bert-derived models, such as DistilBert, were not trained on this task. For these models, setting this argument to 'pooler_output' will return an error. return_input (bool): If True, the extractor returns an additional feature column with the encoded sequence. model_kwargs (dict): Named arguments for pretrained model. See: https://huggingface.co/transformers/main_classes/model.html and https://huggingface.co/transformers/model_doc/bert.html tokenizer_kwargs (dict): Named arguments for tokenizer. See https://huggingface.co/transformers/main_classes/tokenizer.html ''' _log_attributes = ('pretrained_model', 'framework', 'tokenizer_type', 'pooling', 'return_special', 'return_input', 'model_class', 'model_kwargs', 'tokenizer_kwargs') _model_attributes = ('pretrained_model', 'framework', 'model_class', 'pooling', 'return_special', 'tokenizer_type') def __init__(self, pretrained_model='bert-base-uncased', tokenizer='bert-base-uncased', framework='pt', pooling='mean', return_special=None, return_input=False, model_kwargs=None, tokenizer_kwargs=None): if return_special and pooling: logging.warning('Pooling and return_special argument are ' 'mutually exclusive. 
Setting pooling to None.') pooling = None if pooling: try: getattr(np, pooling) except: raise(ValueError('Pooling must be a valid numpy function.')) elif return_special: if return_special not in ['[CLS]', '[SEP]', 'pooler_output']: raise(ValueError('Value of return_special argument must be ' 'one of \'[CLS]\', \'[SEP]\' or \'pooler_output\'')) self.pooling = pooling self.return_special = return_special super().__init__( pretrained_model=pretrained_model, tokenizer=tokenizer, return_input=return_input, model_class='AutoModel', framework=framework, model_kwargs=model_kwargs, tokenizer_kwargs=tokenizer_kwargs) def _postprocess(self, stims, preds, tok, wds, ons, dur): try: dur = ons[-1] + dur[-1] - ons[0] except: dur = None ons = ons[0] if self.pooling: pool_func = getattr(np, self.pooling) p = preds.last_hidden_state[0, 1:-1, :] if self.framework == 'pt': p = p.detach() out = pool_func(p.numpy().squeeze(), axis=0) elif self.return_special: if self.return_special == '[CLS]': out = preds.last_hidden_state[:,0,:] elif self.return_special == '[SEP]': out = preds.last_hidden_state[:,-1,:] else: out = preds.pooler_output if self.framework == 'pt': out = out.detach() out = out.numpy().squeeze() data = [[out.tolist()]] feat = ['encoding'] if self.return_input: data += [stims.name] feat += ['sequence'] return data, feat, ons, dur class BertLMExtractor(BertExtractor): ''' Returns masked words predictions from BERT (or similar, e.g. DistilBERT) models. Args: pretrained_model (str): A string specifying which transformer model to use. Can be any pretrained BERT or BERT-derived (ALBERT, DistilBERT, RoBERTa, CamemBERT etc.) models listed at https://huggingface.co/transformers/pretrained_models.html or path to custom model. tokenizer (str): Type of tokenization used in the tokenization step. If different from model, out-of-vocabulary tokens may be treated as unknown tokens. framework (str): name deep learning framework to use. Must be 'pt' (PyTorch) or 'tf' (tensorflow). Defaults to 'pt'. mask (int or str): Words to be masked (string) or indices of words in the sequence to be masked (indexing starts at 0). Can be either a single word/index or a list of words/indices. If str is passed and more than one word in the input matches the string, only the first one is masked. top_n (int): Specifies how many of the highest-probability tokens are to be returned. Mutually exclusive with target and threshold. target (str or list): Vocabulary token(s) for which probability is to be returned. Tokens defined in the vocabulary change across tokenizers. Mutually exclusive with top_n and threshold. threshold (float): If defined, only values above this threshold will be returned. Mutually exclusive with top_n and target. return_softmax (bool): if True, returns probability scores instead of raw predictions. return_masked_word (bool): if True, returns masked word (if defined in the tokenizer vocabulary) and its probability. model_kwargs (dict): Named arguments for pretrained model. See: https://huggingface.co/transformers/main_classes/model.html and https://huggingface.co/transformers/model_doc/bert.html. tokenizer_kwargs (dict): Named arguments for tokenizer. See https://huggingface.co/transformers/main_classes/tokenizer.html. 
''' _log_attributes = ('pretrained_model', 'framework', 'top_n', 'target', 'mask', 'tokenizer_type', 'return_softmax', 'return_masked_word') _model_attributes = ('pretrained_model', 'framework', 'top_n', 'mask', 'target', 'threshold', 'tokenizer_type') def __init__(self, pretrained_model='bert-base-uncased', tokenizer='bert-base-uncased', framework='pt', mask='MASK', top_n=None, threshold=None, target=None, return_softmax=False, return_masked_word=False, return_input=False, model_kwargs=None, tokenizer_kwargs=None): if any([top_n and target, top_n and threshold, threshold and target]): raise ValueError('top_n, threshold and target arguments ' 'are mutually exclusive') if type(mask) not in [int, str]: raise ValueError('Mask must be a string or an integer.') super().__init__(pretrained_model=pretrained_model, tokenizer=tokenizer, framework=framework, return_input=return_input, model_class='AutoModelWithLMHead', model_kwargs=model_kwargs, tokenizer_kwargs=tokenizer_kwargs) self.target = listify(target) if self.target: missing = set(self.target) - set(self.tokenizer.vocab.keys()) if missing: logging.warning(f'{missing} not in vocabulary. Dropping.') present = set(self.target) & set(self.tokenizer.vocab.keys()) self.target = list(present) if self.target == []: raise ValueError('No valid target token. Import transformers' ' and run transformers.BertTokenizer.from_pretrained' f'(\'{tokenizer}\').vocab.keys() to see available tokens') self.mask = mask self.top_n = top_n self.threshold = threshold self.return_softmax = return_softmax self.return_masked_word = return_masked_word def update_mask(self, new_mask): ''' Updates mask attribute with value of new_mask. Args: new_mask (str or int): word to mask (str) or index/position of the word to mask in input sequence (int). Indexing starts at 0. ''' if type(new_mask) not in [str, int]: raise ValueError('Mask must be a string or an integer.') self.mask = new_mask def _mask_words(self, wds): mwds = wds.copy() if isinstance(self.mask, str): self.mask_token = self.mask self.mask_pos = np.where(np.array(mwds)==self.mask)[0][0] else: self.mask_pos = self.mask self.mask_token = mwds[self.mask] mwds[self.mask_pos] = '[MASK]' return mwds def _postprocess(self, stims, preds, tok, wds, ons, dur): if self.framework == 'pt': preds = preds.logits[:,1:-1,:].detach().numpy() else: preds = preds.logits[:,1:-1,:].numpy() if self.return_softmax: preds = scipy.special.softmax(preds, axis=-1) out_idx = preds[0,self.mask_pos,:].argsort()[::-1] if self.top_n: sub_idx = out_idx[:self.top_n] elif self.target: sub_idx = self.tokenizer.convert_tokens_to_ids(self.target) elif self.threshold: sub_idx = np.where(preds[0,self.mask_pos,:] >= self.threshold)[0] else: sub_idx = out_idx out_idx = [idx for idx in out_idx if idx in sub_idx] feat = self.tokenizer.convert_ids_to_tokens(out_idx) feat = [f.capitalize() if len(f)==len(f.encode()) else f for f in feat] data = [listify(p) for p in preds[0,self.mask_pos,out_idx]] if self.return_masked_word: feat, data = self._return_masked_word(preds, feat, data) if self.return_input: data += [stims.name] feat += ['sequence'] mask_ons = listify(stims.elements[self.mask_pos].onset) mask_dur = listify(stims.elements[self.mask_pos].duration) return data, feat, mask_ons, mask_dur def _return_masked_word(self, preds, feat, data): if self.mask_token in self.tokenizer.vocab: true_vocab_idx = self.tokenizer.vocab[self.mask_token] true_score = preds[0,self.mask_pos,true_vocab_idx] else: true_score = np.nan logging.warning('True token not in vocabulary. 
Returning NaN') feat += ['true_word', 'true_word_score'] data += [self.mask_token, true_score] return feat, data class BertSentimentExtractor(BertExtractor): ''' Extracts sentiment for sequences using BERT (or similar, e.g. DistilBERT) models fine-tuned for sentiment classification. Args: pretrained_model (str): A string specifying which transformer model to use (must be one fine-tuned for sentiment classification) tokenizer (str): Type of tokenization used in the tokenization step. framework (str): name deep learning framework to use. Must be 'pt' (PyTorch) or 'tf' (tensorflow). Defaults to 'pt'. return_softmax (bool): If True, the extractor returns softmaxed sentiment scores instead of raw model predictions. return_input (bool): If True, the extractor returns an additional feature column with the encoded sequence. model_kwargs (dict): Named arguments for pretrained model. tokenizer_kwargs (dict): Named arguments for tokenizer. ''' _log_attributes = ('pretrained_model', 'framework', 'tokenizer_type', 'return_softmax', 'return_input', 'model_class', 'model_kwargs', 'tokenizer_kwargs') _model_attributes = ('pretrained_model', 'framework', 'tokenizer_type', 'return_input', 'return_softmax',) def __init__(self, pretrained_model='distilbert-base-uncased-finetuned-sst-2-english', tokenizer='bert-base-uncased', framework='pt', return_softmax=True, return_input=False, model_kwargs=None, tokenizer_kwargs=None): self.return_softmax = return_softmax super().__init__( pretrained_model=pretrained_model, tokenizer=tokenizer, framework=framework, return_input=return_input, model_class='AutoModelForSequenceClassification', model_kwargs=model_kwargs, tokenizer_kwargs=tokenizer_kwargs) def _postprocess(self, stims, preds, tok, wds, ons, dur): data = preds.logits if self.framework == 'pt': data = data.detach() data = data.numpy().squeeze() if self.return_softmax: data = scipy.special.softmax(data) data = [listify(d) for d in data.tolist()] tok = [' '.join(wds)] try: dur = ons[-1] + dur[-1] - ons[0] except: dur = None ons = ons[0] feat = ['sent_pos', 'sent_neg'] if self.return_input: data += tok feat += ['sequence'] return data, feat, ons, dur class WordCounterExtractor(ComplexTextExtractor): ''' Extracts number of times each unique word has occurred within text Args: log_scale(bool): specifies if count values are to be returned in log- scale (defaults to False) ''' _log_attributes = ('case_sensitive', 'log_scale') def __init__(self, case_sensitive=False, log_scale=False): self.log_scale = log_scale self.case_sensitive = case_sensitive self.features = ['log_word_count'] if self.log_scale else ['word_count'] super().__init__() def _extract(self, stims): onsets = [s.onset for s in stims] durations = [s.duration for s in stims] tokens = [s.text for s in stims] tokens = [t if self.case_sensitive else t.lower() for t in tokens] word_counter = pd.Series(tokens).groupby(tokens).cumcount() + 1 if self.log_scale: word_counter = np.log(word_counter) return ExtractorResult(word_counter, stims, self, features=self.features, onsets=onsets, durations=durations)
bsd-3-clause
5,275,248,594,534,816,000
41.523921
84
0.59578
false
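The BertLMExtractor defined in the file above is essentially a wrapper around masked-word prediction. A minimal stand-alone sketch of that prediction step, written directly against the Hugging Face transformers API (the model name and example sentence are placeholders, and AutoModelForMaskedLM is used here instead of the older AutoModelWithLMHead class referenced in the file):

import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")

inputs = tokenizer("The cat sat on the [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Locate the [MASK] slot and softmax its logits over the vocabulary.
mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
probs = torch.softmax(logits[0, mask_pos], dim=-1)

# top_n-style output: the five most probable fillers for the masked slot.
top = torch.topk(probs, k=5)
print(tokenizer.convert_ids_to_tokens(top.indices[0].tolist()))
print(top.values[0].tolist())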
daeyun/dshinpy
scripts/ipython_start.py
1
3953
""" %run -i ipython_start.py {page_width} """ import sys from IPython import display as IPy_display def set_page_width(width='1700', full=False): if full: width = '100%' else: if not width.endswith('%'): width = '{}px'.format(width) IPy_display.display(IPy_display.HTML( "<style>.container {{ width:{width} !important; }}</style>".format(width=width))) if len(sys.argv) > 1: set_page_width(sys.argv[1]) else: set_page_width() from os import path def __extend_sys_path(prepend_paths=(), append_paths=()): pathset = set(sys.path) for p in prepend_paths: fullpath = path.realpath(path.expanduser(p)) if fullpath not in pathset: sys.path.insert(0, fullpath) for p in append_paths: fullpath = path.realpath(path.expanduser(p)) if fullpath not in pathset: sys.path.append(fullpath) sys.path = [p for p in sys.path if p] __extend_sys_path( prepend_paths=[], append_paths=['~/Dropbox/git/dshinpy/', '~/Dropbox/git/mvshape/', '~/Dropbox/git/multiview_shape/', '~/git/mvshape/python'], ) import abc import array import collections import contextlib import copy import functools import glob import itertools import math import multiprocessing as mp import os import random import re import struct import threading import time import typing import psutil import numpy as np import numpy.linalg as la import matplotlib.pyplot as pt import tensorflow as tf from IPython import get_ipython ipython = get_ipython() ipython.magic('reload_ext autoreload') ipython.magic('autoreload 2') # ipython.magic('aimport dshin') # ipython.magic('aimport multiview_shape') # TODO(daeyun): this is temporary. ipython.run_cell_magic('javascript', '', """ require(['codemirror/keymap/vim'], function() { // enable the 'Ctrl-C' mapping // change the code mirror configuration var cm_config = require("notebook/js/cell").Cell.options_default.cm_config; delete cm_config.extraKeys['Ctrl-C']; // change settings for existing cells Jupyter.notebook.get_cells().map(function(cell) { var cm = cell.code_mirror; if (cm) { delete cm.getOption('extraKeys')['Ctrl-C']; } }); // map the keys CodeMirror.Vim.map("<C-c>", "<Esc>", "insert"); }); """) from IPython.core.magic import (register_line_magic, register_cell_magic, register_line_cell_magic, needs_local_scope) from dshin import log from dshin import geom2d from dshin import geom3d from dshin import transforms @register_cell_magic @needs_local_scope def tf_init(self, cell): global sess, g, sess_conf tf.reset_default_graph() # conf = nn.utils.default_sess_config() try: if 'sess' in globals(): sess.close() except: pass sess_conf = tf.ConfigProto( device_count={'GPU': 0} ) g = tf.get_default_graph() sess = tf.InteractiveSession(graph=g, config=sess_conf) ip = get_ipython() ip.run_cell(cell) filename = nn.graph_utils.save_graph_text_summary(g, random_dirname=True, verbose=False) IPy_display.display(IPy_display.HTML(""" <span style="font-size:80%">{0}</span> """.format(filename))) # sess.run(tf.initialize_all_variables()) # sess.run(tf.initialize_local_variables()) # tf.train.start_queue_runners(sess) from IPython import Application @register_line_magic def restart(line): app = Application.instance() app.kernel.do_shutdown(True) def import_optional(module_name, alias=None, is_verbose=False): if alias is None: alias = module_name try: globals()[alias] = __import__(module_name) except ImportError: if is_verbose: log.info('Optional dependency {} could not be imported.'.format(module_name)) # import_optional('tflearn') import_optional('tqdm') # Aliases arr = np.array norm = la.norm
mpl-2.0
-8,503,458,028,739,978,000
22.813253
128
0.65621
false
google-research/google-research
smurf/smurf_plotting.py
1
5999
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SMURF plotting. This library provides some plotting functionality for optical flow. """ # pylint:skip-file import io import os import time import matplotlib matplotlib.use('Agg') # None-interactive plots do not need tk import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from smurf import smurf_utils # How much to scale motion magnitude in visualization. _FLOW_SCALING_FACTOR = 50.0 def print_log(log, epoch=None, mean_over_num_steps=1): """Print log returned by smurf.train(...).""" if epoch is None: status = '' else: status = '{} -- '.format(epoch) status += 'total-loss: {:.6f}'.format( np.mean(log['total-loss'][-mean_over_num_steps:])) for key in sorted(log): if key not in ['total-loss']: loss_mean = np.mean(log[key][-mean_over_num_steps:]) status += ', {}: {:.6f}'.format(key, loss_mean) print(status) def print_eval(eval_dict): """Print eval_dict returned by the eval_function in smurf_main.py.""" status = ''.join( ['{}: {:.6f}, '.format(key, eval_dict[key]) for key in sorted(eval_dict)]) print(status[:-2]) def time_data_it(data_it, simulated_train_time_ms=100.0): print('Timing training iterator with simulated train time of {:.2f}ms'.format( simulated_train_time_ms)) for i in range(100): start = time.time() _ = data_it.get_next() end = time.time() print(i, 'Time to get one batch (ms):', (end - start) * 1000) if simulated_train_time_ms > 0.0: plt.pause(simulated_train_time_ms / 1000.) def save_image_as_png(image, filename): image_uint8 = tf.image.convert_image_dtype(image, tf.uint8, saturate=True) image_png = tf.image.encode_png(image_uint8) tf.io.write_file(filename, image_png) def plot_data(data_it, plot_dir, num_plots): print('Saving images from the dataset to', plot_dir) for i, image_batch in enumerate(data_it): if i >= num_plots: break for j, images in enumerate(image_batch['images']): for k, image in enumerate(images): save_image_as_png( image, os.path.join(plot_dir, '{}_{}_{}.png'.format(i, j, k))) def flow_to_rgb(flow): """Compute an RGB visualization of a flow field.""" shape = tf.cast(tf.shape(flow), tf.float32) height, width = shape[-3], shape[-2] scaling = _FLOW_SCALING_FACTOR / (height**2 + width**2)**0.5 # Compute angles and lengths of motion vectors. motion_angle = tf.atan2(flow[Ellipsis, 1], flow[Ellipsis, 0]) motion_magnitude = (flow[Ellipsis, 1]**2 + flow[Ellipsis, 0]**2)**0.5 # Visualize flow using the HSV color space, where angles are represented by # hue and magnitudes are represented by saturation. flow_hsv = tf.stack([((motion_angle / np.math.pi) + 1.) / 2., tf.clip_by_value(motion_magnitude * scaling, 0.0, 1.0), tf.ones_like(motion_magnitude)], axis=-1) # Transform colors from HSV to RGB color space for plotting. 
return tf.image.hsv_to_rgb(flow_hsv) def complete_paper_plot(plot_dir, index, image1, image2, flow_uv, ground_truth_flow_uv=None, flow_valid_occ=None, predicted_occlusion=None, ground_truth_occlusion=None, frame_skip=None): def post_imshow(name, plot_dir): plt.xticks([]) plt.yticks([]) if frame_skip is not None: filename = str(index) + '_' + str(frame_skip) + '_' + name plt.savefig(os.path.join(plot_dir, filename), bbox_inches='tight') else: filepath = str(index) + '_' + name plt.savefig(os.path.join(plot_dir, filepath), bbox_inches='tight') plt.clf() warp = smurf_utils.flow_to_warp(tf.convert_to_tensor(flow_uv)) image1_reconstruction = smurf_utils.resample(tf.expand_dims(image2, axis=0), tf.expand_dims(warp, axis=0))[0] flow_uv = -flow_uv[:, :, ::-1] if ground_truth_flow_uv is not None: ground_truth_flow_uv = -ground_truth_flow_uv[:, :, ::-1] plt.figure() plt.clf() plt.imshow(image1) post_imshow('image1_rgb', plot_dir) plt.imshow(image1_reconstruction) post_imshow('image1_reconstruction_rgb', plot_dir) plt.imshow(image1_reconstruction * predicted_occlusion) post_imshow('image1_reconstruction_occlusions_rgb', plot_dir) plt.imshow((image1 + image2) / 2.) post_imshow('image_rgb', plot_dir) plt.imshow(flow_to_rgb(flow_uv)) post_imshow('predicted_flow', plot_dir) if ground_truth_flow_uv is not None and flow_valid_occ is not None: plt.imshow(flow_to_rgb(ground_truth_flow_uv * flow_valid_occ)) post_imshow('ground_truth_flow', plot_dir) endpoint_error = np.sum( (ground_truth_flow_uv - flow_uv)**2, axis=-1, keepdims=True)**0.5 plt.imshow( (endpoint_error * flow_valid_occ)[:, :, 0], cmap='viridis', vmin=0, vmax=40) post_imshow('flow_error', plot_dir) if predicted_occlusion is not None: plt.imshow((predicted_occlusion[:, :, 0]) * 255, cmap='Greys') post_imshow('predicted_occlusion', plot_dir) if ground_truth_occlusion is not None: plt.imshow((ground_truth_occlusion[:, :, 0]) * 255, cmap='Greys') post_imshow('ground_truth_occlusion', plot_dir) plt.close('all')
apache-2.0
2,627,034,293,135,984,000
32.327778
80
0.633772
false
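flow_to_rgb in smurf_plotting.py above encodes flow direction as hue and motion magnitude as saturation. A rough NumPy/Matplotlib equivalent of that colour mapping, handy for inspecting flow fields outside TensorFlow (the scaling constant mirrors the file's _FLOW_SCALING_FACTOR; the synthetic flow field is only for illustration):

import numpy as np
import matplotlib.colors as mcolors

def flow_to_rgb_np(flow, scaling_factor=50.0):
    # flow: float array of shape (H, W, 2) holding per-pixel (dx, dy).
    height, width = flow.shape[:2]
    scaling = scaling_factor / np.hypot(height, width)
    angle = np.arctan2(flow[..., 1], flow[..., 0])        # direction -> hue
    magnitude = np.hypot(flow[..., 1], flow[..., 0])      # length -> saturation
    hsv = np.stack([
        (angle / np.pi + 1.0) / 2.0,
        np.clip(magnitude * scaling, 0.0, 1.0),
        np.ones_like(magnitude),
    ], axis=-1)
    return mcolors.hsv_to_rgb(hsv)

# Example: a flow field that rotates around the image centre.
ys, xs = np.mgrid[0:64, 0:64].astype(np.float32)
flow = np.stack([32.0 - ys, xs - 32.0], axis=-1)
rgb = flow_to_rgb_np(flow)  # (64, 64, 3) RGB array in [0, 1]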
kjung/scikit-learn
examples/neural_networks/plot_mlp_alpha.py
19
4088
""" ================================================ Varying regularization in Multi-layer Perceptron ================================================ A comparison of different values for regularization parameter 'alpha' on synthetic datasets. The plot shows that different alphas yield different decision functions. Alpha is a parameter for regularization term, aka penalty term, that combats overfitting by constraining the size of the weights. Increasing alpha may fix high variance (a sign of overfitting) by encouraging smaller weights, resulting in a decision boundary plot that appears with lesser curvatures. Similarly, decreasing alpha may fix high bias (a sign of underfitting) by encouraging larger weights, potentially resulting in a more complicated decision boundary. """ print(__doc__) # Author: Issam H. Laradji # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_moons, make_circles, make_classification from sklearn.neural_network import MLPClassifier h = .02 # step size in the mesh alphas = np.logspace(-5, 3, 5) names = [] for i in alphas: names.append('alpha ' + str(i)) classifiers = [] for i in alphas: classifiers.append(MLPClassifier(alpha=i, random_state=1)) X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=0, n_clusters_per_class=1) rng = np.random.RandomState(2) X += 2 * rng.uniform(size=X.shape) linearly_separable = (X, y) datasets = [make_moons(noise=0.3, random_state=0), make_circles(noise=0.2, factor=0.5, random_state=1), linearly_separable] figure = plt.figure(figsize=(17, 9)) i = 1 # iterate over datasets for X, y in datasets: # preprocess dataset, split into training and test part X = StandardScaler().fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4) x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # just plot the dataset first cm = plt.cm.RdBu cm_bright = ListedColormap(['#FF0000', '#0000FF']) ax = plt.subplot(len(datasets), len(classifiers) + 1, i) # Plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) i += 1 # iterate over classifiers for name, clf in zip(names, classifiers): ax = plt.subplot(len(datasets), len(classifiers) + 1, i) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. 
if hasattr(clf, "decision_function"): Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) else: Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] # Put the result into a color plot Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) # Plot also the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) ax.set_title(name) ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'), size=15, horizontalalignment='right') i += 1 figure.subplots_adjust(left=.02, right=.98) plt.show()
bsd-3-clause
-5,217,613,598,453,486,000
35.176991
79
0.621086
false
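The script above sweeps alpha by hand to visualise its effect; in practice the same sweep is usually driven by cross-validation. A short sketch (the dataset and grid are chosen arbitrarily, not taken from the example):

import numpy as np
from sklearn.datasets import make_moons
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier

X, y = make_moons(noise=0.3, random_state=0)
search = GridSearchCV(
    MLPClassifier(max_iter=2000, random_state=1),
    param_grid={"alpha": np.logspace(-5, 3, 5)},
    cv=5,
)
search.fit(X, y)
print(search.best_params_, search.best_score_)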
SKravitsky/MachineLearningServer
MachineLearning/CSV_Prediction.py
1
1337
import os

import pandas as pd
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score


def get_csv():
    # Load the training data if it is present next to this script.
    if os.path.exists("Update2.csv"):
        return pd.read_csv("Update2.csv")
    raise FileNotFoundError("Update2.csv not found")


def scrub_csv(data):
    # The first three columns are the features; "Prediction" holds the labels.
    features = list(data.columns[:3])
    X = data[features]
    Y = data["Prediction"]
    return X, Y, features


def prediction(F, T, N):
    clf = tree.DecisionTreeClassifier()
    F_train, F_test, T_train, T_test = train_test_split(F, T, test_size=.2)
    clf.fit(F_train, T_train)

    predictions = clf.predict(F_test)
    print(accuracy_score(T_test, predictions))

    # Graphviz expects class names as strings.
    class_names_tree = [str(name) for name in clf.classes_.tolist()]

    tree.export_graphviz(clf, out_file='tree.dot', feature_names=N,
                         class_names=class_names_tree, filled=True, rounded=True)
    os.system('dot -Tpng tree.dot -o tree.png')


if __name__ == "__main__":
    df = get_csv()
    features, targets, names = scrub_csv(df)
    prediction(features, targets, names)
apache-2.0
-3,554,030,434,061,879,300
24.226415
112
0.635752
false
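CSV_Prediction.py above shells out to the dot binary to turn tree.dot into a PNG. If the Python graphviz package is available (it still needs the Graphviz binaries installed), the same rendering can be done without os.system; a small self-contained sketch on a toy dataset:

import graphviz
from sklearn import tree
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
clf = tree.DecisionTreeClassifier().fit(X, y)

# export_graphviz returns the DOT source when out_file is None.
dot_source = tree.export_graphviz(clf, out_file=None, filled=True, rounded=True)
graphviz.Source(dot_source).render("tree", format="png", cleanup=True)  # writes tree.png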
animeshsrivastava24/3D-SCANNER-IITB
Miscellanous Applications/Read_Plot_Single_Frame.py
1
1065
'''This code is a part of the 3D Scanner project'''
'''Developed by team SAAS'''
'''Ekalavya 2017'''
'''IIT Bombay'''

'''This code is used to generate scatter plots from the txt files that the software makes'''
'''It reads a previously saved frame, parses its X and Y co-ordinate points
and plots them (with Z fixed at 0) using matplotlib.pyplot as plt'''
# import the necessary modules
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

path = input("enter path")
with open(path, 'r') as f:
    data = f.read()

# extract the coordinates from the txt file: the two blocks are separated by a
# blank line and each block is a bracketed, comma-separated list of integers
coordinates = data.split('\n\n')
coordinates[0] = coordinates[0][1:(len(coordinates[0]) - 1)]
coordinates[1] = coordinates[1][1:(len(coordinates[1]) - 1)]
X = [int(v) for v in coordinates[0].split(',')]
Y = [int(v) for v in coordinates[1].split(',')]
Z = [0] * len(X)

# make a scatter plot
surf = ax.scatter(X, Y, Z)
plt.show()
mit
3,594,703,377,847,266,000
27.783784
130
0.71831
false
jameshensman/GPclust
GPclust/OMGP.py
1
8106
# Copyright (c) 2012, 2013, 2014 James Hensman # Licensed under the GPL v3 (see LICENSE.txt) import numpy as np from .collapsed_mixture import CollapsedMixture import GPy from GPy.util.linalg import mdot, pdinv, backsub_both_sides, dpotrs, jitchol, dtrtrs from GPy.util.linalg import tdot_numpy as tdot class OMGP(CollapsedMixture): """ Overlapping mixtures of Gaussian processes """ def __init__(self, X, Y, K=2, kernels=None, variance=1., alpha=1., prior_Z='symmetric', name='OMGP'): N, self.D = Y.shape self.Y = Y self.YYT = tdot(self.Y) self.X = X if kernels == None: self.kern = [] for i in range(K): self.kern.append(GPy.kern.RBF(input_dim=1)) else: self.kern = kernels CollapsedMixture.__init__(self, N, K, prior_Z, alpha, name) self.link_parameter(GPy.core.parameterization.param.Param('variance', variance, GPy.core.parameterization.transformations.Logexp())) self.link_parameters(*self.kern) def parameters_changed(self): """ Set the kernel parameters """ self.update_kern_grads() def do_computations(self): """ Here we do all the computations that are required whenever the kernels or the variational parameters are changed. """ if len(self.kern) < self.K: self.kern.append(self.kern[-1].copy()) self.link_parameter(self.kern[-1]) if len(self.kern) > self.K: for kern in self.kern[self.K:]: self.unlink_parameter(kern) self.kern = self.kern[:self.K] def update_kern_grads(self): """ Set the derivative of the lower bound wrt the (kernel) parameters """ grad_Lm_variance = 0.0 for i, kern in enumerate(self.kern): K = kern.K(self.X) B_inv = np.diag(1. / (self.phi[:, i] / self.variance)) # Numerically more stable version using cholesky decomposition #alpha = linalg.cho_solve(linalg.cho_factor(K + B_inv), self.Y) #K_B_inv = pdinv(K + B_inv)[0] #dL_dK = .5*(tdot(alpha) - K_B_inv) # Make more stable using cholesky factorization: Bi, LB, LBi, Blogdet = pdinv(K+B_inv) tmp = dpotrs(LB, self.YYT)[0] GPy.util.diag.subtract(tmp, 1) dL_dB = dpotrs(LB, tmp.T)[0] kern.update_gradients_full(dL_dK=.5*dL_dB, X=self.X) # variance gradient #for i, kern in enumerate(self.kern): K = kern.K(self.X) #I = np.eye(self.N) B_inv = np.diag(1. / ((self.phi[:, i] + 1e-6) / self.variance)) #alpha = np.linalg.solve(K + B_inv, self.Y) #K_B_inv = pdinv(K + B_inv)[0] #dL_dB = tdot(alpha) - K_B_inv grad_B_inv = np.diag(1. / (self.phi[:, i] + 1e-6)) grad_Lm_variance += 0.5 * np.trace(np.dot(dL_dB, grad_B_inv)) grad_Lm_variance -= .5*self.D * np.einsum('j,j->',self.phi[:, i], 1./self.variance) self.variance.gradient = grad_Lm_variance def bound(self): """ Compute the lower bound on the marginal likelihood (conditioned on the GP hyper parameters). """ GP_bound = 0.0 for i, kern in enumerate(self.kern): K = kern.K(self.X) B_inv = np.diag(1. 
/ ((self.phi[:, i] + 1e-6) / self.variance)) # Make more stable using cholesky factorization: Bi, LB, LBi, Blogdet = pdinv(K+B_inv) # Data fit # alpha = linalg.cho_solve(linalg.cho_factor(K + B_inv), self.Y) # GP_bound += -0.5 * np.dot(self.Y.T, alpha).trace() GP_bound -= .5 * dpotrs(LB, self.YYT)[0].trace() # Penalty # GP_bound += -0.5 * np.linalg.slogdet(K + B_inv)[1] GP_bound -= 0.5 * Blogdet # Constant, weighted by model assignment per point #GP_bound += -0.5 * (self.phi[:, i] * np.log(2 * np.pi * self.variance)).sum() GP_bound -= .5*self.D * np.einsum('j,j->',self.phi[:, i], np.log(2 * np.pi * self.variance)) return GP_bound + self.mixing_prop_bound() + self.H def vb_grad_natgrad(self): """ Natural Gradients of the bound with respect to phi, the variational parameters controlling assignment of the data to GPs """ grad_Lm = np.zeros_like(self.phi) for i, kern in enumerate(self.kern): K = kern.K(self.X) I = np.eye(self.N) B_inv = np.diag(1. / ((self.phi[:, i] + 1e-6) / self.variance)) K_B_inv = pdinv(K + B_inv)[0] alpha = np.dot(K_B_inv, self.Y) dL_dB = tdot(alpha) - K_B_inv for n in range(self.phi.shape[0]): grad_B_inv_nonzero = -self.variance / (self.phi[n, i] ** 2 + 1e-6) grad_Lm[n, i] = 0.5 * dL_dB[n, n] * grad_B_inv_nonzero grad_phi = grad_Lm + self.mixing_prop_bound_grad() + self.Hgrad natgrad = grad_phi - np.sum(self.phi * grad_phi, 1)[:, None] grad = natgrad * self.phi return grad.flatten(), natgrad.flatten() def predict(self, Xnew, i): """ Predictive mean for a given component """ kern = self.kern[i] K = kern.K(self.X) kx = kern.K(self.X, Xnew) # Predict mean # This works but should Cholesky for stability B_inv = np.diag(1. / (self.phi[:, i] / self.variance)) K_B_inv = pdinv(K + B_inv)[0] mu = kx.T.dot(np.dot(K_B_inv, self.Y)) # Predict variance kxx = kern.K(Xnew, Xnew) va = self.variance + kxx - kx.T.dot(np.dot(K_B_inv, kx)) return mu, va def predict_components(self, Xnew): """The predictive density under each component""" mus = [] vas = [] for i in range(len(self.kern)): mu, va = self.predict(Xnew, i) mus.append(mu) vas.append(va) return np.array(mus)[:, :, 0].T, np.array(vas)[:, :, 0].T def plot(self, gp_num=0): """ Plot the mixture of Gaussian Processes. Supports plotting 1d and 2d regression. 
""" from matplotlib import pylab as plt from matplotlib import cm XX = np.linspace(self.X.min(), self.X.max())[:, None] if self.Y.shape[1] == 1: plt.scatter(self.X, self.Y, c=self.phi[:, gp_num], cmap=cm.RdBu, vmin=0., vmax=1., lw=0.5) plt.colorbar(label='GP {} assignment probability'.format(gp_num)) GPy.plotting.Tango.reset() for i in range(self.phi.shape[1]): YY_mu, YY_var = self.predict(XX, i) col = GPy.plotting.Tango.nextMedium() plt.fill_between(XX[:, 0], YY_mu[:, 0] - 2 * np.sqrt(YY_var[:, 0]), YY_mu[:, 0] + 2 * np.sqrt(YY_var[:, 0]), alpha=0.1, facecolor=col) plt.plot(XX, YY_mu[:, 0], c=col, lw=2); elif self.Y.shape[1] == 2: plt.scatter(self.Y[:, 0], self.Y[:, 1], c=self.phi[:, gp_num], cmap=cm.RdBu, vmin=0., vmax=1., lw=0.5) plt.colorbar(label='GP {} assignment probability'.format(gp_num)) GPy.plotting.Tango.reset() for i in range(self.phi.shape[1]): YY_mu, YY_var = self.predict(XX, i) col = GPy.plotting.Tango.nextMedium() plt.plot(YY_mu[:, 0], YY_mu[:, 1], c=col, lw=2); else: raise NotImplementedError('Only 1d and 2d regression can be plotted') def plot_probs(self, gp_num=0): """ Plot assignment probabilities for each data point of the OMGP model """ from matplotlib import pylab as plt plt.scatter(self.X, self.phi[:, gp_num]) plt.ylim(-0.1, 1.1) plt.ylabel('GP {} assignment probability'.format(gp_num))
gpl-3.0
-8,319,944,142,268,774,000
34.39738
140
0.527387
false
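OMGP.predict in the file above notes that it "works but should Cholesky for stability". A sketch of that per-component prediction using a Cholesky solve (NumPy/SciPy only; phi_i is the column of assignment probabilities for the component, and the 1e-6 jitter mirrors the one used elsewhere in the class):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

def omgp_component_predict(K, kx, kxx, Y, phi_i, variance):
    # K: (N, N) train kernel, kx: (N, M) train/test cross-kernel,
    # kxx: (M, M) test kernel, Y: (N, D) targets.
    B_inv = np.diag(variance / (phi_i + 1e-6))
    factor = cho_factor(K + B_inv)
    mu = kx.T @ cho_solve(factor, Y)
    var = variance + kxx - kx.T @ cho_solve(factor, kx)
    return mu, var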
lisitsyn/shogun
examples/undocumented/python/graphical/interactive_svr_demo.py
4
11281
""" Shogun demo, based on PyQT Demo by Eli Bendersky Christian Widmer Soeren Sonnenburg License: GPLv3 """ import numpy import sys, os, csv from PyQt4.QtCore import * from PyQt4.QtGui import * import matplotlib from matplotlib.colorbar import make_axes, Colorbar from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar from matplotlib.figure import Figure from shogun import * import shogun as sg class Form(QMainWindow): def __init__(self, parent=None): super(Form, self).__init__(parent) self.setWindowTitle('SHOGUN interactive demo') self.data = DataHolder() self.series_list_model = QStandardItemModel() self.create_menu() self.create_main_frame() self.create_status_bar() self.on_show() def load_file(self, filename=None): filename = QFileDialog.getOpenFileName(self, 'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)') if filename: self.data.load_from_file(filename) self.fill_series_list(self.data.series_names()) self.status_text.setText("Loaded " + filename) def on_show(self): self.axes.clear() self.axes.grid(True) self.axes.plot(self.data.x1, self.data.x2, 'bo') self.axes.set_xlim((-5,5)) self.axes.set_ylim((-5,5)) self.canvas.draw() self.fill_series_list(self.data.get_stats()) def on_about(self): msg = __doc__ QMessageBox.about(self, "About the demo", msg.strip()) def fill_series_list(self, names): self.series_list_model.clear() for name in names: item = QStandardItem(name) item.setCheckState(Qt.Unchecked) item.setCheckable(False) self.series_list_model.appendRow(item) def onclick(self, event): print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata) self.data.add_example(event.xdata, event.ydata) self.on_show() def clear(self): self.data.clear() self.on_show() def enable_widgets(self): kernel_name = self.kernel_combo.currentText() if kernel_name == "LinearKernel": self.sigma.setDisabled(True) self.degree.setDisabled(True) elif kernel_name == "PolynomialKernel": self.sigma.setDisabled(True) self.degree.setEnabled(True) elif kernel_name == "GaussianKernel": self.sigma.setEnabled(True) self.degree.setDisabled(True) def train_svm(self): width = float(self.sigma.text()) degree = int(self.degree.text()) self.axes.clear() self.axes.grid(True) self.axes.plot(self.data.x1, self.data.x2, 'bo') # train svm labels = self.data.get_labels() print type(labels) lab = RegressionLabels(labels) features = self.data.get_examples() train = RealFeatures(features) kernel_name = self.kernel_combo.currentText() print "current kernel is %s" % (kernel_name) if kernel_name == "LinearKernel": gk = LinearKernel(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "PolynomialKernel": gk = sg.kernel("PolyKernel", degree=degree, c=1.0) gk.init(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "GaussianKernel": gk = GaussianKernel(train, train, width) cost = float(self.cost.text()) tubeeps = float(self.tubeeps.text()) print "cost", cost svm = LibSVR(cost, tubeeps, gk, lab) svm.train() svm.set_epsilon(1e-2) x=numpy.linspace(-5.0,5.0,100) y=svm.apply(RealFeatures(numpy.array([x]))).get_labels() self.axes.plot(x,y,'r-') self.axes.set_xlim((-5,5)) self.axes.set_ylim((-5,5)) self.canvas.draw() def create_main_frame(self): self.main_frame = QWidget() plot_frame = QWidget() self.dpi = 100 self.fig = Figure((6.0, 6.0), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) 
self.canvas.setParent(self.main_frame) cid = self.canvas.mpl_connect('button_press_event', self.onclick) self.axes = self.fig.add_subplot(111) self.cax = None #self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame) log_label = QLabel("Number of examples:") self.series_list_view = QListView() self.series_list_view.setModel(self.series_list_model) cost_label = QLabel('C') #self.cost = QSpinBox()#QLineEdit() self.cost = QLineEdit() self.cost.setText("1.0") #self.cost.setMinimum(1) spin_label2 = QLabel('tube') self.tubeeps = QLineEdit() self.tubeeps.setText("0.1") spin_label3 = QLabel('sigma') self.sigma = QLineEdit() self.sigma.setText("1.2") #self.sigma.setMinimum(1) spin_label4 = QLabel('d') self.degree = QLineEdit() self.degree.setText("2") #self.sigma.setMinimum(1) spins_hbox = QHBoxLayout() spins_hbox.addWidget(cost_label) spins_hbox.addWidget(self.cost) spins_hbox.addWidget(spin_label2) spins_hbox.addWidget(self.tubeeps) spins_hbox.addWidget(spin_label3) spins_hbox.addWidget(self.sigma) spins_hbox.addWidget(spin_label4) spins_hbox.addWidget(self.degree) spins_hbox.addStretch(1) self.legend_cb = QCheckBox("Show Support Vectors") self.legend_cb.setChecked(False) self.show_button = QPushButton("&Train SVR") self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm) self.clear_button = QPushButton("&Clear") self.connect(self.clear_button, SIGNAL('clicked()'), self.clear) self.kernel_combo = QComboBox() self.kernel_combo.insertItem(-1, "GaussianKernel") self.kernel_combo.insertItem(-1, "PolynomialKernel") self.kernel_combo.insertItem(-1, "LinearKernel") self.kernel_combo.maximumSize = QSize(300, 50) self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets) left_vbox = QVBoxLayout() left_vbox.addWidget(self.canvas) #left_vbox.addWidget(self.mpl_toolbar) right0_vbox = QVBoxLayout() right0_vbox.addWidget(log_label) right0_vbox.addWidget(self.series_list_view) #right0_vbox.addWidget(self.legend_cb) right0_vbox.addStretch(1) right2_vbox = QVBoxLayout() right2_label = QLabel("Settings") right2_vbox.addWidget(right2_label) right2_vbox.addWidget(self.show_button) right2_vbox.addWidget(self.kernel_combo) right2_vbox.addLayout(spins_hbox) right2_clearlabel = QLabel("Remove Data") right2_vbox.addWidget(right2_clearlabel) right2_vbox.addWidget(self.clear_button) right2_vbox.addStretch(1) right_vbox = QHBoxLayout() right_vbox.addLayout(right0_vbox) right_vbox.addLayout(right2_vbox) hbox = QVBoxLayout() hbox.addLayout(left_vbox) hbox.addLayout(right_vbox) self.main_frame.setLayout(hbox) self.setCentralWidget(self.main_frame) self.enable_widgets() def create_status_bar(self): self.status_text = QLabel("") self.statusBar().addWidget(self.status_text, 1) def create_menu(self): self.file_menu = self.menuBar().addMenu("&File") load_action = self.create_action("&Load file", shortcut="Ctrl+L", slot=self.load_file, tip="Load a file") quit_action = self.create_action("&Quit", slot=self.close, shortcut="Ctrl+Q", tip="Close the application") self.add_actions(self.file_menu, (load_action, None, quit_action)) self.help_menu = self.menuBar().addMenu("&Help") about_action = self.create_action("&About", shortcut='F1', slot=self.on_about, tip='About the demo') self.add_actions(self.help_menu, (about_action,)) def add_actions(self, target, actions): for action in actions: if action is None: target.addSeparator() else: target.addAction(action) def create_action( self, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, signal="triggered()"): action = 
QAction(text, self) if icon is not None: action.setIcon(QIcon(":/%s.png" % icon)) if shortcut is not None: action.setShortcut(shortcut) if tip is not None: action.setToolTip(tip) action.setStatusTip(tip) if slot is not None: self.connect(action, SIGNAL(signal), slot) if checkable: action.setCheckable(True) return action class DataHolder(object): """ Just a thin wrapper over a dictionary that holds integer data series. Each series has a name and a list of numbers as its data. The length of all series is assumed to be the same. The series can be read from a CSV file, where each line is a separate series. In each series, the first item in the line is the name, and the rest are data numbers. """ def __init__(self, filename=None): self.clear() self.load_from_file(filename) def clear(self): self.x1 = [] self.x2 = [] def get_stats(self): num = len(self.x1) str_num = "num examples: %i" % num return (str_num, str_num) def get_labels(self): return numpy.array(self.x2, dtype=numpy.float64) def get_examples(self): num = len(self.x1) examples = numpy.zeros((1,num)) for i in xrange(num): examples[0,i] = self.x1[i] return examples def add_example(self, x1, x2): self.x1.append(x1) self.x2.append(x2) def load_from_file(self, filename=None): self.data = {} self.names = [] if filename: for line in csv.reader(open(filename, 'rb')): self.names.append(line[0]) self.data[line[0]] = map(int, line[1:]) self.datalen = len(line[1:]) def series_names(self): """ Names of the data series """ return self.names def series_len(self): """ Length of a data series """ return self.datalen def series_count(self): return len(self.data) def get_series_data(self, name): return self.data[name] def main(): app = QApplication(sys.argv) form = Form() form.show() app.exec_() if __name__ == "__main__": main() #~ dh = DataHolder('qt_mpl_data.csv') #~ print dh.data #~ print dh.get_series_data('1991 Sales') #~ print dh.series_names() #~ print dh.series_count()
bsd-3-clause
-8,283,956,265,595,372,000
28.923077
116
0.596667
false
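The Qt demo above exposes cost (C), the epsilon tube and the kernel width as LibSVR parameters. For comparison, the same epsilon-tube regression written against scikit-learn's API (not shogun's) reduces to a few lines; the data here is synthetic:

import numpy as np
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(60, 1), axis=0)
y = np.sin(X).ravel() + 0.1 * rng.randn(60)

# C, epsilon and gamma play the roles of the demo's cost, tube and sigma knobs.
svr = SVR(kernel="rbf", C=1.0, epsilon=0.1, gamma=0.5)
y_fit = svr.fit(X, y).predict(X)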
sinhrks/cesiumpy
cesiumpy/entities/tests/test_material.py
1
1677
#!/usr/bin/env python # coding: utf-8 import nose import unittest import re import cesiumpy import cesiumpy.testing as tm class TestImageMaterial(unittest.TestCase): def test_imagematerial(self): m = cesiumpy.entities.material.ImageMaterialProperty('xxx.png') self.assertEqual(repr(m), 'ImageMaterialProperty(xxx.png)') self.assertEqual(m.script, """new Cesium.ImageMaterialProperty({image : "xxx.png"})""") class TestTempImageMaterial(unittest.TestCase): def test_matplotlibimage(self): tm._skip_if_no_matplotlib() import numpy as np import matplotlib.pyplot as plt img = np.random.randint(0, 255, (100, 100, 3)) ax = plt.imshow(img) img = cesiumpy.entities.material.TemporaryImage(ax.figure) m = cesiumpy.entities.material.ImageMaterialProperty(img) self.assertTrue(re.match("""new Cesium\\.ImageMaterialProperty\\({image : "\w+\\.png"}\\)""", m.script)) img = cesiumpy.entities.material.TemporaryImage(ax) m = cesiumpy.entities.material.ImageMaterialProperty(img) self.assertTrue(re.match("""new Cesium\\.ImageMaterialProperty\\({image : "\w+\\.png"}\\)""", m.script)) plt.close() fig, axes = plt.subplots(2, 2) msg = "Unable to trim a Figure contains multiple Axes" with nose.tools.assert_raises_regexp(ValueError, msg): img = cesiumpy.entities.material.TemporaryImage(fig) cesiumpy.entities.material.ImageMaterialProperty(img) plt.close() if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
apache-2.0
-7,493,760,151,751,198,000
32.54
112
0.648778
false
StartupsPoleEmploi/labonneboite
labonneboite/importer/compute_score.py
1
26783
# coding: utf-8 """Machine learning module. The outside world uses the "run" function which will predict how many hirings a company will make in the next period (i.e. 6 months for DPAE and 6 months for Alternance as well) and we use a regression model to do so. The predicted number of hirings is then transformed and obfuscated into a "score" value between 0 and 100 for each office ("etablissement"). We do this because we consider the predicted number of hirings to be a sensitive confidential data, this way, by storing only the obfuscated score in db, even if our db gets hacked you cannot transform the score back into their corresponding predicted number of hirings. We train a machine learning algorithm on companies and employment data to predict the number of hirings. We use the scikit-learn library: more info at http://scikit-learn.org/stable/documentation.html """ from calendar import monthrange import math from operator import getitem import os import pickle import sys from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta import pandas as pd import numpy as np from sklearn import linear_model from sklearn.metrics import mean_squared_error import sqlalchemy from sqlalchemy.pool import NullPool from sqlalchemy import func from labonneboite.common.util import timeit from labonneboite.importer import settings as importer_settings from labonneboite.importer.models.computing import DpaeStatistics, Hiring, RawOffice from labonneboite.common import scoring as scoring_util from labonneboite.common.database import get_db_string from labonneboite.common.env import get_current_env, ENV_DEVELOPMENT from .debug import listen from .jobs.common import logger listen() def get_engine(): return sqlalchemy.create_engine(get_db_string(), poolclass=NullPool) # Output additional debug info about these sirets # To disable, set to an empty list [] # Sirets must be string, not int DEBUG_SIRETS = ["19240023200018", "33530956300047", "26760168000015"] # DEBUG_SIRETS = [] # disable unnecessary pandas SettingWithCopyWarning # see http://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas # default value is 'warn' pd.options.mode.chained_assignment = None class NotEnoughDataException(Exception): pass def raise_with_message(msg): logger.info("WARNING exception: %s", msg) raise Exception(msg) def debug_df(df, description="no description"): """ Outputs useful information about dataframe """ if len(DEBUG_SIRETS) >= 1: logger.debug("dataframe debug info [%s] about sirets %s", description, DEBUG_SIRETS) columns = list(df.columns) logger.debug("dataframe has %s rows and columns = %s", len(df), columns) if "siret" in columns: tmp_df = df[df.siret.isin(DEBUG_SIRETS)] logger.debug("dataframe content :\n %s", tmp_df) else: logger.debug("dataframe does not have a siret colum") logger.debug("columns : %s", columns) def discarded_check(departement): # weird data distribution for these departements, discard safey return int(departement) in [20, ] def check_coefficient_of_variation(df_etab, departement, period_count_columns, prefix): hiring_count_per_period_sum = df_etab[period_count_columns].sum(axis=0) coefficient_of_variation = hiring_count_per_period_sum.std() / hiring_count_per_period_sum.mean() logger.debug('hiring_count_per_period_sum %s', hiring_count_per_period_sum) logger.debug('coefficient of variation of hiring count per period %s', coefficient_of_variation) logger.info("checking mean_between_existing_and_new_score: %s", coefficient_of_variation) if 
not discarded_check(departement): if not coefficient_of_variation < importer_settings.SCORE_COEFFICIENT_OF_VARIATION_MAX: raise_with_message("[dpt%s] %s coefficient_of_variation too high: %s > %s" % ( departement, prefix, coefficient_of_variation, importer_settings.SCORE_COEFFICIENT_OF_VARIATION_MAX )) def check_mean_between_existing_and_new_score(departement, df): mean = np.nan_to_num(df[["diff_score"]].mean())[0] logger.info("checking mean_between_existing_and_new_score: %s", mean) if not discarded_check(departement): if not np.nan_to_num(df[["diff_score"]].mean())[0] < importer_settings.SCORE_COMPUTING_MAX_DIFF_MEAN: raise "np.nan_to_num(df[[\"diff_score\"]].mean())[0] too high" def check_highly_scored_companies_evolution(departement, high_existing_scores, high_new_scores): evolution = 100 * abs(high_new_scores - high_existing_scores) / high_existing_scores logger.info("highly_scored_companies_evolution: %s", evolution) if not discarded_check(departement): if not evolution < importer_settings.HIGH_SCORE_COMPANIES_DIFF_MAX: raise_with_message( "evolution too high: %s > %s" % (evolution, importer_settings.HIGH_SCORE_COMPANIES_DIFF_MAX) ) def check_number_highly_scored_companies(departement, high_new_scores): if not discarded_check(departement): if not high_new_scores > importer_settings.HIGH_SCORE_COMPANIES_COUNT_MIN: error_msg = "high_new_scores too low: %s < %s" % ( high_new_scores, importer_settings.HIGH_SCORE_COMPANIES_COUNT_MIN ) raise Exception(error_msg) def tranche_to_effectif(tranche): map_tranche_to_effectif = { '00': 0, '01': 1, '02': 3, '03': 6, '11': 10, '12': 20, '21': 50, '22': 100, '31': 200, '32': 250, '41': 500, '42': 1000, '51': 2000, '52': 5000, '53': 10000 } if tranche not in list(map_tranche_to_effectif.keys()): return 1 return map_tranche_to_effectif[tranche] def check_prediction_beginning_date(prediction_beginning_date): if prediction_beginning_date.day != 1: raise ValueError("prediction_beginning_date should be the first day of the month") def check_last_historical_data_date(last_historical_data_date): # last day of the month == next day is first day of the month if (last_historical_data_date + timedelta(days=1)).day != 1: raise ValueError("last_historical_data_date should be the last day of the month") def go_back_last_day_of_the_month(date): if (date + timedelta(days=1)).day == 1: return date # already a last day of the month first_day_of_the_month = date.replace(day=1) return first_day_of_the_month - timedelta(days=1) # last day of previous month @timeit def get_df_etab(departement): logger.debug("reading etablissements data (%s)", departement) df_etab = pd.read_sql_query(""" select * from %s where departement = %s and siret != '' """ % (RawOffice.__tablename__, departement), get_engine()) debug_df(df_etab, "after loading from raw office table") if df_etab.empty: logger.warning("dataframe empty for departement %s", departement) return None logger.debug("loading data (%s) OK (%i etablissements)!", departement, len(df_etab)) logger.debug("adding effectif (%s)...", departement) df_etab['effectif'] = df_etab['trancheeffectif'].map(tranche_to_effectif) logger.debug("effectif done (%s)!", departement) return df_etab @timeit def get_df_hiring(departement, prediction_beginning_date): logger.debug("reading hiring data...") df_hiring = pd.read_sql_query(""" select siret, hiring_date, case when contract_type in (%s) then 'dpae' when contract_type in (%s) then 'alt' end as hiring_type from %s where departement = %s and contract_type in (%s) and hiring_date < '%s' """ % ( 
', '.join([str(c_t) for c_t in Hiring.CONTRACT_TYPES_DPAE]), ', '.join([str(c_t) for c_t in Hiring.CONTRACT_TYPES_ALTERNANCE]), Hiring.__tablename__, departement, ', '.join([str(c_t) for c_t in Hiring.CONTRACT_TYPES_ALL]), str(prediction_beginning_date.isoformat()), ), get_engine(), ) debug_df(df_hiring, "after loading from hiring table") if df_hiring.empty: logger.warning("no hiring data for departement %s", departement) return None df_hiring["hiring_date_month"] = pd.DatetimeIndex(df_hiring["hiring_date"]).month df_hiring["hiring_date_year"] = pd.DatetimeIndex(df_hiring["hiring_date"]).year return df_hiring @timeit def get_df_etab_with_hiring_monthly_aggregates(departement, prediction_beginning_date): """ Returns a df_etab dataframe with one row per siret and one column per month (hirings total for given month) for all (past) months before (now) prediction_beginning_date. """ df_etab = get_df_etab(departement) # has one row per siret df_dpae = get_df_hiring(departement, prediction_beginning_date) # has one row per hiring if df_etab is None or df_dpae is None: return None df_dpae = df_dpae.groupby(["siret", "hiring_type", "hiring_date_year", "hiring_date_month"]).count().reset_index() debug_df(df_dpae, "after group by") logger.debug("pivoting table dpae (%s)...", departement) # FIXME understand why `values="hiring_date"` is needed at all df_dpae = pd.pivot_table(df_dpae, values="hiring_date", index="siret", columns=["hiring_type", "hiring_date_year", "hiring_date_month"]) debug_df(df_dpae, "after pivot") logger.debug("pivoting table (%s) ok!", departement) # after this pivoting, # df_dpae has one row per siret and one column per month (hirings total for given month) # and per hiring_type df_dpae["siret"] = df_dpae.index df_dpae = df_dpae.fillna(0) df_dpae.columns = ['-'.join([str(c) for c in col]) for col in df_dpae.columns.values] siret_raw_column_name = 'siret--' df_dpae['siret'] = df_dpae[siret_raw_column_name] del df_dpae[siret_raw_column_name] debug_df(df_dpae, "after transform") #### joining hiring and etab # at this moment # df_etab has one row per siret # df_dpae has one row per siret and one column per month (hirings total for given month) # and per hiring_type logger.debug("merging dpae with etablissements (%s)...", departement) # inner join to keep only etabs which have at least one dpae df_etab = pd.merge(df_dpae, df_etab, on='siret', how="inner") debug_df(df_etab, "after merge") logger.debug("merging done with %s offices(%s)!", len(df_etab), departement) # after this inner joining, # df_etab has one row per siret and one column per month (hirings total for given month) # and per hiring_type if "website" not in list(df_etab.columns): raise Exception("missing website column") df_etab = df_etab.fillna(0) return df_etab def compute_prediction_beginning_date(): """ We predict hirings starting from the 1st of the current month. 
""" # foo dpae_statistics = DpaeStatistics.query.filter(DpaeStatistics.file_type==DpaeStatistics.DPAE)\ .order_by(DpaeStatistics.most_recent_data_date.desc()).first() now = dpae_statistics.most_recent_data_date prediction_beginning_date = now.replace(day=1) # get 1st of the month logger.info("prediction_beginning_date = %s", prediction_beginning_date) return prediction_beginning_date def total_hired_period(period): def f(office): return office[period] return f # from https://stackoverflow.com/questions/7015587/python-difference-of-2-datetimes-in-months def months_between_dates(d1, d2): delta = 0 while True: mdays = monthrange(d1.year, d1.month)[1] d1 += timedelta(days=mdays) if d1 <= d2: delta += 1 else: break return delta def get_features_for_lag(df_etab, prefix, training_periods, data_gap_in_periods, periods_back_in_time, debug_msg="Unnamed"): temporal_features = [] for i in range(0, training_periods): # [0, 1, ... training_periods - 1] index = data_gap_in_periods + (1 + i) + periods_back_in_time temporal_features.append('%s-period-%i' % (prefix, index)) features = list(temporal_features) features.append('effectif') df_etab_features_only = df_etab[features] X = df_etab_features_only.values df_etab_for_debug = df_etab[["siret"] + features] debug_df(df_etab_for_debug, debug_msg) return X, features def get_hirings_over_period_for_office(office, prediction_beginning_date, months_per_period, minus, prefix): """ office : one row of df_etab. minus : how many periods to go back in time. """ start_date = prediction_beginning_date + relativedelta(months=-(months_per_period * minus)) columns_of_period = [] for i in range(0, months_per_period): # [0, 1, 2, ..., months_per_period - 1] current_date = start_date + relativedelta(months=i) columns_of_period.append('%s-%s-%s' % (prefix, current_date.year, current_date.month)) hirings_over_period = 0 for column in columns_of_period: try: hirings_over_period += getitem(office, column) except KeyError: pass return hirings_over_period def compute_hiring_aggregates( df_etab, departement, prediction_beginning_date, periods, prefix, months_per_period): """ Edits in place df_etab. Adds one column per period containing hiring total for this hiring_type. """ logger.debug("computing %s hiring aggregates (%s)...", prefix, departement) # df_etab has one row per siret and one column per hiring month-aggregate and per hiring_type period_count_columns = [] for i in range(1, periods + 1): # [1, 2, ..., periods] column = '%s-period-%s' % (prefix, i) # pylint: disable=cell-var-from-loop df_etab[column] = df_etab.apply( lambda office: get_hirings_over_period_for_office( office, prediction_beginning_date, months_per_period, minus=i, prefix=prefix, ), axis=1, ) period_count_columns.append(column) logger.debug("finished calculating %s temporal features (%s)!", prefix, departement) check_coefficient_of_variation(df_etab, departement, period_count_columns, prefix) debug_df(df_etab, "df_etab after compute_hiring_aggregates") @timeit def train(df_etab, departement, prediction_beginning_date, last_historical_data_date, months_per_period, training_periods, prefix_for_fields, score_field_name, is_lbb=True): """ Edits in place df_etab by adding final score columns (e.g. score and score_regr for DPAE/LBB, or score_alternance and score_alternance_regr for LBA). TODO we should trash the score and score_alternance legacy columns since they come from legacy binary classification model and are no longer used. 
We now only use score_regr and score_alternance_regr computed by the regression model. At the beginning of this method, df_etab has one row per siret and one column per month and per hiring_type. At the end of this method df_etab has one row per siret and one column per hiring_type and per period (hirings total for given period). """ check_prediction_beginning_date(prediction_beginning_date) check_last_historical_data_date(last_historical_data_date) data_gap_in_months = months_between_dates(last_historical_data_date, prediction_beginning_date) # math.ceil returns a float and not an int, thus we still need to cast it to int. # `1.0 * int/int` trick is needed because otherwise int/int gives the floor int value. data_gap_in_periods = int(math.ceil(data_gap_in_months / months_per_period)) #Since August 2020, we have all alternance data, so there should be no gap anymore if get_current_env() == ENV_DEVELOPMENT: if data_gap_in_periods <= 0: raise ValueError("dpae data should have at least one period of gap") else: #if data_gap_in_periods != 0: if data_gap_in_periods not in [0, 1]: # FIXME restore when we have dpae 10 april raise ValueError("dpae data should have no gap") # Training set is moved 2 years back in time relative to live set, # and testing set is moved 1 year back in time relative to live set. # To understand why check # https://docs.google.com/document/d/1-UqC8wZBhHEgMvzMi0SQamnXvkU6itLwubZiI00yH6E/edit periods_per_year = 12 // months_per_period total_periods = training_periods + 2 * periods_per_year + data_gap_in_periods # add in place hiring aggregates per period compute_hiring_aggregates( df_etab, departement, prediction_beginning_date, periods=total_periods, prefix=prefix_for_fields, months_per_period=months_per_period, ) # We model the problem as a linear regression. regr = linear_model.LinearRegression() y_train_period = '%s-period-%s' % (prefix_for_fields, 2 * periods_per_year + data_gap_in_periods) y_train_regr = df_etab.apply(total_hired_period(y_train_period), axis=1) X_train, X_train_feature_names = get_features_for_lag( df_etab, prefix_for_fields, training_periods, data_gap_in_periods, periods_back_in_time=2 * periods_per_year, debug_msg="X_train", ) logger.debug("(%s %s) %s offices", departement, prefix_for_fields, len(df_etab)) if len(df_etab) < importer_settings.MINIMUM_OFFICES_REQUIRED_TO_TRAIN_MODEL: # problems happen if we don't have enough offices to train on... # throw an exception to show we don't have enough data for this departement raise NotEnoughDataException("only %s offices !" 
% len(df_etab)) logger.debug("(%s %s) fitting the model on X_train...", departement, prefix_for_fields) regr.fit(X_train, y_train_regr) logger.debug("(%s %s) fitting done!", departement, prefix_for_fields) logger.debug("(%s %s) X_train_feature_names: %s", departement, prefix_for_fields, X_train_feature_names) logger.debug("(%s %s) regression_coefficients (fitting done on X_train) : %s", departement, prefix_for_fields, regr.coef_) X_test, X_test_feature_names = get_features_for_lag( df_etab, prefix_for_fields, training_periods, data_gap_in_periods, periods_back_in_time=periods_per_year, debug_msg="X_test", ) logger.debug("(%s %s) X_test_feature_names: %s", departement, prefix_for_fields, X_test_feature_names) X_live, X_live_feature_names = get_features_for_lag( df_etab, prefix_for_fields, training_periods, data_gap_in_periods, periods_back_in_time=0, debug_msg="X_live", ) logger.debug("(%s %s) X_live_feature_names: %s", departement, prefix_for_fields, X_live_feature_names) # --- compute regression metrics y_train_regr_pred = regr.predict(X_train) y_test_period = '%s-period-%s' % (prefix_for_fields, periods_per_year + data_gap_in_periods) y_test_regr = df_etab.apply(total_hired_period(y_test_period), axis=1) y_test_regr_pred = regr.predict(X_test) rmse_train = mean_squared_error(y_train_regr, y_train_regr_pred) rmse_test = mean_squared_error(y_test_regr, y_test_regr_pred) # --- end of regression metrics pickle_data = { "departement": departement, "X_train_feature_names": X_train_feature_names, "X_test_feature_names": X_test_feature_names, "X_live_feature_names": X_live_feature_names, "regression": regr, "regression_coefficients": regr.coef_, "regression_scores": { "rmse_train": rmse_train, "rmse_test": rmse_test, } } current_time = datetime.now().strftime("%Y-%m-%d_%Hh%Mm%Ss") pickle_folder = "pickle_exports" if not os.path.exists(pickle_folder): os.makedirs(pickle_folder) pickle_filename = "%s/compute_score_%s_dpt%s.pickle" % (pickle_folder, current_time, departement) pickle.dump(pickle_data, open(pickle_filename, "wb")) try: logger.info("(%s %s) regression_train RMSE : %s", departement, prefix_for_fields, rmse_train) logger.info("(%s %s) regression_test RMSE : %s", departement, prefix_for_fields, rmse_test) if rmse_test >= importer_settings.RMSE_MAX: raise_with_message("is_lbb: %s, rmse_test too high : %s > %s" % (is_lbb, rmse_test, importer_settings.RMSE_MAX)) except IndexError: logger.warning("not enough data to compute RMSE for %s", departement) score_regr_field_name = "%s_regr" % score_field_name df_etab[score_regr_field_name] = regr.predict(X_live) df_etab[score_field_name] = [scoring_util.get_score_from_hirings(h) for h in df_etab[score_regr_field_name]] ranges = [0, 20, 40, 60, 80, 100] logger.info('(%s %s) score distribution : %s', departement, prefix_for_fields, df_etab.groupby(pd.cut(df_etab.score, ranges))[score_field_name].agg('count')) compare_new_scores_to_old_ones(departement, df_etab) @timeit def export_df_etab_to_db(df_etab, departement): logger.debug("writing sql (%s)...", departement) def departement_to_str(x): return "{:02d}".format(int(x["departement"])) df_etab['departement'] = df_etab.apply(departement_to_str, axis=1) # FIXME control more precisely the schema (indexes!) of temporary tables created by to_sql, # current version adds an 'index' column (which is the panda dataframe index column itself, # nothing to do with our app) and makes it a primary key. 
# see https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html # Maybe using no index at all (no primary key) would be faster. df_etab.to_sql("etablissements_%s" % departement, get_engine(), if_exists='replace', chunksize=10000) logger.debug("sql done (%s)!", departement) @timeit def compare_new_scores_to_old_ones(departement, df_etab): logger.debug("fetching existing scores for %s", departement) df_existing_score = pd.read_sql_query( "select siret, score as existing_score from %s where departement=%s" % ( importer_settings.SCORE_REDUCING_TARGET_TABLE, departement ), get_engine(), ) debug_df(df_existing_score, "df_existing_score") if df_existing_score.empty: logger.debug("no old score found for departement %s, skipping old/new score comparison...", departement) else: logger.debug("merging existing scores for %s", departement) # merge doc : http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html # by default how='inner' use intersection of keys from both dataframes (SQL: inner join) # what we need here is how='left': use only keys from left dataframe (SQL: left outer join) # because we keep all companies from current extract, whether or not they were present before debug_df(df_etab, "before merge with df_existing_score") df_etab = pd.merge(df_etab, df_existing_score, on="siret", how="left") debug_df(df_etab, "after merge with df_existing_score and how=left") df_etab["diff_score"] = df_etab["score"] - df_etab["existing_score"] logger.info( "mean difference of scores: %s new score %s existing score %s", df_etab[["diff_score"]].mean()[0], df_etab[["score"]].mean()[0], df_etab[["existing_score"]].mean()[0] ) # abort if the mean between the existing score and the new just computed score is too high check_mean_between_existing_and_new_score(departement, df_etab) high_existing_scores = df_etab[df_etab["existing_score"] > 50]["siret"].count() high_new_scores = df_etab[df_etab["score"] > 50]["siret"].count() logger.info("existing high scores > 50: %s", high_existing_scores) logger.info("new high scores > 50: %s", high_new_scores) # abort if the number of highly scored companies varies much check_highly_scored_companies_evolution(departement, high_existing_scores, high_new_scores) check_number_highly_scored_companies(departement, high_new_scores) @timeit def run( departement, prediction_beginning_date=None, return_df_etab_if_successful=False, ): """ Returns True if computation successful and False otherwise. """ if prediction_beginning_date is None: prediction_beginning_date = compute_prediction_beginning_date() check_prediction_beginning_date(prediction_beginning_date) # get df_etab with monthly hirings for each hiring_type # hirings are not yet grouped by period (i.e. 6 months for DPAE / 6 months for Alternance) df_etab = get_df_etab_with_hiring_monthly_aggregates(departement, prediction_beginning_date) logger.debug("df_etab_with_hiring_monthly_aggregates loaded for departement %s", departement) if df_etab is None: logger.warning("no etab/hiring data found for departement %s", departement) return False # failed computation (e.g. no data) # LBB / DPAE. train( df_etab, departement, prediction_beginning_date=prediction_beginning_date, last_historical_data_date=go_back_last_day_of_the_month(DpaeStatistics.get_last_historical_data_date(DpaeStatistics.DPAE)), months_per_period=6, training_periods=7, prefix_for_fields='dpae', score_field_name='score', ) logger.debug("DPAE/LBB training done for departement %s", departement) # LBA / Alternance. 
train( df_etab, departement, prediction_beginning_date=prediction_beginning_date, last_historical_data_date=go_back_last_day_of_the_month(DpaeStatistics.get_last_historical_data_date(DpaeStatistics.APR)), months_per_period=6, training_periods=7, prefix_for_fields='alt', score_field_name='score_alternance', is_lbb=False ) logger.debug("Alternance/LBA training done for departement %s", departement) # Final data export. export_df_etab_to_db(df_etab, departement) if return_df_etab_if_successful: return df_etab # only used in test_compute_score.py for inspection return True # successful computation @timeit def run_main(): run(departement=sys.argv[1]) if __name__ == "__main__": run_main()
agpl-3.0
-1,871,347,887,150,394,400
38.796434
131
0.664414
false
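The scoring pipeline in the record above guards against regressions by left-joining the freshly computed scores onto the previously stored ones (keyed on siret) and aborting when the drift is too large. Below is a minimal, self-contained sketch of that comparison pattern, not the project's code: compare_scores, MAX_MEAN_DRIFT and the sample frames are invented for illustration, while the siret/score columns and the left-join-then-diff logic come from the record.

import pandas as pd

MAX_MEAN_DRIFT = 5.0  # hypothetical threshold, in the spirit of the importer's RMSE/score checks


def compare_scores(df_new, df_old, threshold=MAX_MEAN_DRIFT):
    # keep every company from the new extract, whether or not it existed before (left outer join)
    merged = pd.merge(df_new,
                      df_old.rename(columns={"score": "existing_score"}),
                      on="siret", how="left")
    merged["diff_score"] = merged["score"] - merged["existing_score"]
    mean_drift = merged["diff_score"].mean()  # NaN-safe: brand new sirets are ignored here
    if abs(mean_drift) > threshold:
        raise ValueError("mean score drift too high: %s > %s" % (mean_drift, threshold))
    return merged


if __name__ == "__main__":
    df_new = pd.DataFrame({"siret": ["001", "002", "003"], "score": [42.0, 55.0, 61.0]})
    df_old = pd.DataFrame({"siret": ["001", "002"], "score": [40.0, 57.0]})
    print(compare_scores(df_new, df_old)[["siret", "score", "existing_score", "diff_score"]])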
gully/PyKE
pyke/kepcotrend.py
2
40071
from .utils import PyKEArgumentHelpFormatter from . import kepmsg, kepio, kepkey import math import numpy as np import matplotlib.pyplot as plt from matplotlib.cbook import is_numlike from scipy.optimize import leastsq from scipy.optimize import fmin from scipy.interpolate import interp1d from astropy.io import fits as pyfits from tqdm import tqdm import re __all__ = ['kepcotrend'] def cut_bad_data(cad, date, flux, err): """ this function finds cadences with good data and returns them """ good_data_mask = np.logical_and(np.isfinite(date), np.isfinite(flux)) date = date[good_data_mask] cad = cad[good_data_mask] flux = flux[good_data_mask] err = err[good_data_mask] return cad, date, flux, err, good_data_mask def put_in_nans(good_data, flux): """ Function finds the cadences where the data has been removed using cut_bad_data() and puts data back in. The flux data put back in is nan. This function is used when writing data to a FITS files. good_data == True means the datapoint is good!! """ newflux = np.empty(len(good_data)) newflux[:] = np.nan newflux[good_data] = flux return newflux def get_pcomp_list_newformat(bvdat, pcomplist, newcad, short, scinterp): """ Finds cotrending basis vectors which have been requested to be used by the user and adds them to an array. """ pcomp = np.zeros((len(pcomplist), len(newcad))) for i in range(len(np.array(pcomplist))): j = int(np.array(pcomplist)[i]) dat = bvdat.field('VECTOR_{}'.format(j))[~np.isnan(bvdat.field('CADENCENO'))] bvcadfull = bvdat.field('CADENCENO')[~np.isnan(bvdat.field('CADENCENO'))] #try: if short: #if the data is short cadence the interpolate the basis vectors bv_data = dat[np.in1d(bvdat.field('CADENCENO'), newcad)] bv_cad = bvcadfull[np.in1d(bvdat.field('CADENCENO'), newcad)] #funny things happen why I use interp1d for linear interpolation #so I have opted to use the numpy interp function for linear if scinterp == 'linear': intpl = np.interp(newcad, bv_cad, bv_data, left=bv_data[0], right=bv_data[-1]) pcomp[i] = intpl else: intpl = interp1d(bv_cad, bv_data, kind=scinterp, bounds_error=False, fill_value=None) pcomp[i] = np.where(np.isnan(intpl(newcad)), 0, intpl(newcad)) mid_pt = np.floor(np.median(np.arange(len(pcomp[i])))) p_len = len(pcomp[i]) lower = [np.logical_and(np.arange(p_len) < mid_pt, pcomp[i] == 0)] upper = [np.logical_and(np.arange(p_len) > mid_pt, pcomp[i] == 0)] pcomp[i][lower] = bv_data[0] pcomp[i][upper] = bv_data[-1] else: pcomp[i] = dat[np.in1d(bvdat.field('CADENCENO'), newcad)] return pcomp def make_sc_lc(obs_cad, bv_cad, flux): """ make short cadence data look like long cadence data """ newflux = np.zeros(len(bv_cad)) for i in range(len(bv_cad)): mask = np.logical_and(obs_cad > bv_cad[i] - 15, obs_cad < bv_cad[i] + 15) newflux[i] = obs_cad[mask] return newflux def near_intpl(xout, xin, yin): """ Interpolate the curve defined by (xin, yin) at points xout. The array xin must be monotonically increasing. The output has the same data type as the input yin. 
    :param xout: x values at which the interpolated curve is evaluated
    :param xin: x values of the input curve
    :param yin: y values of the input curve
    :rtype: numpy array containing the interpolated curve
    """
    lenxin = len(xin)
    i1 = np.searchsorted(xin, xout)
    i1[i1 == 0] = 1
    i1[i1 == lenxin] = lenxin - 1
    x0 = xin[i1 - 1]
    x1 = xin[i1]
    y0 = yin[i1 - 1]
    y1 = yin[i1]
    return np.where(abs(xout - x0) < abs(xout - x1), y0, y1)

def get_pcomp_list(pcompdata, pcomplist, newcad):
    pcomp = np.zeros((len(pcomplist), len(newcad)))
    for i in range(len(np.array(pcomplist))):
        j = int(np.array(pcomplist)[i]) - 1
        dat = pcompdata[..., j + 2]
        pcomp[i] = dat[np.in1d(pcompdata[..., 1], newcad)]
    return pcomp

def do_lsq_uhat(pcomps, flux):
    """
    Does a linear least squares fit of the basis vectors to the light curve
    using the 'matrix' method - U(transpose) * y = coeffs.

    In this implementation y is a horizontal 1D array and U is also a long
    thin array whose length corresponds to the number of basis vectors used.
    In effect U is already transposed and y needs to be transposed before
    use. First convert to what is expected in least squares fitting.
    """
    U_hat = np.matrix(pcomps).transpose()
    y_hat = np.matrix(flux).transpose()
    U_trans = U_hat.transpose()
    coeffs = - np.linalg.inv(U_trans * U_hat) * U_trans * y_hat
    return coeffs

def do_lsq_nlin(pcomps, flux):
    """
    Does a least squares fit of the basis vectors to the light curve using
    the 'lst_sq' method - this performs a Levenberg-Marquardt fit. The
    initial guess is 1. for the first coefficient and zero for all others.
    """
    guess = np.append(np.array([1.]), np.zeros(len(pcomps) - 1))
    t = leastsq(fitfunct, guess, args=(pcomps, flux), full_output=0)
    return - np.array(t[0])

def do_lsq_fmin(pcomps, flux):
    """
    Performs a simplex fit of the basis vectors to the light curve. The
    initial guess is an array with 1. as the first element and zero as the
    value of all other elements in the array.
    """
    guess = np.append(np.array([1.]), np.zeros(len(pcomps) - 1))
    t = fmin(fitfunct_fmin, guess, args=(pcomps, flux))
    return -np.array(t)

def do_lsq_fmin_pow(pcomps, flux, order):
    """
    Performs a simplex fit of the basis vectors to the light curve. The
    initial guess is an array with 1. as the first element and zero as the
    value of all other elements in the array.
    """
    guess = np.array([1, 0])
    initial = fmin(fitfunct_fmin_pow, guess, args=(pcomps[0:2], flux, order))
    guess = np.append(initial, np.zeros(len(pcomps) - 2))
    t = fmin(fitfunct_fmin_pow, guess, args=(pcomps, flux, order))
    return - np.array(t)

def fitfunct_fmin(scale, pcomp, zeroflux):
    outflux = fitfunct(scale, pcomp, zeroflux)
    sumsq = np.sum(np.abs(outflux))
    return sumsq

def fitfunct_fmin_pow(scale, pcomp, zeroflux, order):
    outflux = fitfunct(scale, pcomp, zeroflux)
    sumsq = np.sum(np.power(np.abs(outflux), order))
    return sumsq

def fitfunct(scale, pcomp, zeroflux):
    outflux = np.copy(zeroflux)
    outflux -= np.dot(scale, pcomp)
    return outflux

def chi2_gtf(obs, expect, err, dof):
    """
    calculates a chi squared of the model fit to the data
    """
    chisqu = 0.
obs = obs expect = expect err = err for i in range(len(obs)): chisqu += ((obs[i] - expect[i]) / err[i]) ** 2 chisqu = chisqu / float(dof) return chisqu def rms(model, data): """ calculates a root mean square of the model fit to the data """ rms = math.sqrt(np.sum((model - data) ** 2) / len(model)) return rms def do_lst_iter(bvs, cad, flux, nsigma, niter, method, order): """ performs an iterative fit of the basis vectors to the light curve after each fit outliers further than nsigma from the fit are removed and the fit recalculated. The sigma actually means a median absolute deviation from the median. """ iiter = 1 fluxnew = np.copy(flux) lcnew = np.copy(cad) bvsnew = np.copy(bvs) if method == 'lst_sq': t = do_lsq_nlin(bvsnew, fluxnew) elif method == 'simplex': t = do_lsq_fmin_pow(bvsnew, fluxnew, order) elif method == 'llsq': t = do_lsq_uhat(bvsnew, fluxnew) bvsum = np.dot(t.T, bvsnew).reshape(-1) while (iiter < niter): iiter += 1 matchrange = 1.4826 * nsigma * MAD_model(np.subtract(fluxnew, bvsum)) mask = np.asarray(abs(fluxnew - bvsum) < matchrange) mask = mask.flatten() fluxnew = fluxnew[mask] lcnew = lcnew[mask] try: bvsnew = np.copy(bvsnew2) except: pass bvsnew2 = newpcompsarray(bvsnew, mask) for i in range(np.shape(bvsnew)[0]): bvsnew2[i] = bvsnew[i][mask] if method == 'llsq': t = do_lsq_uhat(bvsnew2, fluxnew) elif method == 'lst_sq': t = do_lsq_nlin(bvsnew2, fluxnew) elif method == 'simplex': t = do_lsq_fmin_pow(bvsnew2, fluxnew, order) bvsum = np.dot(t.T, bvsnew2).reshape(-1) return t, mask def newpcompsarray(pcomp, mask): pcompnew = np.zeros((np.shape(pcomp)[0], len(mask[mask]))) return pcompnew def MAD_model(xx, minSd=1E-16): """Median Absolute Deviation""" absdev = abs(xx) mad = np.median(absdev, 0) mad = np.maximum(mad, np.multiply(np.ones(mad.shape, np.float32), (minSd / 1.48))) mad = np.asarray(mad) return mad def make_outfile(fitsfile, outfile, flux_new, bvsum, version): """ creates a fits file identical to the input fits file save from containing two extra columns - CBVSAP_MODL and CBVSAP_FLUX which are the sum of basis vectors fit to the data and the resulting corrected flux after the basis vector fit has been subtracted """ if version == 1: unit = 'e-/cadence' flux_new = flux_new * 1625.3514 #convert to e-/cadence elif version == 2: unit = 'e-/s' col1 = pyfits.Column(name='CBVSAP_MODL', format='E13.7 ', unit=unit, array=bvsum) col2 = pyfits.Column(name='CBVSAP_FLUX', format='E13.7 ', unit=unit, array=flux_new) cols = fitsfile[1].columns + col1 + col2 fitsfile[1] = pyfits.BinTableHDU.from_columns(cols, header=fitsfile[1].header) fitsfile.writeto(outfile) def do_plot(date, flux_old, flux_new, bvsum, cad, good_data, cad_nans, version, maskdata, outfile, noninteractive): plt.figure(figsize=[15, 8]) plt.clf() if version == 1: barytime0 = float(int(date[0] / 100) * 100.0) date_sub = date - barytime0 xlab = r'BJD $-$ {}'.format(barytime0+2400000.) elif version == 2: barytime0 = float(int((date[0] + 54833.) / 100) * 100.0) date_sub = date + 54833. - barytime0 xlab = r'BJD $-$ {}'.format(barytime0+2400000.) 
try: nrm1 = len(str(int(flux_old.max()))) - 1 except: nrm1 = 0 flux_old_sub = flux_old / 10 ** nrm1 bvsum_sub = bvsum / 10 ** nrm1 ylab1 = r'10$^%d$ e$^-$ s$^{-1}$' % nrm1 try: nrm2 = len(str(int(flux_new.max()))) - 1 except: nrm2 = 0 flux_new_sub = flux_new / 10 ** nrm2 ylab2 = r'10$^%d$ e$^-$ s$^{-1}$' % nrm2 xmin = min(date_sub) xmax = max(date_sub) ymin1 = min(min(flux_old_sub), min(bvsum_sub)) ymax1 = max(max(flux_old_sub), max(bvsum_sub)) ymin2 = min(flux_new_sub) ymax2 = max(flux_new_sub) xr = xmax - xmin yr1 = ymax1 - ymin1 yr2 = ymax2 - ymin2 ax1 = plt.subplot(211) blocks = split_on_nans(good_data,cad_nans) for i in range(len(blocks)): if i == 0: block = [blocks[0], blocks[i]] else: block = [blocks[i - 1], blocks[i]] mask = np.logical_and(cad >= block[0], cad <= block[1]) plot_x = date_sub[mask] plot_y = flux_old_sub[mask] if np.nan in plot_y: break plt.scatter(plot_x, plot_y, color='#363636', linestyle='-', linewidth=1.0, marker='.', s=5) plot_y = bvsum_sub[mask] plt.plot(plot_x, plot_y, color='#c0392b', linestyle='-', linewidth=2.0) date2 = np.insert(date_sub, [0], [date_sub[0]]) date2 = np.append(date2, [date_sub[-1]]) flux2 = np.insert(flux_old_sub,[0], [0.0]) flux2 = np.append(flux2, [0.0]) plt.fill(date2, flux2, color='#a8a7a7', linewidth=0.0, alpha=0.2, label='Data') if maskdata is not None: for m in maskdata: pos = np.where((barytime0 + 2400000 + date2 > m[0]) & (barytime0 + 2400000 + date2 <= m[1]))[0] plt.fill_between(date2[pos], flux2[pos].min(), flux2[pos].max(), color='#c0392b', linewidth=0.0, alpha=0.3, label='Masked') plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) if ymin1 - yr1 * 0.01 <= 0.0: plt.ylim(1.0e-10, ymax1 + yr1 * 0.01) else: plt.ylim(ymin1 - yr1 * 0.01, ymax1 + yr1 * 0.01) plt.xlabel(xlab, {'color' : 'k'}) plt.ylabel(ylab1, {'color' : 'k'}) plt.grid(ls='--', alpha=0.3) plt.legend() ax2 = plt.subplot(212, sharex=ax1) for i in range(len(blocks)): if i == 0: block = [blocks[0], blocks[i]] else: block = [blocks[i - 1], blocks[i]] mask = np.logical_and(cad >= block[0], cad <= block[1]) plot_x = date_sub[mask] plot_y = flux_new_sub[mask] if np.nan in plot_y: break plt.scatter(plot_x, plot_y, color='#363636', linestyle='-', linewidth=1.0, marker='.', s=5) plot_y = bvsum_sub[mask] date2 = np.insert(date_sub, [0], [date_sub[0]]) date2 = np.append(date2, [date_sub[-1]]) flux2 = np.insert(flux_new_sub, [0], [0.0]) flux2 = np.append(flux2, [0.0]) plt.fill(date2, flux2, fc='#a8a7a7', alpha=0.2, linewidth=0.0) if maskdata is not None: for m in maskdata: pos = np.where((barytime0 + 2400000 + date2 >m[0]) & (barytime0 + 2400000 + date2 <=m [1]))[0] plt.fill_between(date2[pos], flux2[pos].min(), flux2[pos].max(), color='#c0392b', linewidth=0.0, alpha=0.3) plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01) if ymin2-yr2*0.01 <= 0.0: plt.ylim(1.0e-10, ymax2 + yr2 * 0.01) else: plt.ylim(ymin2 - yr2 * 0.01, ymax2 + yr2 * 0.01) plt.xlabel(xlab, {'color' : 'k'}) plt.ylabel(ylab2, {'color' : 'k'}) plt.grid(ls='--',alpha=0.3) plt.subplots_adjust(0.1, 0.1, 0.94, 0.94, 0.0, 0.0) plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False)) # render plot plt.savefig(re.sub('.fits', '.png', outfile), bbox_inches='tight') if not noninteractive: plt.show() def split_on_nans(good_data, cad): blocks = [] time_of_nans = cad[~good_data] if good_data[0]: blocks.append(cad[0]) for i in range(1, len(time_of_nans)): if time_of_nans[i] - time_of_nans[i - 1] > 1: blocks.append(time_of_nans[i]) if good_data[-1]: 
blocks.append(cad[-1]) return blocks def kepcotrend(infile, bvfile, listbv, outfile=None, fitmethod='llsq', fitpower=1, iterate=False, sigma=None, maskfile='', scinterp='linear', plot=False, noninteractive=False, overwrite=False, verbose=False, logfile='kepcotrend.log'): """ kepcotrend -- Remove systematic trends Kepler light curves using cotrending basis vectors. The cotrending basis vectors files can be found here: http://archive.stsci.edu/kepler/cbv.html Simple Aperture Photometry (SAP) data often contain systematic trends associated with the spacecraft, detector and environment rather than the target. See the the Kepler data release notes for descriptions of systematics and the cadences that they affect. Within the Kepler pipeline these contaminants are treated during Pre-search Data Conditioning (PDC) and cleaned data are provided in the light curve files archived at MAST within the column PDCSAP_FLUX. The Kepler pipeline attempts to remove systematics with a combination of data detrending and cotrending against engineering telemetry from the spacecraft such as detector temperatures. These processes are imperfect but tackled in the spirit of correcting as many targets as possible with enough accuracy for the mission to meet exoplanet detection specifications. The imperfections in the method are most apparent in variable stars, those stars that are of most interest for stellar astrophysics. The PDC correction can occasionally hamper data analysis or, at worst, destroy astrophysical signal from the target. While data filtering (``kepoutlier``, ``kepfilter``) and data detrending with analytical functions (``kepdetrend``) often provide some mitigation for data artifacts, these methods require assumptions and often result in lossy data. An alternative viable approach is to identify the photometric variability common to all of the stars neighboring the target and subtract those trends from the target. In principle, the correct choice, weighting and subtraction of these common trends will leave behind a corrected flux time series which better represents statistically the true signal from the target. While GOs, KASC members and archive users wait for the Kepler project to release quarters of data, they do not have access to all the light curve data neighboring their targets and so cannot take the ensemble approach themselves without help. To mitigate this problem the Kepler Science Office have made available ancillary data which describes the systematic trends present in the ensemble flux data for each CCD channel. These data are known as the Cotrending Basis Vectors (CBVs). More details on the method used to generate these basis vectors will be provided in the Kepler Data Processing Handbook soon, but until that time, a summary of the method is given here. To create the initial basis set, that is the flux time series' that are used to make the cotrending basis vectors: The time series photometry of each star on a specific detector channel is normalized by its own median flux. One (unity) is subtracted from each time series so that the median value of the light curve is zero. The time series is divided by the root-mean square of the photometry. The correlation between each time series on the CCD channel is calculated using the median and root-mean square normalized flux. The median absolute correlation is then calculated for each star. All stars on the channel are sorted into ascending order of correlation. The 50 percent most correlated stars are selected. 
The median normalized fluxes only (as opposed to the root-mean square normalized fluxes) are now used for the rest of the process Singular Value Decomposition is applied to the matrix of correlated sources to create orthonormal basis vectors from the U matrix, sorted by their singular values. The archived cotrending basis vectors are a reduced-rank representation of the full set of basis vectors and consist of the 16 leading columns. To correct a SAP light curve, :math:`Fsap`, for systematic features, ``kepcotrend`` employs the cotrending basis vectors :math:`CBVi`. The task finds the coefficients :math:`Ai` which minimize .. math:: Fcbv = Fsap - \sum_{i} Ai \cdot CBV_i The corrected light curve, Fcbv, can be tailored to the needs of the user and their scientific objective. The user decides which combination of basis vectors best removes systematics from their specific Kepler SAP light curve. In principle the user can choose any combination of cotrending basis vectors to fit to the data. However, experience suggests that most choices will be to decide how many sequential basis vectors to include in the fit, starting with first vector. For example a user is much more likely to choose a vector combination 1, 2, 3, 4, 5, 6 etc over e.g. a combination 1, 2, 5, 7, 8, 10, 12. The user should always include at least the first two basis vectors. The number of basis vectors used is directly related to the scientific aims of the user and the light curve being analyzed and experimental iteration towards a target-specific optimal basis set is recommended. Occasionally kepcotrend over-fits the data and removes real astrophysical signal. This is particularly prevalent if too many basis vectors are used. A good rule of thumb is to start with two basis vectors and increase the number until there is no improvement, or signals which are thought to be astrophysical start to become distorted. The user is given a choice of fitting algorithm to use. For most purposes the linear least squares method is both the fastest and the most accurate because it gives the exact solution to the least squares problem. However we have found a few situations where the best solution, scientifically, comes from using the simplex fitting algorithm which performs something other than a least squares fit. Performing a least absolute residuals fit (fitpower=1.0), for example, is more robust to outliers. There are instances when the fit performs sub-optimally due to the presence of certain events in the light curve. For this reason we have included two options which can be used individually or simultaneously to improve the fit - iterative fitting and data masking. Iterative fitting performs the fit and rejects data points that are greater than a specified distance from the optimal fit before re-fitting. The lower threshold for data clipping is provided by the user as the number of sigma from the best fit. The clipping threshold is more accurately defined as the number of Median Absolute Deviations (MADs) multiplied by 1.4826. The distribution of MAD will be identical to the distribution of standard deviation if the distribution is Gaussian. We use MAD because in highly non-Gaussian distributions MAD is more robust to outliers than standard deviation. The code will print out the coefficients fit to each basis vector, the root-mean square of the fit and the chi-squared value of the fit. 
The rms and the chi-squared value include only the data points included in the fit so if an iterative fit is performed these clipped values are not included in this calculation. Parameters ---------- infile : str the input file in the FITS format obtained from MAST outfile : str the output will be a fits file in the same style as the input file but with two additional columns: CBVSAP_MODL and CBVSAP_FLUX. The first of these is the best fitting linear combination of basis vectors. The second is the new flux with the basis vector sum subtracted. This is the new flux value. bvfile : str the name of the FITS file containing the basis vectors listbv : list of integers the basis vectors to fit to the data fitmethod : str fit using either the 'llsq' or the 'simplex' method. 'llsq' is usually the correct one to use because as the basis vectors are orthogonal. Simplex gives you option of using a different merit function - ie. you can minimise the least absolute residual instead of the least squares which weights outliers less fitpower : float if using a simplex you can chose your own power in the metir function - i.e. the merit function minimises :math:`abs(Obs - Mod)^P`. :math:`P = 2` is least squares, :math:`P = 1` minimises least absolutes iterate : bool should the program fit the basis vectors to the light curve data then remove data points further than 'sigma' from the fit and then refit maskfile : str this is the name of a mask file which can be used to define regions of the flux time series to exclude from the fit. The easiest way to create this is by using ``keprange`` from the PyKE set of tools. You can also make this yourself with two BJDs on each line in the file specifying the beginning and ending date of the region to exclude. scinterp : str the basis vectors are only calculated for long cadence data, therefore if you want to use short cadence data you have to interpolate the basis vectors. There are several methods to do this, the best of these probably being nearest which picks the value of the nearest long cadence data point. The options available are: * linear * nearest * zero * slinear * quadratic * cubic plot : bool Plot the data and result? non-interactive : bool If True, prevents the matplotlib window to pop up. overwrite : bool Overwrite the output file? verbose : bool Print informative messages and warnings to the shell and logfile? logfile : str Name of the logfile containing error and warning messages. Examples -------- .. 
code-block:: bash $ kepcotrend kplr005110407-2009350155506_llc.fits ~/cbv/kplr2009350155506-q03-d25_lcbv.fits '1 2 3' --plot --verbose """ if outfile is None: outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0]) # log the call hashline = '--------------------------------------------------------------' kepmsg.log(logfile, hashline, verbose) call = ('KEPCOTREND -- ' + ' infile={}'.format(infile) + ' outfile={}'.format(outfile) + ' bvfile={}'.format(bvfile) + ' listbv={} '.format(listbv) + ' fitmethod={}'.format(fitmethod) + ' fitpower={}'.format(fitpower) + ' iterate={}'.format(iterate) + ' sigma_clip={}'.format(sigma) + ' mask_file={}'.format(maskfile) + ' scinterp={}'.format(scinterp) + ' plot={}'.format(plot) + ' overwrite={}'.format(overwrite) + ' verbose={}'.format(verbose) + ' logfile={}'.format(logfile)) kepmsg.log(logfile, call+'\n', verbose) # start time kepmsg.clock('KEPCOTREND started at', logfile, verbose) # overwrite output file if overwrite: kepio.overwrite(outfile, logfile, verbose) if kepio.fileexists(outfile): errmsg = 'ERROR -- KEPCOTREND: {} exists. Use --overwrite'.format(outfile) kepmsg.err(logfile, errmsg, verbose) # open input file instr = pyfits.open(infile) tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile, verbose) # fudge non-compliant FITS keywords with no values instr = kepkey.emptykeys(instr, infile, logfile, verbose) if not kepio.fileexists(bvfile): message = 'ERROR -- KEPCOTREND: ' + bvfile + ' does not exist.' kepmsg.err(logfile, message, verbose) #lsq_sq - nonlinear least squares fitting and simplex_abs have been #removed from the options in PyRAF but they are still in the code! if fitmethod not in ['llsq','matrix','lst_sq','simplex_abs','simplex']: errmsg = 'Fit method must either: llsq, matrix, lst_sq or simplex' kepmsg.err(logfile, errmsg, verbose) if not is_numlike(fitpower) and fitpower is not None: errmsg = 'Fit power must be an real number or None' kepmsg.err(logfile, errmsg, verbose) if fitpower is None: fitpower = 1. 
# input data short = False try: test = str(instr[0].header['FILEVER']) version = 2 except KeyError: version = 1 table = instr[1].data if version == 1: if str(instr[1].header['DATATYPE']) == 'long cadence': quarter = str(instr[1].header['QUARTER']) module = str(instr[1].header['MODULE']) output = str(instr[1].header['OUTPUT']) channel = str(instr[1].header['CHANNEL']) lc_cad_o = table.field('cadence_number') lc_date_o = table.field('barytime') lc_flux_o = table.field('ap_raw_flux') / 1625.3468 #convert to e-/s lc_err_o = table.field('ap_raw_err') / 1625.3468 #convert to e-/s elif str(instr[1].header['DATATYPE']) == 'short cadence': short = True quarter = str(instr[1].header['QUARTER']) module = str(instr[1].header['MODULE']) output = str(instr[1].header['OUTPUT']) channel = str(instr[1].header['CHANNEL']) lc_cad_o = table.field('cadence_number') lc_date_o = table.field('barytime') lc_flux_o = table.field('ap_raw_flux') / 54.178 #convert to e-/s lc_err_o = table.field('ap_raw_err') / 54.178 #convert to e-/s elif version >= 2: if str(instr[0].header['OBSMODE']) == 'long cadence': quarter = str(instr[0].header['QUARTER']) module = str(instr[0].header['MODULE']) output = str(instr[0].header['OUTPUT']) channel = str(instr[0].header['CHANNEL']) lc_cad_o = table.field('CADENCENO') lc_date_o = table.field('TIME') lc_flux_o = table.field('SAP_FLUX') lc_err_o = table.field('SAP_FLUX_ERR') elif str(instr[0].header['OBSMODE']) == 'short cadence': short = True quarter = str(instr[0].header['QUARTER']) module = str(instr[0].header['MODULE']) output = str(instr[0].header['OUTPUT']) channel = str(instr[0].header['CHANNEL']) lc_cad_o = table.field('CADENCENO') lc_date_o = table.field('TIME') lc_flux_o = table.field('SAP_FLUX') lc_err_o = table.field('SAP_FLUX_ERR') if str(quarter) == str(4) and version == 1: lc_cad_o = lc_cad_o[lc_cad_o >= 11914] lc_date_o = lc_date_o[lc_cad_o >= 11914] lc_flux_o = lc_flux_o[lc_cad_o >= 11914] lc_err_o = lc_err_o[lc_cad_o >= 11914] if short and scinterp == None: errmsg = ('You cannot select None as the interpolation method ' 'because you are using short cadence data and ' 'therefore must use some form of interpolation. I ' 'reccommend nearest if you are unsure.') kepmsg.err(logfile, errmsg, verbose) bvfiledata = pyfits.open(bvfile) bvdata = bvfiledata['MODOUT_{0}_{1}'.format(module, output)].data if int(bvfiledata[0].header['QUARTER']) != int(quarter): errmsg = ('CBV file and light curve file are from different ' 'quarters. CBV file is from Q{0} and light curve is ' 'from Q{1}'.format(int(bvfiledata[0].header['QUARTER']), int(quarter))) kepmsg.err(logfile, errmsg, verbose) if int(quarter) == 4 and int(module) == 3: errmsg = ('Approximately twenty days into Q4 Module 3 failed. ' 'As a result, Q4 light curves contain these 20 day ' 'of data. 
However, we do not calculate CBVs for ' 'this section of data.') kepmsg.err(logfile, errmsg, verbose) #cut out infinites and zero flux columns lc_cad, lc_date, lc_flux, lc_err, good_data = cut_bad_data(lc_cad_o, lc_date_o, lc_flux_o, lc_err_o) #get a list of basis vectors to use from the list given #accept different seperators if len(listbv) == 1: bvlist = [listbv] else: listbv = listbv.strip() if listbv[1] in [' ', ',', ':', ';', '|', ', ']: separator = str(listbv)[1] else: message = ('You must separate your basis vector numbers to use ' 'with \' \' \',\' \':\' \';\' or \'|\' and the ' 'first basis vector to use must be between 1 and 9') kepmsg.err(logfile, message, verbose) bvlist = np.fromstring(listbv, dtype=int, sep=separator) if bvlist[0] == 0: errmsg = 'Must use at least one basis vector' kepmsg.err(logfile, errmsg, verbose) if short: bvdata.field('CADENCENO')[:] = ((((bvdata.field('CADENCENO')[:] + (7.5 / 15.) ) * 30.) - 11540.).round()) bvectors = get_pcomp_list_newformat(bvdata, bvlist, lc_cad, short, scinterp) medflux = np.median(lc_flux) n_flux = (lc_flux / medflux) - 1 n_err = np.sqrt(lc_err * lc_err / (medflux * medflux)) if maskfile != '': domasking = True if not kepio.fileexists(maskfile): errmsg = 'Maskfile {} does not exist'.format(maskfile) kepmsg.err(logfile, errmsg, verbose) else: domasking = False if domasking: lc_date_masked = np.copy(lc_date) n_flux_masked = np.copy(n_flux) lc_cad_masked = np.copy(lc_cad) n_err_masked = np.copy(n_err) maskdata = np.atleast_2d(np.genfromtxt(maskfile, delimiter=',')) mask = np.ones(len(lc_date_masked), dtype=bool) for maskrange in maskdata: if version == 1: start = maskrange[0] - 2400000.0 end = maskrange[1] - 2400000.0 elif version == 2: start = maskrange[0] - 2454833. end = maskrange[1] - 2454833. 
masknew = np.logical_xor(lc_date < start, lc_date > end) mask = np.logical_and(mask,masknew) lc_date_masked = lc_date_masked[mask] n_flux_masked = n_flux_masked[mask] lc_cad_masked = lc_cad_masked[mask] n_err_masked = n_err_masked[mask] else: lc_date_masked = np.copy(lc_date) n_flux_masked = np.copy(n_flux) lc_cad_masked = np.copy(lc_cad) n_err_masked = np.copy(n_err) bvectors_masked = get_pcomp_list_newformat(bvdata, bvlist, lc_cad_masked, short, scinterp) if iterate and sigma is None: errmsg = 'If fitting iteratively you must specify a clipping range' kepmsg.err(logfile, errmsg, verbose) #uses Pvals = yhat * U_transpose if iterate: coeffs, fittedmask = do_lst_iter(bvectors_masked, lc_cad_masked, n_flux_masked, sigma, 50., fitmethod, fitpower) else: if fitmethod == 'lst_sq': coeffs = do_lsq_nlin(bvectors_masked, n_flux_masked) elif fitmethod == 'simplex': coeffs = do_lsq_fmin_pow(bvectors_masked, n_flux_masked, fitpower) else: coeffs = do_lsq_uhat(bvectors_masked, n_flux_masked) coeffs = np.asarray(coeffs) flux_after = medflux * (n_flux + np.dot(coeffs.T, bvectors) + 1).reshape(-1) flux_after_masked = medflux * (n_flux_masked + np.dot(coeffs.T, bvectors_masked) + 1).reshape(-1) bvsum = np.dot(coeffs.T, bvectors).reshape(-1) bvsum_masked = np.dot(coeffs.T, bvectors_masked).reshape(-1) bvsum_nans = put_in_nans(good_data, bvsum) flux_after_nans = put_in_nans(good_data, flux_after) if plot: if not domasking: maskdata = None newmedflux = np.median(flux_after + 1) bvsum_un_norm = newmedflux * (1 - bvsum) do_plot(lc_date, lc_flux, flux_after, bvsum_un_norm, lc_cad, good_data, lc_cad_o, version, maskdata, outfile, noninteractive) print("Writing output file {}...".format(outfile)) make_outfile(instr, outfile, flux_after_nans, bvsum_nans, version) # close input file instr.close() #print some results to screen: print(' ----- ') if iterate: flux_fit = n_flux_masked[fittedmask] sum_fit = bvsum_masked[fittedmask] err_fit = n_err_masked[fittedmask] else: flux_fit = n_flux_masked sum_fit = bvsum_masked err_fit = n_err_masked print('reduced chi2: {}'.format(chi2_gtf(flux_fit, sum_fit, err_fit, len(flux_fit) - len(coeffs)))) print('rms: {}'.format(medflux * rms(flux_fit, sum_fit))) for i in range(len(coeffs)): print('Coefficient of CBV #{0}: {1}'.format(i + 1, coeffs[i])) print(' ----- ') # end time kepmsg.clock('KEPCOTREND completed at', logfile, verbose) def kepcotrend_main(): import argparse parser = argparse.ArgumentParser( description=('Remove systematic trends in photometry using' ' cotrending basis vectors (CBV)'), formatter_class=PyKEArgumentHelpFormatter) parser.add_argument('infile', help='Name of input file', type=str) parser.add_argument('cbvfile', help='Name of file containing the CBVs', type=str) parser.add_argument('listbv', help='The CBVs to use', type=str) parser.add_argument('--outfile', help=('Name of FITS file to output.' 
' If None, outfile is infile-kepcotrend.'), default=None) parser.add_argument('--method', '-m', help='Fitting method', default='llsq', dest='fitmethod', type=str, choices=['llsq', 'simplex', 'lst_sq']) parser.add_argument('--fitpower', '-f', help='The index of the merit function (simplex only)', default=1, type=float) parser.add_argument('--iterate', action='store_true', help='Fit iteratively ', dest='iterate') parser.add_argument('--sigmaclip', type=float, help='Sigma clip value when iteratively fitting', default=None, dest='sigma') parser.add_argument('--maskfile', '-q', help='Name of file containing a mask', default='', dest='maskfile', type=str) parser.add_argument('--scinterp', type=str, help='Short cadence interpolation method', default='linear', choices=['linear', 'nearest', 'slinear', 'quadratic', 'cubic']) parser.add_argument('--plot', '-p', action='store_true', help='Plot result?') parser.add_argument('--non-interactive', action='store_true', help='Pop up matplotlib plot window?', dest='noninteractive') parser.add_argument('--overwrite', action='store_true', help='Overwrite output file?') parser.add_argument('--verbose', action='store_true', help='Write to a log file?') parser.add_argument('--logfile', '-l', help='Name of ascii log file', default='kepcotrend.log', type=str) args = parser.parse_args() kepcotrend(args.infile, args.cbvfile, args.listbv, args.outfile, args.fitmethod, args.fitpower, args.iterate, args.sigma, args.maskfile, args.scinterp, args.plot, args.noninteractive, args.overwrite, args.verbose, args.logfile)
mit
1,889,564,801,688,682,000
41.793443
135
0.600334
false
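The kepcotrend module above removes systematics by fitting cotrending basis vectors to the light curve, optionally iterating and clipping points further than nsigma times 1.4826 times the MAD of the residuals before refitting. The sketch below reproduces only that idea with plain numpy on synthetic data; it is not PyKE's implementation, and fit_cbvs, the toy basis vectors and the thresholds are assumptions made for illustration.

import numpy as np


def fit_cbvs(flux, cbvs, nsigma=3.0, niter=2):
    # Iteratively fit basis vectors to a flux series, clipping cadences whose
    # residual exceeds nsigma * 1.4826 * MAD before refitting (same spirit as
    # kepcotrend's do_lst_iter, not its code).
    mask = np.ones(len(flux), dtype=bool)
    coeffs = np.zeros(cbvs.shape[0])
    for _ in range(niter):
        # ordinary least squares on the cadences that are currently kept
        coeffs = np.linalg.lstsq(cbvs[:, mask].T, flux[mask], rcond=None)[0]
        resid = flux - cbvs.T.dot(coeffs)
        mad = np.median(np.abs(resid[mask] - np.median(resid[mask])))
        mask = np.abs(resid) < nsigma * 1.4826 * mad
    return coeffs, mask


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = np.linspace(0., 1., 500)
    cbvs = np.vstack([np.sin(2 * np.pi * t), t - 0.5])    # two toy basis vectors
    flux = 0.8 * cbvs[0] - 0.3 * cbvs[1] + 0.01 * rng.normal(size=t.size)
    flux[::50] += 0.5                                      # inject a few outliers
    coeffs, kept = fit_cbvs(flux, cbvs)
    print("fitted coefficients:", coeffs, "- cadences kept:", kept.sum())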
jgrizou/explauto
explauto/environment/dynamic_environment.py
1
7153
import numpy as np from matplotlib import animation, rc from IPython.display import HTML from explauto.models.dmp import DmpPrimitive from explauto.utils.utils import bounds_min_max from explauto.environment.environment import Environment class DynamicEnvironment(Environment): def __init__(self, env_cls, env_cfg, m_mins, m_maxs, s_mins, s_maxs, n_bfs, move_steps, n_dynamic_motor_dims, n_dynamic_sensori_dims, max_params, motor_traj_type="DMP", sensori_traj_type="samples", optim_initial_position=False, optim_end_position=False, default_motor_initial_position=None, default_motor_end_position=None, default_sensori_initial_position=None, default_sensori_end_position=None): self.env = env_cls(**env_cfg) self.n_bfs = n_bfs self.n_motor_traj_points = self.n_bfs self.n_sensori_traj_points = self.n_bfs self.move_steps = move_steps self.n_dynamic_motor_dims = n_dynamic_motor_dims self.n_dynamic_sensori_dims = n_dynamic_sensori_dims self.max_params = max_params self.motor_traj_type = motor_traj_type self.sensori_traj_type = sensori_traj_type self.optim_initial_position = optim_initial_position self.optim_end_position = optim_end_position Environment.__init__(self, m_mins, m_maxs, s_mins, s_maxs) self.s_traj = None if self.motor_traj_type == "DMP": self.init_motor_DMP(optim_initial_position, optim_end_position, default_motor_initial_position, default_motor_end_position) else: raise NotImplementedError if self.sensori_traj_type == "DMP": self.init_sensori_DMP(optim_initial_position, optim_end_position, default_sensori_initial_position, default_sensori_end_position) elif self.sensori_traj_type == "samples": self.samples = np.array(np.linspace(-1, self.move_steps-1, self.n_sensori_traj_points + 1), dtype=int)[1:] elif self.sensori_traj_type == "end_point": self.end_point = self.move_steps-1 else: raise NotImplementedError def reset(self): self.env.reset() def init_motor_DMP(self, optim_initial_position=True, optim_end_position=True, default_motor_initial_position=None, default_motor_end_position=None): default = np.zeros(self.n_dynamic_motor_dims * (self.n_bfs + 2)) if not optim_initial_position: default[:self.n_dynamic_motor_dims] = default_motor_initial_position or [0.] * self.n_dynamic_motor_dims dims_optim = [False] * self.n_dynamic_motor_dims else: dims_optim = [True] * self.n_dynamic_motor_dims dims_optim += [True] * (self.n_dynamic_motor_dims * self.n_bfs) if not optim_end_position: default[-self.n_dynamic_motor_dims:] = default_motor_end_position or [0.] 
* self.n_dynamic_motor_dims dims_optim += [False] * self.n_dynamic_motor_dims else: dims_optim += [True] * self.n_dynamic_motor_dims self.motor_dmp = DmpPrimitive(self.n_dynamic_motor_dims, self.n_bfs, dims_optim, default, type='discrete', timesteps=self.move_steps) def init_sensori_DMP(self, optim_initial_position=True, optim_end_position=True, default_sensori_initial_position=None, default_sensori_end_position=None): default = np.zeros(self.n_dynamic_sensori_dims * (self.n_sensori_traj_points + 2)) if not optim_initial_position: default[:self.n_dynamic_sensori_dims] = default_sensori_initial_position dims_optim = [False] * self.n_dynamic_sensori_dims else: dims_optim = [True] * self.n_dynamic_sensori_dims dims_optim += [True] * (self.n_dynamic_sensori_dims * self.n_sensori_traj_points) if not optim_end_position: default[-self.n_dynamic_sensori_dims:] = default_sensori_end_position dims_optim += [False] * self.n_dynamic_sensori_dims else: dims_optim += [True] * self.n_dynamic_sensori_dims self.sensori_dmp = DmpPrimitive(self.n_dynamic_sensori_dims, self.n_sensori_traj_points, dims_optim, default, type='discrete', timesteps=self.move_steps) def compute_motor_command(self, m_ag): m_ag = bounds_min_max(m_ag, self.conf.m_mins, self.conf.m_maxs) if self.motor_traj_type == "DMP": dyn_idx = range(self.n_dynamic_motor_dims * self.n_motor_traj_points) m_weighted = m_ag[dyn_idx] * self.max_params if self.optim_initial_position: m_weighted[:self.n_dynamic_motor_dims] = m_weighted[:self.n_dynamic_motor_dims] / self.max_params if self.optim_end_position: m_weighted[-self.n_dynamic_motor_dims:] = m_weighted[-self.n_dynamic_motor_dims:] / self.max_params m_dyn = self.motor_dmp.trajectory(m_weighted) static_idx = range(self.n_dynamic_motor_dims * self.n_motor_traj_points, self.conf.m_ndims) m_static = m_ag[static_idx] m = [list(m_dyn_param) + list(m_static) for m_dyn_param in list(m_dyn)] else: raise NotImplementedError return m def compute_sensori_effect(self, m_traj): s = self.env.update(m_traj, reset=False, log=False) self.s_traj = s y = np.array(s[:self.move_steps]) if self.sensori_traj_type == "DMP": self.sensori_dmp.dmp.imitate_path(np.transpose(y)) w = self.sensori_dmp.dmp.w.flatten() s_ag = list(w) elif self.sensori_traj_type == "samples": w = y[self.samples,:] s_ag = list(np.transpose(w).flatten()) elif self.sensori_traj_type == "end_point": s_ag = y[self.end_point,:].flatten() else: raise NotImplementedError s = s_ag return bounds_min_max(s, self.conf.s_mins, self.conf.s_maxs) def update(self, m_ag, reset=True, log=False): if reset: self.reset() if len(np.array(m_ag).shape) == 1: s = self.one_update(m_ag, log) else: s = [] for m in m_ag: s.append(self.one_update(m, log)) s = np.array(s) return s def plot(self, fig, ax, **kwargs): ax.cla() ax.set_aspect('equal') ax.set_xlim((-1.5, 1.5)) ax.set_ylim((-1.5, 1.5)) def animate(i): return tuple(self.env.plot_update(ax, i)) return animation.FuncAnimation(fig, animate, frames=50, interval=50, blit=True).to_html5_video()
gpl-3.0
8,928,876,743,781,344,000
47.006711
159
0.572207
false
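DynamicEnvironment above turns a full sensory trajectory into a fixed-length perception either by fitting a DMP or, in 'samples' mode, by keeping a handful of evenly spaced time steps. The sketch below mirrors only that 'samples' indexing from compute_sensori_effect; sample_sensory_trajectory and the toy trajectory are illustrative names, not part of explauto.

import numpy as np


def sample_sensory_trajectory(s_traj, n_points):
    # Keep n_points evenly spaced rows of a (move_steps, n_dims) trajectory and
    # flatten them dimension by dimension, as the 'samples' mode does above.
    move_steps = s_traj.shape[0]
    samples = np.array(np.linspace(-1, move_steps - 1, n_points + 1), dtype=int)[1:]
    return np.transpose(s_traj[samples, :]).flatten()


if __name__ == "__main__":
    steps = np.linspace(0., np.pi, 50)
    traj = np.column_stack([np.linspace(0., 1., 50), np.cos(steps)])  # toy 2-D sensory trajectory
    print(sample_sensory_trajectory(traj, n_points=5))                # 2 dims x 5 samples = 10 values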
marrabld/planarradpy
gui/matplotlibwidgetFile.py
1
3421
#!/usr/bin/env python
import sys

from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import scipy
from matplotlib.figure import Figure
import matplotlib.pyplot

sys.path.append('..')
sys.path.append('../gui')


class MplCanvas(FigureCanvas):
    def __init__(self):
        """
        Initialize the figure and the canvas it is drawn on.

        Inputs :
            FigureCanvas : The canvas for the figure.
        """
        self.fig = Figure()
        self.picture = self.fig.add_subplot(111)
        FigureCanvas.__init__(self, self.fig)
        self.x_data = []
        self.y_data = [[]]
        self.num_plot = 0

    def update_fields(self, x_data, y_data, num_plot):
        """
        Update the data needed to display the curves; called by
        "data_processing" in "gui_mainLayout".

        Inputs :
            x_data : An array of wavelengths.
            y_data : An array holding the curves' data.
            num_plot : The index of the curve to highlight.
        """
        self.x_data = x_data
        self.y_data = y_data
        self.num_plot = num_plot

    def display_graphic(self, flag_curves, ui):
        """
        Plot the results of a file onto the canvas.

        Inputs :
            flag_curves : A boolean telling whether all curves have to be plotted.
            ui : The main window.
        """
        ui.graphic_widget.canvas.picture.clear()
        x = scipy.linspace(self.x_data[0], self.x_data[-1], len(self.x_data))  # X-axis
        curve_wanted = 0  # Iterator over the lines of y_data
        for curve in self.y_data:
            if flag_curves:
                if curve_wanted == self.num_plot:
                    # If the iterator equals the slider's value, this curve is highlighted
                    ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
                                                          label='Case : {0}/{1}'.format(str(curve_wanted + 1),
                                                                                        str(len(self.y_data))),
                                                          linewidth=4)
                else:
                    ui.graphic_widget.canvas.picture.plot(x, curve, '0.75')
            else:
                if curve_wanted == self.num_plot:
                    ui.graphic_widget.canvas.picture.plot(x, curve, '-r',
                                                          label='Case : {0}/{1}'.format(str(curve_wanted + 1),
                                                                                        str(len(self.y_data))))
            curve_wanted += 1
        ui.graphic_widget.canvas.picture.set_title('Rrs.csv')
        ui.graphic_widget.canvas.picture.set_xlabel('Wavelength (${nm}$)')
        ui.graphic_widget.canvas.picture.set_ylabel('Reflectance ($Sr^{-1}$)')
        self.legend = ui.graphic_widget.canvas.picture.legend()  # Display the curves' labels in a legend.
        ui.graphic_widget.canvas.picture.legend(bbox_to_anchor=(1.1, 1.05))
        ui.graphic_widget.canvas.draw()


class matplotlibWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        """
        Initialize the widget in which the figure will be displayed.
        """
        QtGui.QWidget.__init__(self, parent)
        self.canvas = MplCanvas()
        self.vbl = QtGui.QVBoxLayout()
        self.vbl.addWidget(self.canvas)
        self.setLayout(self.vbl)
gpl-2.0
-188,828,367,548,104,700
38.790698
123
0.539608
false
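display_graphic above highlights the case selected by the slider in red while drawing every other curve in light gray. The sketch below shows that same plotting pattern with plain matplotlib and no PyQt4 dependency so it can run headless; plot_cases, the Agg backend choice and the synthetic reflectance curves are assumptions for illustration only.

import matplotlib
matplotlib.use('Agg')            # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt
import numpy as np


def plot_cases(x, curves, highlighted, outfile='rrs_preview.png'):
    # Every curve in light gray, the selected case in bold red,
    # echoing MplCanvas.display_graphic without the Qt plumbing.
    fig, ax = plt.subplots()
    for i, y in enumerate(curves):
        if i == highlighted:
            ax.plot(x, y, '-r', linewidth=4,
                    label='Case : {0}/{1}'.format(i + 1, len(curves)))
        else:
            ax.plot(x, y, color='0.75')
    ax.set_xlabel('Wavelength (${nm}$)')
    ax.set_ylabel('Reflectance ($Sr^{-1}$)')
    ax.legend(bbox_to_anchor=(1.1, 1.05))
    fig.savefig(outfile, bbox_inches='tight')


if __name__ == "__main__":
    wavelengths = np.linspace(400, 700, 61)
    cases = [0.01 * (k + 1) * np.exp(-((wavelengths - 550) / 80.) ** 2) for k in range(4)]
    plot_cases(wavelengths, cases, highlighted=2)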
NicWayand/xray
xarray/test/test_combine.py
1
13342
from copy import deepcopy import numpy as np import pandas as pd from xarray import Dataset, DataArray, auto_combine, concat, Variable from xarray.core.pycompat import iteritems, OrderedDict from . import TestCase, InaccessibleArray, requires_dask from .test_dataset import create_test_data class TestConcatDataset(TestCase): def test_concat(self): # TODO: simplify and split this test case # drop the third dimension to keep things relatively understandable data = create_test_data().drop('dim3') split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] self.assertDatasetIdentical(data, concat(split_data, 'dim1')) def rectify_dim_order(dataset): # return a new dataset with all variable dimensions transposed into # the order in which they are found in `data` return Dataset(dict((k, v.transpose(*data[k].dims)) for k, v in iteritems(dataset.data_vars)), dataset.coords, attrs=dataset.attrs) for dim in ['dim1', 'dim2']: datasets = [g for _, g in data.groupby(dim, squeeze=False)] self.assertDatasetIdentical(data, concat(datasets, dim)) self.assertDatasetIdentical( data, concat(datasets, data[dim])) self.assertDatasetIdentical( data, concat(datasets, data[dim], coords='minimal')) datasets = [g for _, g in data.groupby(dim, squeeze=True)] concat_over = [k for k, v in iteritems(data.coords) if dim in v.dims and k != dim] actual = concat(datasets, data[dim], coords=concat_over) self.assertDatasetIdentical(data, rectify_dim_order(actual)) actual = concat(datasets, data[dim], coords='different') self.assertDatasetIdentical(data, rectify_dim_order(actual)) # make sure the coords argument behaves as expected data.coords['extra'] = ('dim4', np.arange(3)) for dim in ['dim1', 'dim2']: datasets = [g for _, g in data.groupby(dim, squeeze=True)] actual = concat(datasets, data[dim], coords='all') expected = np.array([data['extra'].values for _ in range(data.dims[dim])]) self.assertArrayEqual(actual['extra'].values, expected) actual = concat(datasets, data[dim], coords='different') self.assertDataArrayEqual(data['extra'], actual['extra']) actual = concat(datasets, data[dim], coords='minimal') self.assertDataArrayEqual(data['extra'], actual['extra']) # verify that the dim argument takes precedence over # concatenating dataset variables of the same name dim = (2 * data['dim1']).rename('dim1') datasets = [g for _, g in data.groupby('dim1', squeeze=False)] expected = data.copy() expected['dim1'] = dim self.assertDatasetIdentical(expected, concat(datasets, dim)) def test_concat_data_vars(self): data = Dataset({'foo': ('x', np.random.randn(10))}) objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))] for data_vars in ['minimal', 'different', 'all', [], ['foo']]: actual = concat(objs, dim='x', data_vars=data_vars) self.assertDatasetIdentical(data, actual) def test_concat_coords(self): data = Dataset({'foo': ('x', np.random.randn(10))}) expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5)) objs = [data.isel(x=slice(5)).assign_coords(c=0), data.isel(x=slice(5, None)).assign_coords(c=1)] for coords in ['different', 'all', ['c']]: actual = concat(objs, dim='x', coords=coords) self.assertDatasetIdentical(expected, actual) for coords in ['minimal', []]: with self.assertRaisesRegexp(ValueError, 'not equal across'): concat(objs, dim='x', coords=coords) def test_concat_constant_index(self): # GH425 ds1 = Dataset({'foo': 1.5}, {'y': 1}) ds2 = Dataset({'foo': 2.5}, {'y': 1}) expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]}) for mode in ['different', 'all', ['foo']]: actual = concat([ds1, ds2], 'y', 
data_vars=mode) self.assertDatasetIdentical(expected, actual) with self.assertRaisesRegexp(ValueError, 'not equal across datasets'): concat([ds1, ds2], 'y', data_vars='minimal') def test_concat_size0(self): data = create_test_data() split_data = [data.isel(dim1=slice(0, 0)), data] actual = concat(split_data, 'dim1') self.assertDatasetIdentical(data, actual) actual = concat(split_data[::-1], 'dim1') self.assertDatasetIdentical(data, actual) def test_concat_autoalign(self): ds1 = Dataset({'foo': DataArray([1, 2], coords={'x': [1, 2]})}) ds2 = Dataset({'foo': DataArray([1, 2], coords={'x': [1, 3]})}) actual = concat([ds1, ds2], 'y') expected = Dataset({'foo': DataArray([[1, 2, np.nan], [1, np.nan, 2]], dims=['y', 'x'], coords={'y': [0, 1], 'x': [1, 2, 3]})}) self.assertDatasetIdentical(expected, actual) def test_concat_errors(self): data = create_test_data() split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] with self.assertRaisesRegexp(ValueError, 'must supply at least one'): concat([], 'dim1') with self.assertRaisesRegexp(ValueError, 'are not coordinates'): concat([data, data], 'new_dim', coords=['not_found']) with self.assertRaisesRegexp(ValueError, 'global attributes not'): data0, data1 = deepcopy(split_data) data1.attrs['foo'] = 'bar' concat([data0, data1], 'dim1', compat='identical') self.assertDatasetIdentical( data, concat([data0, data1], 'dim1', compat='equals')) with self.assertRaisesRegexp(ValueError, 'encountered unexpected'): data0, data1 = deepcopy(split_data) data1['foo'] = ('bar', np.random.randn(10)) concat([data0, data1], 'dim1') with self.assertRaisesRegexp(ValueError, 'compat.* invalid'): concat(split_data, 'dim1', compat='foobar') with self.assertRaisesRegexp(ValueError, 'unexpected value for'): concat([data, data], 'new_dim', coords='foobar') with self.assertRaisesRegexp( ValueError, 'coordinate in some datasets but not others'): concat([Dataset({'x': 0}), Dataset({'x': [1]})], dim='z') with self.assertRaisesRegexp( ValueError, 'coordinate in some datasets but not others'): concat([Dataset({'x': 0}), Dataset({}, {'x': 1})], dim='z') with self.assertRaisesRegexp(ValueError, 'no longer a valid'): concat([data, data], 'new_dim', mode='different') with self.assertRaisesRegexp(ValueError, 'no longer a valid'): concat([data, data], 'new_dim', concat_over='different') def test_concat_promote_shape(self): # mixed dims within variables objs = [Dataset({}, {'x': 0}), Dataset({'x': [1]})] actual = concat(objs, 'x') expected = Dataset({'x': [0, 1]}) self.assertDatasetIdentical(actual, expected) objs = [Dataset({'x': [0]}), Dataset({}, {'x': 1})] actual = concat(objs, 'x') self.assertDatasetIdentical(actual, expected) # mixed dims between variables objs = [Dataset({'x': [2], 'y': 3}), Dataset({'x': [4], 'y': 5})] actual = concat(objs, 'x') expected = Dataset({'x': [2, 4], 'y': ('x', [3, 5])}) self.assertDatasetIdentical(actual, expected) # mixed dims in coord variable objs = [Dataset({'x': [0]}, {'y': -1}), Dataset({'x': [1]}, {'y': ('x', [-2])})] actual = concat(objs, 'x') expected = Dataset({'x': [0, 1]}, {'y': ('x', [-1, -2])}) self.assertDatasetIdentical(actual, expected) # scalars with mixed lengths along concat dim -- values should repeat objs = [Dataset({'x': [0]}, {'y': -1}), Dataset({'x': [1, 2]}, {'y': -2})] actual = concat(objs, 'x') expected = Dataset({}, {'y': ('x', [-1, -2, -2])}) self.assertDatasetIdentical(actual, expected) # broadcast 1d x 1d -> 2d objs = [Dataset({'z': ('x', [-1])}, {'x': [0], 'y': [0]}), Dataset({'z': ('y', [1])}, {'x': [1], 
'y': [0]})] actual = concat(objs, 'x') expected = Dataset({'z': (('x', 'y'), [[-1], [1]])}) self.assertDatasetIdentical(actual, expected) def test_concat_do_not_promote(self): # GH438 objs = [Dataset({'y': ('t', [1])}, {'x': 1}), Dataset({'y': ('t', [2])}, {'x': 1})] expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]}) actual = concat(objs, 't') self.assertDatasetIdentical(expected, actual) objs = [Dataset({'y': ('t', [1])}, {'x': 1}), Dataset({'y': ('t', [2])}, {'x': 2})] with self.assertRaises(ValueError): concat(objs, 't', coords='minimal') def test_concat_dim_is_variable(self): objs = [Dataset({'x': 0}), Dataset({'x': 1})] coord = Variable('y', [3, 4]) expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]}) actual = concat(objs, coord) self.assertDatasetIdentical(actual, expected) def test_concat_multiindex(self): x = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']]) expected = Dataset({'x': x}) actual = concat([expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], 'x') assert expected.equals(actual) assert isinstance(actual.x.to_index(), pd.MultiIndex) @requires_dask # only for toolz def test_auto_combine(self): objs = [Dataset({'x': [0]}), Dataset({'x': [1]})] actual = auto_combine(objs) expected = Dataset({'x': [0, 1]}) self.assertDatasetIdentical(expected, actual) actual = auto_combine([actual]) self.assertDatasetIdentical(expected, actual) objs = [Dataset({'x': [0, 1]}), Dataset({'x': [2]})] actual = auto_combine(objs) expected = Dataset({'x': [0, 1, 2]}) self.assertDatasetIdentical(expected, actual) # ensure auto_combine handles non-sorted dimensions objs = [Dataset(OrderedDict([('x', ('a', [0])), ('y', ('a', [0]))])), Dataset(OrderedDict([('y', ('a', [1])), ('x', ('a', [1]))]))] actual = auto_combine(objs) expected = Dataset({'x': ('a', [0, 1]), 'y': ('a', [0, 1]), 'a': [0, 0]}) self.assertDatasetIdentical(expected, actual) objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'y': [1], 'x': [1]})] with self.assertRaisesRegexp(ValueError, 'too many .* dimensions'): auto_combine(objs) objs = [Dataset({'x': 0}), Dataset({'x': 1})] with self.assertRaisesRegexp(ValueError, 'cannot infer dimension'): auto_combine(objs) objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'x': [0]})] with self.assertRaises(KeyError): auto_combine(objs) class TestConcatDataArray(TestCase): def test_concat(self): ds = Dataset({'foo': (['x', 'y'], np.random.random((10, 20))), 'bar': (['x', 'y'], np.random.random((10, 20)))}) foo = ds['foo'] bar = ds['bar'] # from dataset array: expected = DataArray(np.array([foo.values, bar.values]), dims=['w', 'x', 'y']) actual = concat([foo, bar], 'w') self.assertDataArrayEqual(expected, actual) # from iteration: grouped = [g for _, g in foo.groupby('x')] stacked = concat(grouped, ds['x']) self.assertDataArrayIdentical(foo, stacked) # with an index as the 'dim' argument stacked = concat(grouped, ds.indexes['x']) self.assertDataArrayIdentical(foo, stacked) actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True) expected = foo[:2].rename({'x': 'concat_dim'}) self.assertDataArrayIdentical(expected, actual) actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True) expected = foo[:2].rename({'x': 'concat_dim'}) self.assertDataArrayIdentical(expected, actual) with self.assertRaisesRegexp(ValueError, 'not identical'): concat([foo, bar], dim='w', compat='identical') with self.assertRaisesRegexp(ValueError, 'not a valid argument'): concat([foo, bar], dim='w', data_vars='minimal') @requires_dask def test_concat_lazy(self): import dask.array as 
da arrays = [DataArray( da.from_array(InaccessibleArray(np.zeros((3, 3))), 3), dims=['x', 'y']) for _ in range(2)] # should not raise combined = concat(arrays, dim='z') self.assertEqual(combined.shape, (2, 3, 3)) self.assertEqual(combined.dims, ('z', 'x', 'y'))
apache-2.0
-419,925,198,479,077,700
42.888158
101
0.550367
false
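The tests above exercise xarray's concat and auto_combine: splitting a dataset along an existing dimension must round-trip, and concatenating along a new dimension stacks the variables. A short sketch of those two behaviours against the public xarray API (not the internal test helpers) follows; it assumes a reasonably recent xarray release.

import numpy as np
import xarray as xr

# two halves of a dataset split along 'x', then stitched back together
ds = xr.Dataset({'foo': ('x', np.arange(6.0))}, coords={'x': np.arange(6)})
halves = [ds.isel(x=slice(3)), ds.isel(x=slice(3, None))]
roundtrip = xr.concat(halves, dim='x')
assert roundtrip.equals(ds)

# concatenating along a brand new dimension stacks the variables instead
stacked = xr.concat([ds, ds + 1], dim='run')
print(stacked['foo'].shape)   # (2, 6): the new 'run' dimension comes first, then 'x'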
mraspaud/dask
dask/dataframe/utils.py
1
21094
from __future__ import absolute_import, division, print_function import re import textwrap from distutils.version import LooseVersion from collections import Iterator import sys import traceback from contextlib import contextmanager import numpy as np import pandas as pd import pandas.util.testing as tm from pandas.api.types import is_categorical_dtype, is_scalar try: from pandas.api.types import is_datetime64tz_dtype except ImportError: # pandas < 0.19.2 from pandas.core.common import is_datetime64tz_dtype from ..core import get_deps from ..local import get_sync PANDAS_VERSION = LooseVersion(pd.__version__) def shard_df_on_index(df, divisions): """ Shard a DataFrame by ranges on its index Examples -------- >>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]}) >>> df a b 0 0 5 1 10 4 2 20 3 3 30 2 4 40 1 >>> shards = list(shard_df_on_index(df, [2, 4])) >>> shards[0] a b 0 0 5 1 10 4 >>> shards[1] a b 2 20 3 3 30 2 >>> shards[2] a b 4 40 1 >>> list(shard_df_on_index(df, []))[0] # empty case a b 0 0 5 1 10 4 2 20 3 3 30 2 4 40 1 """ if isinstance(divisions, Iterator): divisions = list(divisions) if not len(divisions): yield df else: divisions = np.array(divisions) df = df.sort_index() index = df.index if is_categorical_dtype(index): index = index.as_ordered() indices = index.searchsorted(divisions) yield df.iloc[:indices[0]] for i in range(len(indices) - 1): yield df.iloc[indices[i]: indices[i + 1]] yield df.iloc[indices[-1]:] _META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional" _META_DESCRIPTION = """\ An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and column names of the output. This metadata is necessary for many algorithms in dask dataframe to work. For ease of use, some alternative inputs are also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}`` or iterable of ``(name, dtype)`` can be provided. Instead of a series, a tuple of ``(name, dtype)`` can be used. If not provided, dask will try to infer the metadata. This may lead to unexpected results, so providing ``meta`` is recommended. For more information, see ``dask.dataframe.utils.make_meta``. """ def insert_meta_param_description(*args, **kwargs): """Replace `$META` in docstring with param description. If pad keyword is provided, will pad description by that number of spaces (default is 8).""" if not args: return lambda f: insert_meta_param_description(f, **kwargs) f = args[0] indent = " " * kwargs.get('pad', 8) body = textwrap.wrap(_META_DESCRIPTION, initial_indent=indent, subsequent_indent=indent, width=78) descr = '{0}\n{1}'.format(_META_TYPES, '\n'.join(body)) if f.__doc__: if '$META' in f.__doc__: f.__doc__ = f.__doc__.replace('$META', descr) else: # Put it at the end of the parameters section parameter_header = 'Parameters\n%s----------' % indent[4:] first, last = re.split('Parameters\\n[ ]*----------', f.__doc__) parameters, rest = last.split('\n\n', 1) f.__doc__ = '{0}{1}{2}\n{3}{4}\n\n{5}'.format(first, parameter_header, parameters, indent[4:], descr, rest) return f @contextmanager def raise_on_meta_error(funcname=None): """Reraise errors in this block to show metadata inference failure. Parameters ---------- funcname : str, optional If provided, will be added to the error message to indicate the name of the method that failed. 
""" try: yield except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() tb = ''.join(traceback.format_tb(exc_traceback)) msg = ("Metadata inference failed{0}.\n\n" "Original error is below:\n" "------------------------\n" "{1}\n\n" "Traceback:\n" "---------\n" "{2}" ).format(" in `{0}`".format(funcname) if funcname else "", repr(e), tb) raise ValueError(msg) UNKNOWN_CATEGORIES = '__UNKNOWN_CATEGORIES__' def has_known_categories(x): """Returns whether the categories in `x` are known. Parameters ---------- x : Series or CategoricalIndex """ x = getattr(x, '_meta', x) if isinstance(x, pd.Series): return UNKNOWN_CATEGORIES not in x.cat.categories elif isinstance(x, pd.CategoricalIndex): return UNKNOWN_CATEGORIES not in x.categories raise TypeError("Expected Series or CategoricalIndex") def strip_unknown_categories(x): """Replace any unknown categoricals with empty categoricals. Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results. """ if isinstance(x, (pd.Series, pd.DataFrame)): x = x.copy() if isinstance(x, pd.DataFrame): cat_mask = x.dtypes == 'category' if cat_mask.any(): cats = cat_mask[cat_mask].index for c in cats: if not has_known_categories(x[c]): x[c].cat.set_categories([], inplace=True) elif isinstance(x, pd.Series): if is_categorical_dtype(x.dtype) and not has_known_categories(x): x.cat.set_categories([], inplace=True) if (isinstance(x.index, pd.CategoricalIndex) and not has_known_categories(x.index)): x.index = x.index.set_categories([]) elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x): x = x.set_categories([]) return x def clear_known_categories(x, cols=None, index=True): """Set categories to be unknown. Parameters ---------- x : DataFrame, Series, Index cols : iterable, optional If x is a DataFrame, set only categoricals in these columns to unknown. By default, all categorical columns are set to unknown categoricals index : bool, optional If True and x is a Series or DataFrame, set the clear known categories in the index as well. """ if isinstance(x, (pd.Series, pd.DataFrame)): x = x.copy() if isinstance(x, pd.DataFrame): mask = x.dtypes == 'category' if cols is None: cols = mask[mask].index elif not mask.loc[cols].all(): raise ValueError("Not all columns are categoricals") for c in cols: x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True) elif isinstance(x, pd.Series): if is_categorical_dtype(x.dtype): x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True) if index and isinstance(x.index, pd.CategoricalIndex): x.index = x.index.set_categories([UNKNOWN_CATEGORIES]) elif isinstance(x, pd.CategoricalIndex): x = x.set_categories([UNKNOWN_CATEGORIES]) return x def _empty_series(name, dtype, index=None): if isinstance(dtype, str) and dtype == 'category': return pd.Series(pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index).iloc[:0] return pd.Series([], dtype=dtype, name=name, index=index) def make_meta(x, index=None): """Create an empty pandas object containing the desired metadata. Parameters ---------- x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or an iterable of `(name, dtype)` tuples. To create a `Series`, provide a tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index should match the desired output. If a dtype or scalar, a scalar of the same dtype is returned. index : pd.Index, optional Any pandas index to use in the metadata. If none provided, a `RangeIndex` will be used. 
Examples -------- >>> make_meta([('a', 'i8'), ('b', 'O')]) Empty DataFrame Columns: [a, b] Index: [] >>> make_meta(('a', 'f8')) Series([], Name: a, dtype: float64) >>> make_meta('i8') 1 """ if hasattr(x, '_meta'): return x._meta if isinstance(x, (pd.Series, pd.DataFrame)): return x.iloc[0:0] elif isinstance(x, pd.Index): return x[0:0] index = index if index is None else index[0:0] if isinstance(x, dict): return pd.DataFrame({c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index) if isinstance(x, tuple) and len(x) == 2: return _empty_series(x[0], x[1], index=index) elif isinstance(x, (list, tuple)): if not all(isinstance(i, tuple) and len(i) == 2 for i in x): raise ValueError("Expected iterable of tuples of (name, dtype), " "got {0}".format(x)) return pd.DataFrame({c: _empty_series(c, d, index=index) for (c, d) in x}, columns=[c for c, d in x], index=index) elif not hasattr(x, 'dtype') and x is not None: # could be a string, a dtype object, or a python type. Skip `None`, # because it is implictly converted to `dtype('f8')`, which we don't # want here. try: dtype = np.dtype(x) return _scalar_from_dtype(dtype) except: # Continue on to next check pass if is_scalar(x): return _nonempty_scalar(x) raise TypeError("Don't know how to create metadata from {0}".format(x)) def _nonempty_index(idx): typ = type(idx) if typ is pd.RangeIndex: return pd.RangeIndex(2, name=idx.name) elif typ in (pd.Int64Index, pd.Float64Index): return typ([1, 2], name=idx.name) elif typ is pd.Index: return pd.Index(['a', 'b'], name=idx.name) elif typ is pd.DatetimeIndex: start = '1970-01-01' data = [start, start] if idx.freq is None else None return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name) elif typ is pd.PeriodIndex: return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq, name=idx.name) elif typ is pd.TimedeltaIndex: start = np.timedelta64(1, 'D') data = [start, start] if idx.freq is None else None return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq, name=idx.name) elif typ is pd.CategoricalIndex: if len(idx.categories): data = [idx.categories[0]] * 2 cats = idx.categories else: data = _nonempty_index(idx.categories) cats = None return pd.CategoricalIndex(data, categories=cats, ordered=idx.ordered, name=idx.name) elif typ is pd.MultiIndex: levels = [_nonempty_index(i) for i in idx.levels] labels = [[0, 0] for i in idx.levels] return pd.MultiIndex(levels=levels, labels=labels, names=idx.names) raise TypeError("Don't know how to handle index of " "type {0}".format(type(idx).__name__)) _simple_fake_mapping = { 'b': np.bool_(True), 'V': np.void(b' '), 'M': np.datetime64('1970-01-01'), 'm': np.timedelta64(1), 'S': np.str_('foo'), 'a': np.str_('foo'), 'U': np.unicode_('foo'), 'O': 'foo' } def _scalar_from_dtype(dtype): if dtype.kind in ('i', 'f', 'u'): return dtype.type(1) elif dtype.kind == 'c': return dtype.type(complex(1, 0)) elif dtype.kind in _simple_fake_mapping: o = _simple_fake_mapping[dtype.kind] return o.astype(dtype) if dtype.kind in ('m', 'M') else o else: raise TypeError("Can't handle dtype: {0}".format(dtype)) def _nonempty_scalar(x): if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)): return x elif np.isscalar(x): dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x)) return _scalar_from_dtype(dtype) else: raise TypeError("Can't handle meta of type " "'{0}'".format(type(x).__name__)) def _nonempty_series(s, idx): dtype = s.dtype if is_datetime64tz_dtype(dtype): entry = pd.Timestamp('1970-01-01', 
tz=dtype.tz) data = [entry, entry] elif is_categorical_dtype(dtype): if len(s.cat.categories): data = [s.cat.categories[0]] * 2 cats = s.cat.categories else: data = _nonempty_index(s.cat.categories) cats = None data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered) else: entry = _scalar_from_dtype(dtype) data = np.array([entry, entry], dtype=dtype) return pd.Series(data, name=s.name, index=idx) def meta_nonempty(x): """Create a nonempty pandas object from the given metadata. Returns a pandas DataFrame, Series, or Index that contains two rows of fake data. """ if isinstance(x, pd.Index): return _nonempty_index(x) elif isinstance(x, pd.Series): idx = _nonempty_index(x.index) return _nonempty_series(x, idx) elif isinstance(x, pd.DataFrame): idx = _nonempty_index(x.index) data = {i: _nonempty_series(x.iloc[:, i], idx) for i, c in enumerate(x.columns)} res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns))) res.columns = x.columns return res elif is_scalar(x): return _nonempty_scalar(x) else: raise TypeError("Expected Index, Series, DataFrame, or scalar, " "got {0}".format(type(x).__name__)) ############################################################### # Testing ############################################################### def _check_dask(dsk, check_names=True, check_dtypes=True, result=None): import dask.dataframe as dd if hasattr(dsk, 'dask'): if result is None: result = dsk.compute(get=get_sync) if isinstance(dsk, dd.Index): assert isinstance(result, pd.Index), type(result) assert isinstance(dsk._meta, pd.Index), type(dsk._meta) if check_names: assert dsk.name == result.name assert dsk._meta.name == result.name if isinstance(result, pd.MultiIndex): assert result.names == dsk._meta.names if check_dtypes: assert_dask_dtypes(dsk, result) elif isinstance(dsk, dd.Series): assert isinstance(result, pd.Series), type(result) assert isinstance(dsk._meta, pd.Series), type(dsk._meta) if check_names: assert dsk.name == result.name, (dsk.name, result.name) assert dsk._meta.name == result.name if check_dtypes: assert_dask_dtypes(dsk, result) _check_dask(dsk.index, check_names=check_names, check_dtypes=check_dtypes, result=result.index) elif isinstance(dsk, dd.DataFrame): assert isinstance(result, pd.DataFrame), type(result) assert isinstance(dsk.columns, pd.Index), type(dsk.columns) assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta) if check_names: tm.assert_index_equal(dsk.columns, result.columns) tm.assert_index_equal(dsk._meta.columns, result.columns) if check_dtypes: assert_dask_dtypes(dsk, result) _check_dask(dsk.index, check_names=check_names, check_dtypes=check_dtypes, result=result.index) elif isinstance(dsk, dd.core.Scalar): assert (np.isscalar(result) or isinstance(result, (pd.Timestamp, pd.Timedelta))) if check_dtypes: assert_dask_dtypes(dsk, result) else: msg = 'Unsupported dask instance {0} found'.format(type(dsk)) raise AssertionError(msg) return result return dsk def _maybe_sort(a): # sort by value, then index try: if isinstance(a, pd.DataFrame): a = a.sort_values(by=a.columns.tolist()) else: a = a.sort_values() except (TypeError, IndexError, ValueError): pass return a.sort_index() def assert_eq(a, b, check_names=True, check_dtypes=True, check_divisions=True, check_index=True, **kwargs): if check_divisions: assert_divisions(a) assert_divisions(b) if hasattr(a, 'divisions') and hasattr(b, 'divisions'): at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion assert at == bt, 
(at, bt) assert_sane_keynames(a) assert_sane_keynames(b) a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes) b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes) if not check_index: a = a.reset_index(drop=True) b = b.reset_index(drop=True) if isinstance(a, pd.DataFrame): a = _maybe_sort(a) b = _maybe_sort(b) tm.assert_frame_equal(a, b, **kwargs) elif isinstance(a, pd.Series): a = _maybe_sort(a) b = _maybe_sort(b) tm.assert_series_equal(a, b, check_names=check_names, **kwargs) elif isinstance(a, pd.Index): tm.assert_index_equal(a, b, **kwargs) else: if a == b: return True else: if np.isnan(a): assert np.isnan(b) else: assert np.allclose(a, b) return True def assert_dask_graph(dask, label): if hasattr(dask, 'dask'): dask = dask.dask assert isinstance(dask, dict) for k in dask: if isinstance(k, tuple): k = k[0] if k.startswith(label): return True else: msg = "given dask graph doesn't contan label: {0}" raise AssertionError(msg.format(label)) def assert_divisions(ddf): if not hasattr(ddf, 'divisions'): return if not hasattr(ddf, 'index'): return if not ddf.known_divisions: return index = lambda x: x if isinstance(x, pd.Index) else x.index results = get_sync(ddf.dask, ddf._keys()) for i, df in enumerate(results[:-1]): if len(df): assert index(df).min() >= ddf.divisions[i] assert index(df).max() < ddf.divisions[i + 1] if len(results[-1]): assert index(results[-1]).min() >= ddf.divisions[-2] assert index(results[-1]).max() <= ddf.divisions[-1] def assert_sane_keynames(ddf): if not hasattr(ddf, 'dask'): return for k in ddf.dask.keys(): while isinstance(k, tuple): k = k[0] assert isinstance(k, (str, bytes)) assert len(k) < 100 assert ' ' not in k if sys.version_info[0] >= 3: assert k.split('-')[0].isidentifier() def assert_dask_dtypes(ddf, res, numeric_equal=True): """Check that the dask metadata matches the result. If `numeric_equal`, integer and floating dtypes compare equal. This is useful due to the implicit conversion of integer to floating upon encountering missingness, which is hard to infer statically.""" eq_types = {'O', 'S', 'U', 'a'} # treat object and strings alike if numeric_equal: eq_types.update(('i', 'f')) if isinstance(res, pd.DataFrame): for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples(): assert (a.kind in eq_types and b.kind in eq_types) or (a == b) elif isinstance(res, (pd.Series, pd.Index)): a = ddf._meta.dtype b = res.dtype assert (a.kind in eq_types and b.kind in eq_types) or (a == b) else: if hasattr(ddf._meta, 'dtype'): a = ddf._meta.dtype if not hasattr(res, 'dtype'): assert np.isscalar(res) b = np.dtype(type(res)) else: b = res.dtype assert (a.kind in eq_types and b.kind in eq_types) or (a == b) else: assert type(ddf._meta) == type(res) def assert_max_deps(x, n, eq=True): dependencies, dependents = get_deps(x.dask) if eq: assert max(map(len, dependencies.values())) == n else: assert max(map(len, dependencies.values())) <= n
bsd-3-clause
1,418,236,844,497,588,200
33.866116
82
0.566986
false
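A minimal, pandas-only sketch of the "meta" idea that the helpers in the file above (make_meta, meta_nonempty) implement; the column names and dtypes here are made-up examples for illustration, not dask's API.

import numpy as np
import pandas as pd

# Stand-in for the make_meta idea: an empty DataFrame whose column names and
# dtypes describe the eventual output, without holding any rows.
meta = pd.DataFrame({"a": pd.Series([], dtype="i8"),
                     "b": pd.Series([], dtype="object")})
assert len(meta) == 0
assert meta.dtypes["a"] == np.dtype("int64")

# A "nonempty" counterpart (two rows of dummy values, in the spirit of
# meta_nonempty) lets dtype-dependent code paths run on fake data.
nonempty = pd.DataFrame({"a": np.array([1, 1], dtype="i8"),
                         "b": ["foo", "foo"]})
assert (nonempty.dtypes == meta.dtypes).all()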
mattions/TimeScales
helpers/while_loop_comparison.py
1
5103
import numpy as np import matplotlib.pyplot as plt from decimal import Decimal, getcontext FIRST_TRAIN_STIM = 18 SECOND_TRAIN_STIM = 6 Num_inputs = np.array([10, 20, 30, 40]) x = Num_inputs * FIRST_TRAIN_STIM + Num_inputs * SECOND_TRAIN_STIM # Time Analysis # while_sync organized in dictionary: # Key: numb_inputs, Value: Time [s] while_1ms = {10 : 190999.43, 20 : 252372.22, 30 : 282201.69, 40 : 270703.95 } while_10ms = {10 : 80233.95, 20 : 107879.38, 30 : 113829.47, 40: 116230.06} while_100ms= {10 : 1706.60, 20 : 1609.41, 30 : 2000.67, 40 : 1872.98} while_0_5ms = {10 : 330934.74, 20 : 332132.42 , 30 : 331733.19, 40 : 302128.32} events = {10 : 74833.435, 20 : 76188.83, 30 : 79059.62, 40 : 83309.78} while_sync_sparseness = {2: 18733.74, 4: 21888.38, 16: 29467.67, 32: 45435.57} events_sparseness = {2: 16722.49, 4: 15879.96, 16: 19695.96, 32: 18482.28} def build_array_time(dict_time): "Return the time in frequencies order, scaled to mins" times = [] for t in [10,20,30,40]: times.append(dict_time[t]) times_in_mins = np.array(times)/60.0 return times_in_mins def create_inputs_list(delay, numbers, t_stims): getcontext().prec = 7 tot_inputs = [] for t_stim in t_stims: first_input = Decimal(t_stim) + Decimal(delay) inputs_time = [first_input] for i in range(numbers - 1): new_time = inputs_time[-1] + Decimal(delay) inputs_time.append(new_time) tot_inputs.extend(inputs_time) return tot_inputs def calculate_number_of_events(inputs_time, delta_sync): syncs_time = np.arange(0, 20000, delta_sync) number_of_event_to_18_spines = 0 number_of_event_to_6_spines = 0 for x in inputs_time: if x in syncs_time: # print "event in sync time: %s" %x if x < 15000: number_of_event_to_18_spines += 1 else: number_of_event_to_6_spines += 1 number_of_events = number_of_event_to_18_spines * 18 + number_of_event_to_6_spines * 6 return number_of_events # Number of events missed per delta def calc_missed_events(): plt.figure() delta_t = [10, 100] #delta_t = [1] delay = 50 numbers = 30 t_stims = [2000,15000] for dt in delta_t: n_events_missed = [] for input in Num_inputs: inputs_time = create_inputs_list(delay, input, t_stims) n_events_hit = calculate_number_of_events(inputs_time, dt) n_events_tot = input * FIRST_TRAIN_STIM + input * SECOND_TRAIN_STIM n_events_missed.append( n_events_tot - n_events_hit) # n_events * FIRST_TRAIN_STIM + n_events * SECOND_TRAIN_STIM print "dt %s tot events: %s missed events: %s" %(dt, x, n_events_missed) plt.plot(x, n_events_missed, marker='o', linestyle='-', label=str(dt)) # sparseness def plot_sparseness_comparison(): x = events_sparseness.keys() x.sort() y_whi = [] y_ev = [] for p in x: y_whi.append(while_sync_sparseness[p]) y_ev.append(events_sparseness[p]) plt.figure() y_whi_array = np.array(y_whi)/60. y_ev_array = np.array(y_ev)/60. 
plt.plot(x, y_whi_array, marker='o', linestyle='-', label="while_sync") plt.plot(x, y_ev_array, marker='o', linestyle='-', label="events") print "Sparseness comparison report:" print "Standard deviation [min]: while loop: %s, event-algorithm: %s" %(y_whi_array.std(), y_ev_array.std() ) print "Mean: while loop [min]: %s, event-aglorithm: %s" %(y_whi_array.mean(), y_ev_array.mean() ) plt.ylabel('Time [min]') plt.xlabel("number of spines") plt.title('Sparseness comparison') plt.legend(loc=0) while_0_5ms_times = build_array_time(while_0_5ms) plt.plot(x, while_0_5ms_times, marker='o', linestyle='-', label='while 0.5ms', color='magenta') while_1ms_times = build_array_time(while_1ms) plt.plot(x, while_1ms_times, marker='o', linestyle='-', label='while 1ms', color='blue') while_10ms_times = build_array_time(while_10ms) plt.plot(x, while_10ms_times, marker='o', linestyle='-', label='while 10ms', color='red') events_times = build_array_time(events) plt.plot(x, events_times, marker='o', linestyle='-', label='events', color='green') while_100ms_times = build_array_time(while_100ms) plt.plot(x, while_100ms_times, marker='o', linestyle='-', label='while 100ms', color='pink') #plt.legend([p1, p2, p3, p4, p5], ['events', 'while 1ms', 'while 10ms', 'while 100ms', 'while 0.5ms' ], loc=0) plt.legend(loc=0) plt.xlabel("Number of Events") plt.ylabel("Time [min]") plt.ylim(-100, 6000) plot_sparseness_comparison()
bsd-3-clause
3,595,581,679,148,356,000
32.136364
110
0.569469
false
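A short standard-library sketch of why create_inputs_list in the script above accumulates stimulation times with Decimal rather than floats: calculate_number_of_events relies on exact membership in a synchronization grid, which float drift can break.

from decimal import Decimal

# Repeated float addition drifts, so exact equality against a sync grid
# can silently miss events.
print(0.1 + 0.1 + 0.1 == 0.3)        # False with binary floats

# Decimal keeps the accumulated delays exact, so membership tests hold.
step = Decimal("0.1")
times = [step * i for i in range(4)]
print(times[3] == Decimal("0.3"))    # True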
eirikgje/healpy
healpy/test/test_visufunc.py
1
1058
import matplotlib matplotlib.use("agg") import unittest import numpy as np import healpy as hp from ..visufunc import * from ..zoomtool import mollzoom class TestNoCrash(unittest.TestCase): def setUp(self): self.nside = 1 self.m = np.arange(hp.nside2npix(self.nside), dtype=np.double) self.ma = self.m.copy() self.ma[3] = hp.UNSEEN self.ma = hp.ma(self.ma) def test_cartview_nocrash(self): cartview(self.m) def test_mollview_nocrash(self): mollview(self.m) def test_gnomview_nocrash(self): gnomview(self.m) def test_orthview_nocrash(self): orthview(self.m) def test_mollzoom_nocrash(self): mollzoom(self.m) def test_cartview_ma_nocrash(self): cartview(self.ma) def test_mollview_ma_nocrash(self): mollview(self.ma) def test_gnomview_ma_nocrash(self): gnomview(self.ma) def test_orthview_ma_nocrash(self): orthview(self.ma) def test_mollzoom_ma_nocrash(self): mollzoom(self.ma)
gpl-2.0
-1,362,372,736,777,402,400
21.510638
70
0.636106
false
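The healpy tests above follow a simple "does not crash" pattern with a headless backend. A generic, healpy-free sketch of the same pattern (hypothetical test, matplotlib and numpy only):

import matplotlib
matplotlib.use("agg")   # select a headless backend before importing pyplot
import matplotlib.pyplot as plt
import numpy as np
import unittest


class TestPlotNoCrash(unittest.TestCase):
    def setUp(self):
        self.m = np.arange(12, dtype=np.double)

    def test_plot_nocrash(self):
        # The only assertion is that no exception is raised.
        plt.figure()
        plt.plot(self.m)
        plt.close("all")


if __name__ == "__main__":
    unittest.main()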
potash/drain
drain/aggregation.py
1
14812
from .step import Step from .aggregate import Aggregator from . import util, data from itertools import chain import pandas as pd import logging class AggregationBase(Step): """ AggregationBase uses aggregate.Aggregator to aggregate data. It can include aggregations over multiple indexes and multiple data transformations (e.g. subsets). The combinations can be run in parallel and can be returned disjoint or concatenated. Finally the results may be pivoted and joined to other datasets. """ def __init__(self, insert_args, aggregator_args, concat_args, parallel=False, prefix=None, inputs=None): """ Args: insert_args: collection of argument names to insert into results aggregator_args: collection of argument names to pass to get_aggregator concat_args: collection of argument names on which to concatenate results. Typically a subset (or equal to) aggregator_args. parallel: whether to distribute the aggregation over many inputs. uses self._parallel_kwargs to determine how to distribute. prefix: used as a prefix for feature names by join() """ Step.__init__(self, insert_args=insert_args, concat_args=concat_args, aggregator_args=aggregator_args, prefix=prefix, parallel=parallel, inputs=inputs) if parallel: # create a new Aggregation according to parallel_kwargs # pass our input to those steps # those become the inputs to this step pkwargs = self.get_arguments() pkwargs.update(parallel=False) self.inputs = [] for pk in self._parallel_kwargs: pkwargs.update(pk) a = self.__class__(**pkwargs) self.inputs.append(a) self._aggregators = {} """ arguments is a list of dictionaries of argument names and values. it must include the special 'index' argument, whose values are keys to plug into the self.indexes dictionary, whose values are the actual index that is passed to Aggregator.aggregate() """ @property def argument_names(self): return list(util.union(map(set, self.arguments))) def args_prefix(self, args): prefix = '' if self.prefix is None else self.prefix + '_' prefix += str.join('_', map(str, args)) + '_' return prefix # left join to the specified DataFrame # left should contain the index of the concatenated agg in its columns def join(self, left): fillna_value = pd.Series() concat_result = self.get_concat_result() # TODO: is it more efficient to first collect indexes from concat # then outer join all of the dfs # then left join that to left? for concat_args, df in concat_result.items(): # TODO: print warning if df.index.names is not a subset of left.columns # and skip this df logging.info('Joining %s %s' % (self.prefix, str(concat_args))) data.prefix_columns(df, self.args_prefix(concat_args)) if not set(df.index.names).issubset(left.columns): logging.info("Skipping join since aggregation index not in left: %s" % df.index.names) continue left = left.merge(df, left_on=df.index.names, right_index=True, how='left', copy=False) fillna_value = fillna_value.append(self.fillna_value( df=df, left=left, **{k: v for k, v in zip(self.concat_args, concat_args)})) logging.info('Filling missing values') left.fillna(fillna_value, inplace=True) return left def fillna_value(self, df, left, **concat_args): """ This method gives subclasses the opportunity to define how join() fills missing values. Return value must be compatible with DataFrame.fillna() value argument. Examples: - return 0: replace missing values with zero - return df.mean(): replace missing values with column mean This default implimentation fills counts with zero. 
TODO: identify counts more robustly instead of relying on column name Typically fill other fields with mean but can't do that during the join because that would leak information across a train/test split """ value = pd.Series( 0, index=[c for c in df.columns if c.endswith('_count') and c.find('_per_') == -1]) return value def select(self, df, args, inplace=False): """ After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collcetion of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('Distict', '24h')] - a dict to be exanded into the above, e.g. {'District': ['12h', '24h']} """ if self.prefix is None: raise ValueError('Cannot do selection on an Aggregation without a prefix') # run list_expand and ensure all args to tuples for validation args = [tuple(i) for i in util.list_expand(args)] # check that the args passed are valid for a in args: has_arg = False for argument in self.arguments: if a == tuple(argument[k] for k in self.concat_args): has_arg = True break if not has_arg: raise ValueError('Invalid argument for selection: %s' % str(a)) df = data.select_features( df, exclude=[self.prefix + '_.*'], include=map(lambda a: self.args_prefix(a) + '.*', args), inplace=inplace) return df def run(self, *args, **kwargs): if self.parallel: # use tuple to avoid mapping to positional arguments by step.merge_results() return tuple(chain(*args)) if not self.parallel: dfs = [] for argument in self.arguments: logging.info('Aggregating %s %s' % (self.prefix, argument)) aggregator = self._get_aggregator(**argument) df = aggregator.aggregate(self.indexes[argument['index']]) logging.info('Aggregated %s: %s' % (argument, df.shape)) # insert insert_args for k in argument: if k in self.insert_args: df[k] = argument[k] df.set_index(self.insert_args, append=True, inplace=True) dfs.append(df) return tuple(dfs) def load(self): # overload load in order to restore result to a tuple Step.load(self) self.result = tuple(self.result) def get_concat_result(self): to_concat = {} dfs = self.result for argument, df in zip(self.arguments, dfs): concat_args = tuple(argument[k] for k in self.concat_args) if concat_args not in to_concat: to_concat[concat_args] = [df] else: to_concat[concat_args].append(df) dfs = {concat_args: pd.concat(dfs, copy=False) for concat_args, dfs in to_concat.items()} return dfs def _get_aggregator(self, **kwargs): args_tuple = (kwargs[k] for k in self.aggregator_args) if args_tuple in self._aggregators: return self._aggregators[args_tuple] else: aggregator = self.get_aggregator( **util.dict_subset(kwargs, self.aggregator_args)) self._aggregators[args_tuple] = aggregator return aggregator def get_aggregator(self, **kwargs): """ Given the arguments, return an aggregator This method exists to allow subclasses to use Aggregator objects efficiently, i.e. only apply AggregateSeries once per set of Aggregates. 
If the set of Aggregates depends on some or none of the arguments the subclass need not recreate Aggregators """ raise NotImplementedError class AggregationJoin(Step): """ first input is left and second input is aggregation if left step returned a dict, use MapResults to clarify e.g.: mapping=[{'aux': None}] """ def __init__(self, inputs, **kwargs): Step.__init__(self, inputs=inputs, **kwargs) def run(self, aggregations, left): # aggregations = iter(self.inputs) # next(aggregations) # first input is left, not aggregation # for aggregation in aggregations: left_columns = list(left.columns) left = self.inputs[0].join(left) left = left.drop(left_columns, axis=1) return left class SpacetimeAggregationJoin(AggregationJoin): """ Specify a temporal lag between the aggregations and left Useful for simulating a delay in receipt of aggregation data sources """ def __init__(self, inputs, lag=None, **kwargs): AggregationJoin.__init__(self, lag=lag, inputs=inputs, **kwargs) def run(self, aggregations, left): if self.lag is not None: delta = data.parse_delta(self.lag) for a in aggregations: a.reset_index(level='date', inplace=True) a.date = a.date.apply(lambda d: d + delta) a.set_index('date', append=True, inplace=True) return AggregationJoin.run(self, aggregations, left) class SimpleAggregation(AggregationBase): """ A simple AggreationBase subclass with a single aggregrator The only argument is the index An implementation need only define an aggregates attributes, see test_aggregation.SimpleCrimeAggregation for an example. """ def __init__(self, inputs, indexes, prefix=None, parallel=False): # if indexes was not a dict but a list, make it a dict if not isinstance(indexes, dict): indexes = {index: index for index in indexes} self.indexes = indexes self.inputs = inputs AggregationBase.__init__(self, insert_args=[], concat_args=['index'], aggregator_args=[], parallel=parallel, prefix=prefix) def get_aggregator(self, **kwargs): return Aggregator(self.inputs[0].result, self.aggregates) @property def _parallel_kwargs(self): """ Returns: a list of kwargs for each parallel input """ return [{'indexes': {name: index}} for name, index in self.indexes.items()] @property def arguments(self): return [{'index': name} for name in self.indexes] class SpacetimeAggregation(AggregationBase): """ SpacetimeAggregation is an Aggregation over space and time. Specifically, the index is a spatial index and an additional date and delta argument select a subset of the data to aggregate. We assume that the index and deltas are independent of the date, so every date is aggregated to all spacedeltas By default the aggregator_args are date and delta (i.e. independent of aggregation index). To change that, pass aggregator_args=['date', 'delta', 'index'] and override get_aggregator to accept an index argument. Note that dates should be datetime.datetime, not numpy.datetime64, for yaml serialization and to work with dateutil.relativedelta. However since pandas automatically turns a datetime column in the index into datetime64 DatetimeIndex, the left dataframe passed to join() should use datetime64! See test_aggregation.SpacetimeCrimeAggregation for an example. 
""" def __init__(self, spacedeltas, dates, date_column, parallel=False, max_date_column=None, censor_columns=None, aggregator_args=None, concat_args=None, inputs=None, prefix=None): if aggregator_args is None: aggregator_args = ['date', 'delta'] if concat_args is None: concat_args = ['index', 'delta'] self.censor_columns = censor_columns if censor_columns is not None else {} self.date_column = date_column self.max_date_column = max_date_column self.dates = dates self.spacedeltas = spacedeltas """ spacedeltas is a dict of the form {name: (index, deltas)} where deltas is an array of delta strings dates are end dates for the aggregators """ AggregationBase.__init__(self, insert_args=['date'], aggregator_args=aggregator_args, concat_args=concat_args, prefix=prefix, parallel=parallel, inputs=inputs) @property def indexes(self): return {name: value[0] for name, value in self.spacedeltas.items()} @property def arguments(self): names = list(self.spacedeltas.keys()) names.sort() a = [] for date in self.dates: for name in names: for delta in self.spacedeltas[name][1]: a.append({'date': date, 'delta': delta, 'index': name}) return a @property def _parallel_kwargs(self): return [{'spacedeltas': self.spacedeltas, 'dates': [date]} for date in self.dates] def join(self, left): # this check doesn't work with lag! # TODO: fix by moving Aggregation.join() code to AggregationJoin.sun() # difference = set(pd.to_datetime(left.date.unique()))\ # .difference(pd.to_datetime(self.dates)) # if len(difference) > 0: # raise ValueError('Left contains unaggregated dates: %s' % difference) return AggregationBase.join(self, left) def get_aggregator(self, date, delta): df = self.get_data(date, delta) aggregator = Aggregator(df, self.get_aggregates(date, delta)) return aggregator def get_data(self, date, delta): df = self.inputs[0].result df = data.date_select(df, self.date_column, date, delta, self.max_date_column) df = data.date_censor(df.copy(), self.censor_columns, date) return df def get_aggregates(self, date, delta): raise NotImplementedError
mit
4,057,079,826,865,282,600
38.817204
95
0.59445
false
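A pandas-only sketch, with made-up column and index names, of the prefix / left-merge / fill-counts-with-zero pattern that AggregationBase.join and fillna_value implement in the module above:

import pandas as pd

# Observations to enrich, and an aggregate table indexed by the join key.
left = pd.DataFrame({"district": ["A", "A", "B", "C"]})
agg = pd.DataFrame({"crimes_count": [7, 2]},
                   index=pd.Index(["A", "B"], name="district"))

# Prefix the aggregated feature columns, then left-merge on the index.
agg = agg.add_prefix("district_12h_")
joined = left.merge(agg, left_on="district", right_index=True, how="left")

# Rows with no aggregate (here "C") get their count columns filled with 0.
count_cols = [c for c in joined.columns if c.endswith("_count")]
joined[count_cols] = joined[count_cols].fillna(0)
print(joined)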
stuliveshere/PySeis
PySeis/launchGui.py
1
8281
import numpy as np import os import glob import Tkinter as tk import ttk import ScrolledText import matplotlib import matplotlib.pyplot as plt matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg from matplotlib.backend_bases import key_press_handler import ConfigParser import toolbox import processing import pydoc todo = ''' for the viewer, hold the 3 types of view permanently in memory and just swap in the active view as needed. so when the gui starts, it will need to hold default views. ''' import sys, inspect def is_mod_function(mod, func): return inspect.isfunction(func) and inspect.getmodule(func) == mod def list_functions(mod): return [func.__name__ for func in mod.__dict__.itervalues() if is_mod_function(mod, func)] class Controller(tk.PanedWindow): def __init__(self, parent, *args, **kwargs): tk.PanedWindow.__init__(self, parent, *args, **kwargs) self.parent = parent self.cwd = os.getcwd() menubar = tk.Menu(parent) menubar.add_command(label="File", command=None) menubar.add_command(label="Edit", command=None) menubar.add_command(label="Run", command=None) menubar.add_command(label="Help", command=None) parent.config(menu=menubar) self.tree = TreeView(self, self.cwd) self.tree.tree.bind('<Double-Button-1>', self.view_file) self.add(self.tree) self.viewer = PanelView(self) self.add(self.viewer) self.viewer.fileView.tkraise() def view_file(self, event): curItem = self.tree.tree.focus() fname = self.tree.tree.item(curItem)['text'] fpath= self.tree.tree.item(curItem)['values'][0] type = fname.split(".")[-1] if type == "su": self.viewer.plotView.update(fpath) #self.viewer.plotView.tkraise() elif type == "py": self.viewer.scriptView.update(fpath) class PanelView(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.parent = parent self.pack(side="top", fill="both", expand=True) self.grid_rowconfigure(0, weight=1) self.grid_columnconfigure(0, weight=1) self.fileView = FileView(self, self.parent.cwd) self.fileView.grid(row=0, column=0, sticky="nsew") self.scriptView = ScriptView(self) self.scriptView.grid(row=0, column=0, sticky="nsew") self.plotView = PlotView(self, None) self.plotView.grid(row=0, column=0, sticky="nsew") class FileView(tk.Frame): def __init__(self, parent, cwd, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.parent = parent self.cwd = cwd listbox = tk.Listbox(self) #for file in os.listdir(cwd+"/Projects"): #listbox.insert(tk.END, file) #for i in pydoc.render_doc(toolbox, "Help on %s").split('\n'): stuff = pydoc.plain(pydoc.render_doc(toolbox, "Help on %s")) for line in stuff.split('\n'): listbox.insert(tk.END, line) listbox.pack(fill=tk.BOTH, expand=tk.YES) class ScriptView(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.pad = ScrolledText.ScrolledText(self, width=100, height=80) self.pad.pack(fill=tk.BOTH, expand=tk.YES) def update(self, fpath): self.pad.delete('1.0', tk.END) self.pad.insert('1.0', open(fpath, 'r').read()) self.tkraise() class PlotView(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.parent = parent self.fig = fig = plt.figure() self.ax = ax = fig.add_subplot(111) # a tk.DrawingArea canvas = FigureCanvasTkAgg(fig, master=self) canvas.show() canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1) toolbar = NavigationToolbar2TkAgg(canvas, self) toolbar.update() 
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1) def update(self, fpath): dataset, params = processing.initialise(fpath, memmap=True) params['primary'] = None params['secondary'] = 'cdp' eventManager = toolbox.KeyHandler(self.fig, self.ax, dataset, params) self.fig.canvas.mpl_connect('key_press_event',eventManager) #~ self.ax.set_data(dataset['trace']) #~ self.fig.canvas.draw() self.tkraise() class TreeView(tk.Frame): def __init__(self, parent, cwd, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.parent = parent self.cwd = cwd vsb = ttk.Scrollbar(self, orient="vertical") hsb = ttk.Scrollbar(self, orient="horizontal") self.tree = tree = ttk.Treeview(self, columns=("path", "type", "size", "date"), displaycolumns="size", yscrollcommand=lambda f, l: self.autoscroll(vsb, f, l), xscrollcommand=lambda f, l:self.autoscroll(hsb, f, l)) vsb['command'] = tree.yview hsb['command'] = tree.xview tree.heading("#0", text="Projects", anchor='w') tree.heading("size", text="File Size", anchor='w') tree.column("size", stretch=0, width=100) self.populate_roots(tree) tree.bind('<<TreeviewOpen>>', self.update_tree) # Arrange the tree and its scrollbars in the toplevel tree.grid(column=0, row=0, sticky='nswe') vsb.grid(column=1, row=0, sticky='ns') hsb.grid(column=0, row=1, sticky='ew') self.grid_columnconfigure(0, weight=1) self.grid_rowconfigure(0, weight=1) def populate_tree(self, tree, node): if tree.set(node, "type") != 'directory': return path = tree.set(node, "path") tree.delete(*tree.get_children(node)) parent = tree.parent(node) for p in sorted(os.listdir(path)): ptype = None p = os.path.join(path, p).replace('\\', '/') if os.path.isdir(p): ptype = "directory" elif os.path.isfile(p): ptype = "file" fname = os.path.split(p)[1] id = tree.insert(node, "end", text=fname, values=[p, ptype]) if ptype == 'directory': if fname not in ('.', '..'): tree.insert(id, 0, text="dummy") tree.item(id, text=fname) elif ptype == 'file': size = os.stat(p).st_size * 1e-3 date = os.stat(p).st_mtime tree.set(id, "size", "%d Kb" % size) def populate_roots(self, tree): #dir = os.path.abspath('.').replace('\\', '/') dir = self.cwd+"/Projects" node = tree.insert('', 'end', text="Projects", values=[dir, "directory"]) self.populate_tree(tree, node) def update_tree(self, event): tree = event.widget self.populate_tree(tree, tree.focus()) def change_dir(self, event): tree = event.widget node = tree.focus() if tree.parent(node): path = os.path.abspath(tree.set(node, "path")) if os.path.isdir(path): os.chdir(path) tree.delete(tree.get_children('')) self.populate_roots(tree) def autoscroll(self, sbar, first, last): """Hide and show scrollbar as needed.""" first, last = float(first), float(last) if first <= 0 and last >= 1: sbar.grid_remove() else: sbar.grid() sbar.set(first, last) def main(): root = tk.Tk() a = Controller(root,orient=tk.HORIZONTAL) a.pack(side="top", fill="both", expand=True) root.geometry("1366x768") print 'functions in current module:\n', list_functions(sys.modules[__name__]) print 'functions in inspect module:\n', list_functions(inspect) root.mainloop() if __name__ == "__main__": os.chdir("../") main()
mit
-625,467,408,256,857,900
33.360996
106
0.57638
false
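A standalone Python 3 rendition of the module-introspection helpers (is_mod_function / list_functions) that the GUI above uses to list toolbox functions; the example function is hypothetical.

import inspect
import sys


def list_functions(mod):
    """Return the names of functions defined in module `mod` itself."""
    return [name for name, obj in vars(mod).items()
            if inspect.isfunction(obj) and inspect.getmodule(obj) is mod]


def example():
    pass


if __name__ == "__main__":
    print(list_functions(sys.modules[__name__]))  # ['list_functions', 'example']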
wathen/PhD
MHD/FEniCS/MHD/CG/PicardIter_Direct/DecoupleTest/KappaChange/tests/MD.py
1
12360
#!/usr/bin/python # interpolate scalar gradient onto nedelec space from dolfin import * import petsc4py import sys petsc4py.init(sys.argv) from petsc4py import PETSc Print = PETSc.Sys.Print # from MatrixOperations import * import numpy as np #import matplotlib.pylab as plt import PETScIO as IO import common import scipy import scipy.io import time as t import BiLinear as forms import IterOperations as Iter import MatrixOperations as MO import CheckPetsc4py as CP import Solver as S import ExactSol import P as Precond import cProfile, pstats, StringIO m = 2 IterType = 'MD' errL2u =np.zeros((m-1,1)) errH1u =np.zeros((m-1,1)) errL2p =np.zeros((m-1,1)) errL2b =np.zeros((m-1,1)) errCurlb =np.zeros((m-1,1)) errL2r =np.zeros((m-1,1)) errH1r =np.zeros((m-1,1)) l2uorder = np.zeros((m-1,1)) H1uorder =np.zeros((m-1,1)) l2porder = np.zeros((m-1,1)) l2border = np.zeros((m-1,1)) Curlborder =np.zeros((m-1,1)) l2rorder = np.zeros((m-1,1)) H1rorder = np.zeros((m-1,1)) NN = np.zeros((m-1,1)) DoF = np.zeros((m-1,1)) Velocitydim = np.zeros((m-1,1)) Magneticdim = np.zeros((m-1,1)) Pressuredim = np.zeros((m-1,1)) Lagrangedim = np.zeros((m-1,1)) Wdim = np.zeros((m-1,1)) iterations = np.zeros((m-1,1)) SolTime = np.zeros((m-1,1)) udiv = np.zeros((m-1,1)) MU = np.zeros((m-1,1)) level = np.zeros((m-1,1)) NSave = np.zeros((m-1,1)) Mave = np.zeros((m-1,1)) TotalTime = np.zeros((m-1,1)) nn = 2 dim = 2 ShowResultPlots = 'yes' split = 'Linear' nn = 2 mm = 4 MUsave = np.zeros((mm*3,1)) MUit = np.zeros((m-1,mm*3)) print MUit[0,0] dim = 2 ShowResultPlots = 'yes' split = 'Linear' MU[0]= 1e0 R = 0.01 jj = -2 for yy in xrange(1,mm+1): jj +=3 MU =(R*10**(yy)) print "++++++++",MU # MU[0]= 1e0 for xx in xrange(1,m): MUsave[jj-1] = MU print xx level[xx-1] = xx+3 nn = 2**(level[xx-1]) # Create mesh and define function space nn = int(nn) NN[xx-1] = nn/2 mesh = RectangleMesh(0, 0, 1, 1, nn, nn,'left') parameters["form_compiler"]["precision"] = 15 parameters["form_compiler"]["quadrature_degree"] = -1 order = 2 parameters['reorder_dofs_serial'] = False Velocity = VectorFunctionSpace(mesh, "CG", order) Pressure = FunctionSpace(mesh, "CG", order-1) Magnetic = FunctionSpace(mesh, "N1curl", order) Lagrange = FunctionSpace(mesh, "CG", order) W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange]) # W = Velocity*Pressure*Magnetic*Lagrange Velocitydim[xx-1] = Velocity.dim() Pressuredim[xx-1] = Pressure.dim() Magneticdim[xx-1] = Magnetic.dim() Lagrangedim[xx-1] = Lagrange.dim() Wdim[xx-1] = W.dim() print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n" dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()] def boundary(x, on_boundary): return on_boundary u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1) bcu = DirichletBC(W.sub(0),u0, boundary) bcp = DirichletBC(W.sub(1),p0, boundary) bcb = DirichletBC(W.sub(2),b0, boundary) bcr = DirichletBC(W.sub(3),r0, boundary) # bc = [u0,p0,b0,r0] bcs = [bcu,bcb,bcr] FSpaces = [Velocity,Pressure,Magnetic,Lagrange] (u, p, b, r) = TrialFunctions(W) (v, q, c,s ) = TestFunctions(W) kappa = 1.0 Mu_m =10 F_NS = -kappa*Laplacian+Advection+gradPres-kappa*NS_Couple if kappa == 0: F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple else: F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple params = [MU,Mu_m,kappa] F_NS = -kappa*Laplacian+Advection+gradPres-kappa*NS_Couple F_M = Mu_m*MU*CurlCurl+gradR -kappa*M_Couple # params = [Mu,Mu_m,kappa] 
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,Neumann=Expression(("0","0")),options ="New") p_k.vector()[:]= p_k.vector().array()+np.abs(np.min(p_k.vector().array())) # bcu.apply(u_k) # bcb.apply(b_k) # bcr.apply(r_k) x = Iter.u_prev(u_k,p_k,b_k,r_k) ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType) print CoupleTerm parameters['linear_algebra_backend'] = 'PETSc' RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params) bcu = DirichletBC(W.sub(0),Expression(("0","0")), boundary) bcb = DirichletBC(W.sub(2),Expression(("0","0")), boundary) bcr = DirichletBC(W.sub(3),Expression(("0")), boundary) bcs = [bcu,bcb,bcr] eps = 1.0 # error measure ||u-u_k|| tol = 1.0E-4 # iteration counter maxiter = 20 # max no of iterations allowed SolutionTime = 0 iter = 0 outer = 0 parameters['linear_algebra_backend'] = 'uBLAS' p = forms.Preconditioner(mesh,W,u_k,b_k,params,IterType) PP,Pb = assemble_system(p, Lns,bcs) NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim())) M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim())) if IterType == "Full" or IterType == "MD": (pQ) = TrialFunction(Pressure) (qQ) = TestFunction(Pressure) print kappa Q = assemble(inner(pQ,qQ)*dx) L = assemble(inner(grad(pQ),grad(qQ))*dx) n = FacetNormal(mesh) fp = kappa*inner(grad(qQ), grad(pQ))*dx+inner((u_k[0]*grad(pQ)[0]+u_k[1]*grad(pQ)[1]),qQ)*dx + (1/2)*div(u_k)*inner(pQ,qQ)*dx - (1/2)*(u_k[0]*n[0]+u_k[1]*n[1])*inner(pQ,qQ)*ds L = CP.Assemble(L) if IterType == "CD": AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs) A,b = CP.Assemble(AA,bb) P = CP.Assemble(PP) u = b.duplicate() Mits = 0 NSits = 0 time InnerTol = [] OuterTol = [] OuterTol = 1e-6 # InnerTol.append(1e-6*((iter)*50)) InnerTol = 1e-6 TotalStart = t.clock() while eps > tol and iter < maxiter: iter += 1 if IterType == "CD": bb = assemble((Lmaxwell + Lns) - RHSform) for bc in bcs: bc.apply(bb) A,b = CP.Assemble(AA,bb) P = CP.Assemble(PP) print b else: # tic() AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs) A,b = CP.Assemble(AA,bb) del AA F = assemble(fp) F = CP.Assemble(F) P = CP.Assemble(PP) # P = S.ExactPrecond(PP,Q,L,F,FSpaces) Mass = CP.Assemble(Q) # print "Assemble time >>>>>>",toc() # if iter == 1: uu = b.duplicate() # else: # uu = uu pr = cProfile.Profile() start = t.clock() pr.enable() print InnerTol print OuterTol u,it1,it2 = S.solve(A,b,uu,P,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol,Mass,L,F) del A # print InnerTol[iter-1] pr.disable() # time = toc() time = (t.clock() - start) s = StringIO.StringIO() print "Solve time >>>>>>", time print it1,it2 NSits += it1 Mits +=it2 SolutionTime = SolutionTime +time # tic() u, p, b, r, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter) u_k.assign(u) p_k.assign(p) b_k.assign(b) r_k.assign(r) # print "Correction time >>>>>>", toc() # p_k.vector()[:]= p_k.vector().array()+np.abs(np.min(p_k.vector().array())) x = Iter.u_prev(u_k,p_k,b_k,r_k) if eps > 1e2 and iter>10: iter = 10000000000000 break # u_k,b_k,epsu,epsb=Direct.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter) print xx MUit[xx-1,jj-1] = iter MUit[xx-1,jj] = (float(NSits)/iter) MUit[xx-1,jj+1] = (float(Mits)/iter) TotalTime[xx-1] = t.clock()-TotalStart SolTime[xx-1] = SolutionTime/iter ue = u0 pe = p0 be = b0 re = r0 ExactSolution = [ue,pe,be,re] #errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = 
Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim) if xx == 1: l2uorder[xx-1] = 0 else: l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])) H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1])) l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])) l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1])) Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1])) l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1])) H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1])) import pandas as pd # print "\n\n Velocity convergence" # VelocityTitles = ["l","Total DoF","V DoF","Soln Time","V-L2","L2-order","V-H1","H1-order"] # VelocityValues = np.concatenate((level,Wdim,Velocitydim,SolTime,errL2u,l2uorder,errH1u,H1uorder),axis=1) # VelocityTable= pd.DataFrame(VelocityValues, columns = VelocityTitles) # pd.set_option('precision',3) # VelocityTable = MO.PandasFormat(VelocityTable,"V-L2","%2.4e") # VelocityTable = MO.PandasFormat(VelocityTable,'V-H1',"%2.4e") # VelocityTable = MO.PandasFormat(VelocityTable,"H1-order","%1.2f") # VelocityTable = MO.PandasFormat(VelocityTable,'L2-order',"%1.2f") # print VelocityTable.to_latex() # print "\n\n Pressure convergence" # PressureTitles = ["l","Total DoF","P DoF","Soln Time","P-L2","L2-order"] # PressureValues = np.concatenate((level,Wdim,Pressuredim,SolTime,errL2p,l2porder),axis=1) # PressureTable= pd.DataFrame(PressureValues, columns = PressureTitles) # pd.set_option('precision',3) # PressureTable = MO.PandasFormat(PressureTable,"P-L2","%2.4e") # PressureTable = MO.PandasFormat(PressureTable,'L2-order',"%1.2f") # print PressureTable.to_latex() # print "\n\n Iteration table" # if IterType == "Full": # IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",] # else: # IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"] # IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1) # IterTable= pd.DataFrame(IterValues, columns = IterTitles) # if IterType == "Full": # IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f") # IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f") # else: # IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f") # IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f") # print IterTable.to_latex() # print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol print MUit print MUsave import pandas as pd LatexTitles = ["l","DoF"] for x in xrange(1,mm+1): LatexTitles.extend(["it","it","it"]) pd.set_option('precision',3) LatexValues = np.concatenate((level,Wdim,MUit), axis=1) title = np.concatenate((np.array([[0,0]]),MUsave.T),axis=1) MU = ["0","0"] for x in xrange(1,mm+1): MU.extend(["Full","MD","CD"]) LatexValues = np.vstack((title,LatexValues)) LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles) name = "Output/"+IterType+"kappatest" # LatexTable.to_csv(name) print LatexTable.to_latex() # # # if (ShowResultPlots == 'yes'): # plot(u_k) # # plot(interpolate(ue,Velocity)) # plot(p_k) # # pe = interpolate(pe,Pressure) # # pe.vector()[:] -= np.max(pe.vector().array() )/2 # # plot(interpolate(pe,Pressure)) # plot(b_k) # # plot(interpolate(be,Magnetic)) # plot(r_k) # # plot(interpolate(re,Lagrange)) # # # interactive() # interactive()
mit
-7,926,150,116,614,495,000
29.977444
187
0.572816
false
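The convergence bookkeeping in the script above (l2uorder, H1uorder, ...) estimates orders as log2 ratios of errors between successive mesh refinements. A tiny numpy illustration with made-up errors that decay like h**2:

import numpy as np

errors = np.array([1e-2, 2.5e-3, 6.25e-4, 1.5625e-4])
orders = np.abs(np.log2(errors[:-1] / errors[1:]))
print(orders)  # -> [2. 2. 2.]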
Christoph/tag-connect
brushing/create_correlation.py
1
1278
import pandas as pd
import numpy as np
import codecs, json

data = []

# Generate 1000 synthetic elements: a constant three-step time series plus
# an (x, y) parameter pair scattered around one of three cluster centres.
for i in range(0, 1000):
    element = {}
    temp = []

    # Cluster centres for the parameter space
    p1 = [10, 20]
    p2 = [50, 60]
    p3 = [80, 70]

    temp.append({"t": 0, "out": i})
    temp.append({"t": 1, "out": i})
    temp.append({"t": 2, "out": i})

    element["data"] = temp

    # Assign each element to a cluster (with jitter) based on its index
    if i < 150:
        x = p1[0] + np.random.randint(-7, 12)
        y = p1[1] + np.random.randint(0, 23)
    elif i >= 150 and i < 300:
        x = p1[0] + np.random.randint(-9, 31)
        y = p1[1] + np.random.randint(0, 26)
    elif i >= 300 and i < 400:
        x = p2[0] - 10 + np.random.randint(-18, 21)
        y = x + np.random.randint(-4, 7)
    elif i >= 400 and i < 600:
        x = p2[0] + np.random.randint(-37, 21)
        y = p2[1] + np.random.randint(-26, 28)
    elif i >= 600 and i < 800:
        x = p2[0] + 10 + np.random.randint(-25, 8)
        y = p2[1] + np.random.randint(-36, 6)
    elif i >= 800:
        x = p3[0] + np.random.randint(-10, 12)
        y = p3[1] + np.random.randint(-12, 15)

    element["params"] = {
        "x": x,
        "y": y
    }

    data.append(element)

# Write the dataset to disk as UTF-8 JSON
file_path = "correlation.json"
json.dump(data, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4)
mit
3,463,273,536,815,678,000
25.081633
111
0.497653
false
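A hypothetical follow-up to the generator above, assuming correlation.json has been written by that script: load it back and scatter the per-element parameters, which should reveal the three clusters around p1, p2 and p3.

import codecs
import json

import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt

with codecs.open("correlation.json", "r", encoding="utf-8") as fh:
    data = json.load(fh)

xs = [element["params"]["x"] for element in data]
ys = [element["params"]["y"] for element in data]

plt.scatter(xs, ys, s=5)
plt.xlabel("x")
plt.ylabel("y")
plt.savefig("correlation_params.png")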
rxuriguera/bibtexIndexMaker
src/benchmark/plotdetailedextractionstats.py
1
5345
import matplotlib.pyplot as plt #@UnresolvedImport from matplotlib.font_manager import FontProperties #@UnresolvedImport def plot_bar(values): v00, v01, v02 = values #@UnusedVariable width = v02 xpos = 0.0 h0 = plt.bar([xpos], [1], width=width, color=colors[2], edgecolor=None, linewidth=0.0) width = v01 xpos = v02 h1 = plt.bar([xpos], [1], width=width, color=colors[1], edgecolor=None, linewidth=0.0) width = (1.0 - v01 - v02) xpos = v01 + v02 h2 = plt.bar([xpos], [1], width=width, color=colors[0], edgecolor=None, linewidth=0.0) return [h0, h1, h2] def set_plot_look(values): plt.axis([0, 1.0, 0, 1]) v00, v01, v02 = values #@UnusedVariable v00 = 1 - v01 - v02 ticks = [] labels = [] if v02: ticks.append(v02 / 2) labels.append(''.join([str(v02 * 100), '%'])) if v01: ticks.append(v02 + (v01 / 2)) labels.append(''.join([str(v01 * 100), '%'])) if v02 == 0.6 and v01 == 0.2: pass elif v00: ticks.append(v02 + v01 + (v00 / 2)) labels.append(''.join([str(v00 * 100), '%'])) plt.xticks(ticks, labels) plt.yticks([]) if __name__ == '__main__': labels = ['Incorrecte', 'Parcial', 'Correcte'] #colors = ['#4272DB', '#60a63a', '#FFA615', '#7D3883'] colors = ['#DDDDDD', '#666666', '#333333'] #markers = ['o', '-', '^', 's'] """ nexamples = 2 author = {'acm': [0.0, 0.0, 1.0], 'springer': [0.0, 1.0, 0.0], 'informaworld': [1.0, 0.0, 0.0], 'ideas': [0.0, 0.0, 1.0] } journal = {'acm': [0.0, 0.4, 0.6], 'springer': [0.2, 0.2, 0.6], 'informaworld': [1.0, 0.0, 0.0], 'ideas': [0.0, 0.0, 1.0] } title = {'acm': [0.0, 0.0, 1.0], 'springer': [0.0, 0.0, 1.0], 'informaworld': [1.0, 0.0, 0.0], 'ideas': [0.0, 0.0, 1.0] } year = {'acm': [0.0, 0.0, 1.0], 'springer': [1.0, 0.0, 0.0], 'informaworld': [0.6, 0, 0.4], 'ideas': [0.6, 0, 0.4] } """ nexamples = 4 author = {'acm': [0.0, 0.0, 1.0], 'springer': [0.0, 0.0, 1.0], 'informaworld': [1.0, 0.0, 0.0], 'ideas': [0.0, 0.0, 1.0] } journal = {'acm': [0.0, 0.4, 0.6], 'springer': [0.0, 0.2, 0.8], 'informaworld': [0.0, 0.0, 1.0], 'ideas': [0.0, 0.0, 1.0] } title = {'acm': [0.0, 0.0, 1.0], 'springer': [0.0, 0.0, 1.0], 'informaworld': [0.0, 0.0, 1.0], 'ideas': [0.0, 0.0, 1.0] } year = {'acm': [0.0, 0.0, 1.0], 'springer': [0.25, 0.0, 0.75], 'informaworld': [0.6, 0.0, 0.4], 'ideas': [0.6, 0.0, 0.4] } ##############################################3 plt.rc("font", family="cmr10") plt.rc("font", size=10) width = 4.50 height = 3.30 fig = plt.figure(figsize=(width + 1, height + 1)) figtitle = 'Correctesa dels camps extrets (wrappers generats amb %d exemples)' % nexamples fig.text(0.5, 0.95, figtitle, horizontalalignment='center', fontproperties=FontProperties(size=12)) xlabel = 'Camps' fig.text(0.5, 0.13, xlabel, horizontalalignment='center', fontproperties=FontProperties(size=12)) ylabel = 'Biblioteques' fig.text(0.02, 0.5, ylabel, verticalalignment='center', rotation='vertical', fontproperties=FontProperties(size=12)) plt.subplots_adjust(left=0.10, top=0.85, bottom=0.23, right=0.95, hspace=1.0) #plt.title('Amb %d exemples' % nexamples) bwidth = 1.0 keys = ['acm', 'springer', 'informaworld', 'ideas'] for i in range(len(keys)): key = keys[i] bplot = i * 4 plt.subplot(4, 4, bplot + 1) if not i: plt.title('"title"') plt.ylabel(key) handles = plot_bar(title[key]) set_plot_look(title[key]) plt.subplot(4, 4, bplot + 2) if not i: plt.title('"author"') plot_bar(author[key]) set_plot_look(author[key]) plt.subplot(4, 4, bplot + 3) if not i: plt.title('"journal"') plot_bar(journal[key]) set_plot_look(journal[key]) plt.subplot(4, 4, bplot + 4) if not i: plt.title('"year"') plot_bar(year[key]) 
set_plot_look(year[key]) legend = fig.legend(tuple(handles), ('Correcte', 'Parcialment correcte', 'Incorrecte'), 'lower center', ncol=3) legend.draw_frame(False) #plt.show() plt.savefig(''.join(['results:extraction-', str(nexamples), '.pdf'])) print 'Finished'
gpl-3.0
4,419,617,249,933,716,500
27.891892
115
0.432928
false
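An alternative (not the script's own approach) to the manual three-call plot_bar above: stack horizontal segments with barh and a running left offset. The fractions and colors here are made up.

import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt

fractions = [0.6, 0.2, 0.2]          # correct, partial, incorrect (illustrative)
colors = ["#333333", "#666666", "#DDDDDD"]

left = 0.0
fig, ax = plt.subplots(figsize=(4, 0.8))
for frac, color in zip(fractions, colors):
    # Each segment starts where the previous one ended.
    ax.barh(0, frac, left=left, color=color, edgecolor="none")
    left += frac

ax.set_xlim(0, 1)
ax.set_yticks([])
fig.savefig("stacked_fractions.png")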
tswast/google-cloud-python
grafeas/docs/conf.py
2
11653
# -*- coding: utf-8 -*- # # grafeas documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) __version__ = "0.1.0" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "1.6.3" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", ] # autodoc/autosummary flags autoclass_content = "both" autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Allow markdown includes (so releases.md can include CHANGLEOG.md) # http://www.sphinx-doc.org/en/master/markdown.html source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = u"grafeas" copyright = u"2017, Google" author = u"Google APIs" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "Google Cloud Client Libraries for Python", "github_user": "googleapis", "github_repo": "google-cloud-python", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", "code_font_family": "'Roboto Mono', 'Consolas', monospace", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "grafeas-doc" # -- Options for warnings ------------------------------------------------------ suppress_warnings = [ # Temporarily suppress this to avoid "more than one target found for # cross-reference" warning, which are intractable for us to avoid while in # a mono-repo. # See https://github.com/sphinx-doc/sphinx/blob # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 "ref.python" ] # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, "grafeas.tex", u"grafeas Documentation", author, "manual") ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "grafeas", u"grafeas Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "grafeas", u"grafeas Documentation", author, "grafeas", "GAPIC library for the {metadata.shortName} v1 service", "APIs", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "gax": ("https://gax-python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), "grpc": ("https://grpc.io/grpc/python/", None), "requests": ("https://requests.kennethreitz.org/en/stable/", None), "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), } # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = False napoleon_use_param = True napoleon_use_rtype = True
apache-2.0
5,449,731,004,215,259,000
32.389685
86
0.694585
false
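A minimal sketch, not part of the conf.py stored in the record above, that probes the intersphinx_mapping it defines: each base URL should serve a Sphinx objects.inv inventory, so a quick HEAD request per entry catches stale cross-reference targets before a docs build. The URL subset is copied verbatim from the mapping; everything else (names, the check itself) is illustrative and uses only the standard library.

from urllib.request import Request, urlopen

# Subset of the intersphinx_mapping defined in the conf.py above.
INTERSPHINX = {
    "python": "http://python.readthedocs.org/en/latest/",
    "google.api_core": "https://googleapis.dev/python/google-api-core/latest",
    "pandas": "https://pandas.pydata.org/pandas-docs/stable/",
}

for name, base in INTERSPHINX.items():
    # Sphinx publishes its cross-reference inventory as <base>/objects.inv.
    url = base.rstrip("/") + "/objects.inv"
    try:
        status = urlopen(Request(url, method="HEAD"), timeout=10).status
    except Exception as exc:  # HTTP errors, timeouts, DNS failures
        status = exc
    print(f"{name}: {url} -> {status}")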
lukauskas/seaborn
seaborn/matrix.py
3
47133
"""Functions to visualize matrices of data.""" from __future__ import division import itertools import matplotlib as mpl from matplotlib.collections import LineCollection import matplotlib.pyplot as plt from matplotlib import gridspec import numpy as np import pandas as pd from scipy.cluster import hierarchy from . import cm from .axisgrid import Grid from .utils import (despine, axis_ticklabels_overlap, relative_luminance, to_utf8) from .external.six import string_types __all__ = ["heatmap", "clustermap"] def _index_to_label(index): """Convert a pandas index or multiindex to an axis label.""" if isinstance(index, pd.MultiIndex): return "-".join(map(to_utf8, index.names)) else: return index.name def _index_to_ticklabels(index): """Convert a pandas index or multiindex into ticklabels.""" if isinstance(index, pd.MultiIndex): return ["-".join(map(to_utf8, i)) for i in index.values] else: return index.values def _convert_colors(colors): """Convert either a list of colors or nested lists of colors to RGB.""" to_rgb = mpl.colors.colorConverter.to_rgb if isinstance(colors, pd.DataFrame): # Convert dataframe return pd.DataFrame({col: colors[col].map(to_rgb) for col in colors}) elif isinstance(colors, pd.Series): return colors.map(to_rgb) else: try: to_rgb(colors[0]) # If this works, there is only one level of colors return list(map(to_rgb, colors)) except ValueError: # If we get here, we have nested lists return [list(map(to_rgb, l)) for l in colors] def _matrix_mask(data, mask): """Ensure that data and mask are compatabile and add missing values. Values will be plotted for cells where ``mask`` is ``False``. ``data`` is expected to be a DataFrame; ``mask`` can be an array or a DataFrame. """ if mask is None: mask = np.zeros(data.shape, np.bool) if isinstance(mask, np.ndarray): # For array masks, ensure that shape matches data then convert if mask.shape != data.shape: raise ValueError("Mask must have the same shape as data.") mask = pd.DataFrame(mask, index=data.index, columns=data.columns, dtype=np.bool) elif isinstance(mask, pd.DataFrame): # For DataFrame masks, ensure that semantic labels match data if not mask.index.equals(data.index) \ and mask.columns.equals(data.columns): err = "Mask must have the same index and columns as data." 
raise ValueError(err) # Add any cells with missing data to the mask # This works around an issue where `plt.pcolormesh` doesn't represent # missing data properly mask = mask | pd.isnull(data) return mask class _HeatMapper(object): """Draw a heatmap plot of a matrix with nice labels and colormaps.""" def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt, annot_kws, cbar, cbar_kws, xticklabels=True, yticklabels=True, mask=None): """Initialize the plotting object.""" # We always want to have a DataFrame with semantic information # and an ndarray to pass to matplotlib if isinstance(data, pd.DataFrame): plot_data = data.values else: plot_data = np.asarray(data) data = pd.DataFrame(plot_data) # Validate the mask and convet to DataFrame mask = _matrix_mask(data, mask) plot_data = np.ma.masked_where(np.asarray(mask), plot_data) # Get good names for the rows and columns xtickevery = 1 if isinstance(xticklabels, int): xtickevery = xticklabels xticklabels = _index_to_ticklabels(data.columns) elif xticklabels is True: xticklabels = _index_to_ticklabels(data.columns) elif xticklabels is False: xticklabels = [] ytickevery = 1 if isinstance(yticklabels, int): ytickevery = yticklabels yticklabels = _index_to_ticklabels(data.index) elif yticklabels is True: yticklabels = _index_to_ticklabels(data.index) elif yticklabels is False: yticklabels = [] # Get the positions and used label for the ticks nx, ny = data.T.shape if not len(xticklabels): self.xticks = [] self.xticklabels = [] elif isinstance(xticklabels, string_types) and xticklabels == "auto": self.xticks = "auto" self.xticklabels = _index_to_ticklabels(data.columns) else: self.xticks, self.xticklabels = self._skip_ticks(xticklabels, xtickevery) if not len(yticklabels): self.yticks = [] self.yticklabels = [] elif isinstance(yticklabels, string_types) and yticklabels == "auto": self.yticks = "auto" self.yticklabels = _index_to_ticklabels(data.index) else: self.yticks, self.yticklabels = self._skip_ticks(yticklabels, ytickevery) # Get good names for the axis labels xlabel = _index_to_label(data.columns) ylabel = _index_to_label(data.index) self.xlabel = xlabel if xlabel is not None else "" self.ylabel = ylabel if ylabel is not None else "" # Determine good default values for the colormapping self._determine_cmap_params(plot_data, vmin, vmax, cmap, center, robust) # Sort out the annotations if annot is None: annot = False annot_data = None elif isinstance(annot, bool): if annot: annot_data = plot_data else: annot_data = None else: try: annot_data = annot.values except AttributeError: annot_data = annot if annot.shape != plot_data.shape: raise ValueError('Data supplied to "annot" must be the same ' 'shape as the data to plot.') annot = True # Save other attributes to the object self.data = data self.plot_data = plot_data self.annot = annot self.annot_data = annot_data self.fmt = fmt self.annot_kws = {} if annot_kws is None else annot_kws self.cbar = cbar self.cbar_kws = {} if cbar_kws is None else cbar_kws self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6)) def _determine_cmap_params(self, plot_data, vmin, vmax, cmap, center, robust): """Use some heuristics to set good defaults for colorbar and range.""" calc_data = plot_data.data[~np.isnan(plot_data.data)] if vmin is None: vmin = np.percentile(calc_data, 2) if robust else calc_data.min() if vmax is None: vmax = np.percentile(calc_data, 98) if robust else calc_data.max() self.vmin, self.vmax = vmin, vmax # Choose default colormaps if not provided if cmap is None: if center is None: 
self.cmap = cm.rocket else: self.cmap = cm.icefire elif isinstance(cmap, string_types): self.cmap = mpl.cm.get_cmap(cmap) elif isinstance(cmap, list): self.cmap = mpl.colors.ListedColormap(cmap) else: self.cmap = cmap # Recenter a divergent colormap if center is not None: vrange = max(vmax - center, center - vmin) normlize = mpl.colors.Normalize(center - vrange, center + vrange) cmin, cmax = normlize([vmin, vmax]) cc = np.linspace(cmin, cmax, 256) self.cmap = mpl.colors.ListedColormap(self.cmap(cc)) def _annotate_heatmap(self, ax, mesh): """Add textual labels with the value in each cell.""" mesh.update_scalarmappable() height, width = self.annot_data.shape xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5) for x, y, m, color, val in zip(xpos.flat, ypos.flat, mesh.get_array(), mesh.get_facecolors(), self.annot_data.flat): if m is not np.ma.masked: lum = relative_luminance(color) text_color = ".15" if lum > .408 else "w" annotation = ("{:" + self.fmt + "}").format(val) text_kwargs = dict(color=text_color, ha="center", va="center") text_kwargs.update(self.annot_kws) ax.text(x, y, annotation, **text_kwargs) def _skip_ticks(self, labels, tickevery): """Return ticks and labels at evenly spaced intervals.""" n = len(labels) if tickevery == 0: ticks, labels = [], [] elif tickevery == 1: ticks, labels = np.arange(n) + .5, labels else: start, end, step = 0, n, tickevery ticks = np.arange(start, end, step) + .5 labels = labels[start:end:step] return ticks, labels def _auto_ticks(self, ax, labels, axis): """Determine ticks and ticklabels that minimize overlap.""" transform = ax.figure.dpi_scale_trans.inverted() bbox = ax.get_window_extent().transformed(transform) size = [bbox.width, bbox.height][axis] axis = [ax.xaxis, ax.yaxis][axis] tick, = axis.set_ticks([0]) fontsize = tick.label.get_size() max_ticks = int(size // (fontsize / 72)) if max_ticks < 1: return [], [] tick_every = len(labels) // max_ticks + 1 tick_every = 1 if tick_every == 0 else tick_every ticks, labels = self._skip_ticks(labels, tick_every) return ticks, labels def plot(self, ax, cax, kws): """Draw the heatmap on the provided Axes.""" # Remove all the Axes spines despine(ax=ax, left=True, bottom=True) # Draw the heatmap mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, **kws) # Set the axis limits ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0])) # Invert the y axis to show the plot in matrix form ax.invert_yaxis() # Possibly add a colorbar if self.cbar: cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws) cb.outline.set_linewidth(0) # If rasterized is passed to pcolormesh, also rasterize the # colorbar to avoid white lines on the PDF rendering if kws.get('rasterized', False): cb.solids.set_rasterized(True) # Add row and column labels if isinstance(self.xticks, string_types) and self.xticks == "auto": xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0) else: xticks, xticklabels = self.xticks, self.xticklabels if isinstance(self.yticks, string_types) and self.yticks == "auto": yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1) else: yticks, yticklabels = self.yticks, self.yticklabels ax.set(xticks=xticks, yticks=yticks) xtl = ax.set_xticklabels(xticklabels) ytl = ax.set_yticklabels(yticklabels, rotation="vertical") # Possibly rotate them if they overlap if hasattr(ax.figure.canvas, "get_renderer"): ax.figure.draw(ax.figure.canvas.get_renderer()) if axis_ticklabels_overlap(xtl): plt.setp(xtl, rotation="vertical") if 
axis_ticklabels_overlap(ytl): plt.setp(ytl, rotation="horizontal") # Add the axis labels ax.set(xlabel=self.xlabel, ylabel=self.ylabel) # Annotate the cells with the formatted values if self.annot: self._annotate_heatmap(ax, mesh) def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False, annot=None, fmt=".2g", annot_kws=None, linewidths=0, linecolor="white", cbar=True, cbar_kws=None, cbar_ax=None, square=False, xticklabels="auto", yticklabels="auto", mask=None, ax=None, **kwargs): """Plot rectangular data as a color-encoded matrix. This is an Axes-level function and will draw the heatmap into the currently-active Axes if none is provided to the ``ax`` argument. Part of this Axes space will be taken and used to plot a colormap, unless ``cbar`` is False or a separate Axes is provided to ``cbar_ax``. Parameters ---------- data : rectangular dataset 2D dataset that can be coerced into an ndarray. If a Pandas DataFrame is provided, the index/column information will be used to label the columns and rows. vmin, vmax : floats, optional Values to anchor the colormap, otherwise they are inferred from the data and other keyword arguments. cmap : matplotlib colormap name or object, or list of colors, optional The mapping from data values to color space. If not provided, the default will depend on whether ``center`` is set. center : float, optional The value at which to center the colormap when plotting divergant data. Using this parameter will change the default ``cmap`` if none is specified. robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with robust quantiles instead of the extreme values. annot : bool or rectangular dataset, optional If True, write the data value in each cell. If an array-like with the same shape as ``data``, then use this to annotate the heatmap instead of the raw data. fmt : string, optional String formatting code to use when adding annotations. annot_kws : dict of key, value mappings, optional Keyword arguments for ``ax.text`` when ``annot`` is True. linewidths : float, optional Width of the lines that will divide each cell. linecolor : color, optional Color of the lines that will divide each cell. cbar : boolean, optional Whether to draw a colorbar. cbar_kws : dict of key, value mappings, optional Keyword arguments for `fig.colorbar`. cbar_ax : matplotlib Axes, optional Axes in which to draw the colorbar, otherwise take space from the main Axes. square : boolean, optional If True, set the Axes aspect to "equal" so each cell will be square-shaped. xticklabels, yticklabels : "auto", bool, list-like, or int, optional If True, plot the column names of the dataframe. If False, don't plot the column names. If list-like, plot these alternate labels as the xticklabels. If an integer, use the column names but plot only every n label. If "auto", try to densely plot non-overlapping labels. mask : boolean array or DataFrame, optional If passed, data will not be shown in cells where ``mask`` is True. Cells with missing values are automatically masked. ax : matplotlib Axes, optional Axes in which to draw the plot, otherwise use the currently-active Axes. kwargs : other keyword arguments All other keyword arguments are passed to ``ax.pcolormesh``. Returns ------- ax : matplotlib Axes Axes object with the heatmap. See also -------- clustermap : Plot a matrix using hierachical clustering to arrange the rows and columns. Examples -------- Plot a heatmap for a numpy array: .. 
plot:: :context: close-figs >>> import numpy as np; np.random.seed(0) >>> import seaborn as sns; sns.set() >>> uniform_data = np.random.rand(10, 12) >>> ax = sns.heatmap(uniform_data) Change the limits of the colormap: .. plot:: :context: close-figs >>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1) Plot a heatmap for data centered on 0 with a diverging colormap: .. plot:: :context: close-figs >>> normal_data = np.random.randn(10, 12) >>> ax = sns.heatmap(normal_data, center=0) Plot a dataframe with meaningful row and column labels: .. plot:: :context: close-figs >>> flights = sns.load_dataset("flights") >>> flights = flights.pivot("month", "year", "passengers") >>> ax = sns.heatmap(flights) Annotate each cell with the numeric value using integer formatting: .. plot:: :context: close-figs >>> ax = sns.heatmap(flights, annot=True, fmt="d") Add lines between each cell: .. plot:: :context: close-figs >>> ax = sns.heatmap(flights, linewidths=.5) Use a different colormap: .. plot:: :context: close-figs >>> ax = sns.heatmap(flights, cmap="YlGnBu") Center the colormap at a specific value: .. plot:: :context: close-figs >>> ax = sns.heatmap(flights, center=flights.loc["January", 1955]) Plot every other column label and don't plot row labels: .. plot:: :context: close-figs >>> data = np.random.randn(50, 20) >>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False) Don't draw a colorbar: .. plot:: :context: close-figs >>> ax = sns.heatmap(flights, cbar=False) Use different axes for the colorbar: .. plot:: :context: close-figs >>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3} >>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws) >>> ax = sns.heatmap(flights, ax=ax, ... cbar_ax=cbar_ax, ... cbar_kws={"orientation": "horizontal"}) Use a mask to plot only part of a matrix .. plot:: :context: close-figs >>> corr = np.corrcoef(np.random.randn(10, 200)) >>> mask = np.zeros_like(corr) >>> mask[np.triu_indices_from(mask)] = True >>> with sns.axes_style("white"): ... 
ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True) """ # Initialize the plotter object plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt, annot_kws, cbar, cbar_kws, xticklabels, yticklabels, mask) # Add the pcolormesh kwargs here kwargs["linewidths"] = linewidths kwargs["edgecolor"] = linecolor # Draw the plot and return the Axes if ax is None: ax = plt.gca() if square: ax.set_aspect("equal") plotter.plot(ax, cbar_ax, kwargs) return ax class _DendrogramPlotter(object): """Object for drawing tree of similarities between data rows/columns""" def __init__(self, data, linkage, metric, method, axis, label, rotate): """Plot a dendrogram of the relationships between the columns of data Parameters ---------- data : pandas.DataFrame Rectangular data """ self.axis = axis if self.axis == 1: data = data.T if isinstance(data, pd.DataFrame): array = data.values else: array = np.asarray(data) data = pd.DataFrame(array) self.array = array self.data = data self.shape = self.data.shape self.metric = metric self.method = method self.axis = axis self.label = label self.rotate = rotate if linkage is None: self.linkage = self.calculated_linkage else: self.linkage = linkage self.dendrogram = self.calculate_dendrogram() # Dendrogram ends are always at multiples of 5, who knows why ticks = 10 * np.arange(self.data.shape[0]) + 5 if self.label: ticklabels = _index_to_ticklabels(self.data.index) ticklabels = [ticklabels[i] for i in self.reordered_ind] if self.rotate: self.xticks = [] self.yticks = ticks self.xticklabels = [] self.yticklabels = ticklabels self.ylabel = _index_to_label(self.data.index) self.xlabel = '' else: self.xticks = ticks self.yticks = [] self.xticklabels = ticklabels self.yticklabels = [] self.ylabel = '' self.xlabel = _index_to_label(self.data.index) else: self.xticks, self.yticks = [], [] self.yticklabels, self.xticklabels = [], [] self.xlabel, self.ylabel = '', '' self.dependent_coord = self.dendrogram['dcoord'] self.independent_coord = self.dendrogram['icoord'] def _calculate_linkage_scipy(self): if np.product(self.shape) >= 10000: UserWarning('This will be slow... (gentle suggestion: ' '"pip install fastcluster")') linkage = hierarchy.linkage(self.array, method=self.method, metric=self.metric) return linkage def _calculate_linkage_fastcluster(self): import fastcluster # Fastcluster has a memory-saving vectorized version, but only # with certain linkage methods, and mostly with euclidean metric # vector_methods = ('single', 'centroid', 'median', 'ward') euclidean_methods = ('centroid', 'median', 'ward') euclidean = self.metric == 'euclidean' and self.method in \ euclidean_methods if euclidean or self.method == 'single': return fastcluster.linkage_vector(self.array, method=self.method, metric=self.metric) else: linkage = fastcluster.linkage(self.array, method=self.method, metric=self.metric) return linkage @property def calculated_linkage(self): try: return self._calculate_linkage_fastcluster() except ImportError: return self._calculate_linkage_scipy() def calculate_dendrogram(self): """Calculates a dendrogram based on the linkage matrix Made a separate function, not a property because don't want to recalculate the dendrogram every time it is accessed. Returns ------- dendrogram : dict Dendrogram dictionary as returned by scipy.cluster.hierarchy .dendrogram. 
The important key-value pairing is "reordered_ind" which indicates the re-ordering of the matrix """ return hierarchy.dendrogram(self.linkage, no_plot=True, color_threshold=-np.inf) @property def reordered_ind(self): """Indices of the matrix, reordered by the dendrogram""" return self.dendrogram['leaves'] def plot(self, ax): """Plots a dendrogram of the similarities between data on the axes Parameters ---------- ax : matplotlib.axes.Axes Axes object upon which the dendrogram is plotted """ line_kwargs = dict(linewidths=.5, colors='k') if self.rotate and self.axis == 0: lines = LineCollection([list(zip(x, y)) for x, y in zip(self.dependent_coord, self.independent_coord)], **line_kwargs) else: lines = LineCollection([list(zip(x, y)) for x, y in zip(self.independent_coord, self.dependent_coord)], **line_kwargs) ax.add_collection(lines) number_of_leaves = len(self.reordered_ind) max_dependent_coord = max(map(max, self.dependent_coord)) if self.rotate: ax.yaxis.set_ticks_position('right') # Constants 10 and 1.05 come from # `scipy.cluster.hierarchy._plot_dendrogram` ax.set_ylim(0, number_of_leaves * 10) ax.set_xlim(0, max_dependent_coord * 1.05) ax.invert_xaxis() ax.invert_yaxis() else: # Constants 10 and 1.05 come from # `scipy.cluster.hierarchy._plot_dendrogram` ax.set_xlim(0, number_of_leaves * 10) ax.set_ylim(0, max_dependent_coord * 1.05) despine(ax=ax, bottom=True, left=True) ax.set(xticks=self.xticks, yticks=self.yticks, xlabel=self.xlabel, ylabel=self.ylabel) xtl = ax.set_xticklabels(self.xticklabels) ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical') # Force a draw of the plot to avoid matplotlib window error if hasattr(ax.figure.canvas, "get_renderer"): ax.figure.draw(ax.figure.canvas.get_renderer()) if len(ytl) > 0 and axis_ticklabels_overlap(ytl): plt.setp(ytl, rotation="horizontal") if len(xtl) > 0 and axis_ticklabels_overlap(xtl): plt.setp(xtl, rotation="vertical") return self def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean', method='average', rotate=False, ax=None): """Draw a tree diagram of relationships within a matrix Parameters ---------- data : pandas.DataFrame Rectangular data linkage : numpy.array, optional Linkage matrix axis : int, optional Which axis to use to calculate linkage. 0 is rows, 1 is columns. label : bool, optional If True, label the dendrogram at leaves with column or row names metric : str, optional Distance metric. Anything valid for scipy.spatial.distance.pdist method : str, optional Linkage method to use. Anything valid for scipy.cluster.hierarchy.linkage rotate : bool, optional When plotting the matrix, whether to rotate it 90 degrees counter-clockwise, so the leaves face right ax : matplotlib axis, optional Axis to plot on, otherwise uses current axis Returns ------- dendrogramplotter : _DendrogramPlotter A Dendrogram plotter object. 
Notes ----- Access the reordered dendrogram indices with dendrogramplotter.reordered_ind """ plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis, metric=metric, method=method, label=label, rotate=rotate) if ax is None: ax = plt.gca() return plotter.plot(ax=ax) class ClusterGrid(Grid): def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None, figsize=None, row_colors=None, col_colors=None, mask=None): """Grid object for organizing clustered heatmap input on to axes""" if isinstance(data, pd.DataFrame): self.data = data else: self.data = pd.DataFrame(data) self.data2d = self.format_data(self.data, pivot_kws, z_score, standard_scale) self.mask = _matrix_mask(self.data2d, mask) if figsize is None: width, height = 10, 10 figsize = (width, height) self.fig = plt.figure(figsize=figsize) self.row_colors, self.row_color_labels = \ self._preprocess_colors(data, row_colors, axis=0) self.col_colors, self.col_color_labels = \ self._preprocess_colors(data, col_colors, axis=1) width_ratios = self.dim_ratios(self.row_colors, figsize=figsize, axis=1) height_ratios = self.dim_ratios(self.col_colors, figsize=figsize, axis=0) nrows = 3 if self.col_colors is None else 4 ncols = 3 if self.row_colors is None else 4 self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01, width_ratios=width_ratios, height_ratios=height_ratios) self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2]) self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1]) self.ax_row_dendrogram.set_axis_off() self.ax_col_dendrogram.set_axis_off() self.ax_row_colors = None self.ax_col_colors = None if self.row_colors is not None: self.ax_row_colors = self.fig.add_subplot( self.gs[nrows - 1, ncols - 2]) if self.col_colors is not None: self.ax_col_colors = self.fig.add_subplot( self.gs[nrows - 2, ncols - 1]) self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1]) # colorbar for scale to left corner self.cax = self.fig.add_subplot(self.gs[0, 0]) self.dendrogram_row = None self.dendrogram_col = None def _preprocess_colors(self, data, colors, axis): """Preprocess {row/col}_colors to extract labels and convert colors.""" labels = None if colors is not None: if isinstance(colors, (pd.DataFrame, pd.Series)): # Ensure colors match data indices if axis == 0: colors = colors.loc[data.index] else: colors = colors.loc[data.columns] # Replace na's with background color # TODO We should set these to transparent instead colors = colors.fillna('white') # Extract color values and labels from frame/series if isinstance(colors, pd.DataFrame): labels = list(colors.columns) colors = colors.T.values else: if colors.name is None: labels = [""] else: labels = [colors.name] colors = colors.values colors = _convert_colors(colors) return colors, labels def format_data(self, data, pivot_kws, z_score=None, standard_scale=None): """Extract variables from data or use directly.""" # Either the data is already in 2d matrix format, or need to do a pivot if pivot_kws is not None: data2d = data.pivot(**pivot_kws) else: data2d = data if z_score is not None and standard_scale is not None: raise ValueError( 'Cannot perform both z-scoring and standard-scaling on data') if z_score is not None: data2d = self.z_score(data2d, z_score) if standard_scale is not None: data2d = self.standard_scale(data2d, standard_scale) return data2d @staticmethod def z_score(data2d, axis=1): """Standarize the mean and variance of the data axis Parameters ---------- data2d : pandas.DataFrame Data to normalize axis : int Which axis 
to normalize across. If 0, normalize across rows, if 1, normalize across columns. Returns ------- normalized : pandas.DataFrame Noramlized data with a mean of 0 and variance of 1 across the specified axis. """ if axis == 1: z_scored = data2d else: z_scored = data2d.T z_scored = (z_scored - z_scored.mean()) / z_scored.std() if axis == 1: return z_scored else: return z_scored.T @staticmethod def standard_scale(data2d, axis=1): """Divide the data by the difference between the max and min Parameters ---------- data2d : pandas.DataFrame Data to normalize axis : int Which axis to normalize across. If 0, normalize across rows, if 1, normalize across columns. vmin : int If 0, then subtract the minimum of the data before dividing by the range. Returns ------- standardized : pandas.DataFrame Noramlized data with a mean of 0 and variance of 1 across the specified axis. """ # Normalize these values to range from 0 to 1 if axis == 1: standardized = data2d else: standardized = data2d.T subtract = standardized.min() standardized = (standardized - subtract) / ( standardized.max() - standardized.min()) if axis == 1: return standardized else: return standardized.T def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05): """Get the proportions of the figure taken up by each axes """ figdim = figsize[axis] # Get resizing proportion of this figure for the dendrogram and # colorbar, so only the heatmap gets bigger but the dendrogram stays # the same size. dendrogram = min(2. / figdim, .2) # add the colorbar colorbar_width = .8 * dendrogram colorbar_height = .2 * dendrogram if axis == 0: ratios = [colorbar_width, colorbar_height] else: ratios = [colorbar_height, colorbar_width] if side_colors is not None: # Add room for the colors ratios += [side_colors_ratio] # Add the ratio for the heatmap itself ratios += [.8] return ratios @staticmethod def color_list_to_matrix_and_cmap(colors, ind, axis=0): """Turns a list of colors into a numpy matrix and matplotlib colormap These arguments can now be plotted using heatmap(matrix, cmap) and the provided colors will be plotted. Parameters ---------- colors : list of matplotlib colors Colors to label the rows or columns of a dataframe. ind : list of ints Ordering of the rows or columns, to reorder the original colors by the clustered dendrogram order axis : int Which axis this is labeling Returns ------- matrix : numpy.array A numpy array of integer values, where each corresponds to a color from the originally provided list of colors cmap : matplotlib.colors.ListedColormap """ # check for nested lists/color palettes. 
# Will fail if matplotlib color is list not tuple if any(issubclass(type(x), list) for x in colors): all_colors = set(itertools.chain(*colors)) n = len(colors) m = len(colors[0]) else: all_colors = set(colors) n = 1 m = len(colors) colors = [colors] color_to_value = dict((col, i) for i, col in enumerate(all_colors)) matrix = np.array([color_to_value[c] for color in colors for c in color]) shape = (n, m) matrix = matrix.reshape(shape) matrix = matrix[:, ind] if axis == 0: # row-side: matrix = matrix.T cmap = mpl.colors.ListedColormap(all_colors) return matrix, cmap def savefig(self, *args, **kwargs): if 'bbox_inches' not in kwargs: kwargs['bbox_inches'] = 'tight' self.fig.savefig(*args, **kwargs) def plot_dendrograms(self, row_cluster, col_cluster, metric, method, row_linkage, col_linkage): # Plot the row dendrogram if row_cluster: self.dendrogram_row = dendrogram( self.data2d, metric=metric, method=method, label=False, axis=0, ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage) else: self.ax_row_dendrogram.set_xticks([]) self.ax_row_dendrogram.set_yticks([]) # PLot the column dendrogram if col_cluster: self.dendrogram_col = dendrogram( self.data2d, metric=metric, method=method, label=False, axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage) else: self.ax_col_dendrogram.set_xticks([]) self.ax_col_dendrogram.set_yticks([]) despine(ax=self.ax_row_dendrogram, bottom=True, left=True) despine(ax=self.ax_col_dendrogram, bottom=True, left=True) def plot_colors(self, xind, yind, **kws): """Plots color labels between the dendrogram and the heatmap Parameters ---------- heatmap_kws : dict Keyword arguments heatmap """ # Remove any custom colormap and centering kws = kws.copy() kws.pop('cmap', None) kws.pop('center', None) kws.pop('annot', None) kws.pop('vmin', None) kws.pop('vmax', None) kws.pop('robust', None) kws.pop('xticklabels', None) kws.pop('yticklabels', None) # Plot the row colors if self.row_colors is not None: matrix, cmap = self.color_list_to_matrix_and_cmap( self.row_colors, yind, axis=0) # Get row_color labels if self.row_color_labels is not None: row_color_labels = self.row_color_labels else: row_color_labels = False heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors, xticklabels=row_color_labels, yticklabels=False, **kws) # Adjust rotation of labels if row_color_labels is not False: plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90) else: despine(self.ax_row_colors, left=True, bottom=True) # Plot the column colors if self.col_colors is not None: matrix, cmap = self.color_list_to_matrix_and_cmap( self.col_colors, xind, axis=1) # Get col_color labels if self.col_color_labels is not None: col_color_labels = self.col_color_labels else: col_color_labels = False heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors, xticklabels=False, yticklabels=col_color_labels, **kws) # Adjust rotation of labels, place on right side if col_color_labels is not False: self.ax_col_colors.yaxis.tick_right() plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0) else: despine(self.ax_col_colors, left=True, bottom=True) def plot_matrix(self, colorbar_kws, xind, yind, **kws): self.data2d = self.data2d.iloc[yind, xind] self.mask = self.mask.iloc[yind, xind] # Try to reorganize specified tick labels, if provided xtl = kws.pop("xticklabels", "auto") try: xtl = np.asarray(xtl)[xind] except (TypeError, IndexError): pass ytl = kws.pop("yticklabels", "auto") try: ytl = np.asarray(ytl)[yind] except (TypeError, IndexError): pass heatmap(self.data2d, ax=self.ax_heatmap, 
cbar_ax=self.cax, cbar_kws=colorbar_kws, mask=self.mask, xticklabels=xtl, yticklabels=ytl, **kws) ytl = self.ax_heatmap.get_yticklabels() ytl_rot = None if not ytl else ytl[0].get_rotation() self.ax_heatmap.yaxis.set_ticks_position('right') self.ax_heatmap.yaxis.set_label_position('right') if ytl_rot is not None: ytl = self.ax_heatmap.get_yticklabels() plt.setp(ytl, rotation=ytl_rot) def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster, row_linkage, col_linkage, **kws): colorbar_kws = {} if colorbar_kws is None else colorbar_kws self.plot_dendrograms(row_cluster, col_cluster, metric, method, row_linkage=row_linkage, col_linkage=col_linkage) try: xind = self.dendrogram_col.reordered_ind except AttributeError: xind = np.arange(self.data2d.shape[1]) try: yind = self.dendrogram_row.reordered_ind except AttributeError: yind = np.arange(self.data2d.shape[0]) self.plot_colors(xind, yind, **kws) self.plot_matrix(colorbar_kws, xind, yind, **kws) return self def clustermap(data, pivot_kws=None, method='average', metric='euclidean', z_score=None, standard_scale=None, figsize=None, cbar_kws=None, row_cluster=True, col_cluster=True, row_linkage=None, col_linkage=None, row_colors=None, col_colors=None, mask=None, **kwargs): """Plot a matrix dataset as a hierarchically-clustered heatmap. Parameters ---------- data: 2D array-like Rectangular data for clustering. Cannot contain NAs. pivot_kws : dict, optional If `data` is a tidy dataframe, can provide keyword arguments for pivot to create a rectangular dataframe. method : str, optional Linkage method to use for calculating clusters. See scipy.cluster.hierarchy.linkage documentation for more information: https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html metric : str, optional Distance metric to use for the data. See scipy.spatial.distance.pdist documentation for more options https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html To use different metrics (or methods) for rows and columns, you may construct each linkage matrix yourself and provide them as {row,col}_linkage. z_score : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores for the rows or the columns. Z scores are: z = (x - mean)/std, so values in each row (column) will get the mean of the row (column) subtracted, then divided by the standard deviation of the row (column). This ensures that each row (column) has mean of 0 and variance of 1. standard_scale : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to standardize that dimension, meaning for each row or column, subtract the minimum and divide each by its maximum. figsize: tuple of two ints, optional Size of the figure to create. cbar_kws : dict, optional Keyword arguments to pass to ``cbar_kws`` in ``heatmap``, e.g. to add a label to the colorbar. {row,col}_cluster : bool, optional If True, cluster the {rows, columns}. {row,col}_linkage : numpy.array, optional Precomputed linkage matrix for the rows or columns. See scipy.cluster.hierarchy.linkage for specific formats. {row,col}_colors : list-like or pandas DataFrame/Series, optional List of colors to label for either the rows or columns. Useful to evaluate whether samples within a group are clustered together. Can use nested lists or DataFrame for multiple color levels of labeling. If given as a DataFrame or Series, labels for the colors are extracted from the DataFrames column names or from the name of the Series. 
DataFrame/Series colors are also matched to the data by their index, ensuring colors are drawn in the correct order. mask : boolean array or DataFrame, optional If passed, data will not be shown in cells where ``mask`` is True. Cells with missing values are automatically masked. Only used for visualizing, not for calculating. kwargs : other keyword arguments All other keyword arguments are passed to ``sns.heatmap`` Returns ------- clustergrid : ClusterGrid A ClusterGrid instance. Notes ----- The returned object has a ``savefig`` method that should be used if you want to save the figure object without clipping the dendrograms. To access the reordered row indices, use: ``clustergrid.dendrogram_row.reordered_ind`` Column indices, use: ``clustergrid.dendrogram_col.reordered_ind`` Examples -------- Plot a clustered heatmap: .. plot:: :context: close-figs >>> import seaborn as sns; sns.set(color_codes=True) >>> iris = sns.load_dataset("iris") >>> species = iris.pop("species") >>> g = sns.clustermap(iris) Use a different similarity metric: .. plot:: :context: close-figs >>> g = sns.clustermap(iris, metric="correlation") Use a different clustering method: .. plot:: :context: close-figs >>> g = sns.clustermap(iris, method="single") Use a different colormap and ignore outliers in colormap limits: .. plot:: :context: close-figs >>> g = sns.clustermap(iris, cmap="mako", robust=True) Change the size of the figure: .. plot:: :context: close-figs >>> g = sns.clustermap(iris, figsize=(6, 7)) Plot one of the axes in its original organization: .. plot:: :context: close-figs >>> g = sns.clustermap(iris, col_cluster=False) Add colored labels: .. plot:: :context: close-figs >>> lut = dict(zip(species.unique(), "rbg")) >>> row_colors = species.map(lut) >>> g = sns.clustermap(iris, row_colors=row_colors) Standardize the data within the columns: .. plot:: :context: close-figs >>> g = sns.clustermap(iris, standard_scale=1) Normalize the data within the rows: .. plot:: :context: close-figs >>> g = sns.clustermap(iris, z_score=0) """ plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize, row_colors=row_colors, col_colors=col_colors, z_score=z_score, standard_scale=standard_scale, mask=mask) return plotter.plot(metric=metric, method=method, colorbar_kws=cbar_kws, row_cluster=row_cluster, col_cluster=col_cluster, row_linkage=row_linkage, col_linkage=col_linkage, **kwargs)
bsd-3-clause
5,199,684,022,988,790,000
35.228286
97
0.579615
false
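A short usage sketch of the two public entry points defined in the seaborn/matrix.py record above, heatmap and clustermap. It simply restates the module's own docstring examples, so it assumes the same seaborn version together with matplotlib and the bundled flights/iris example datasets.

import matplotlib.pyplot as plt
import seaborn as sns

sns.set()

# Axes-level heatmap with annotations and cell dividers (from the heatmap docstring).
flights = sns.load_dataset("flights").pivot("month", "year", "passengers")
sns.heatmap(flights, annot=True, fmt="d", linewidths=.5, cmap="YlGnBu")

# Figure-level clustered heatmap with colored row labels (from the clustermap docstring).
iris = sns.load_dataset("iris")
species = iris.pop("species")
lut = dict(zip(species.unique(), "rbg"))
sns.clustermap(iris, row_colors=species.map(lut), standard_scale=1)

plt.show()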
adusia/pox
ext/NonCyclicPathsClusters.py
1
16077
''' *KMeans clustering algorithm is used to compute subgraphs *This approach finds the non-cyclic probes from cluster center node to all nodes in the subgraph with a constraint* #Constraint: Probe length <= Depth of Subgraph from cluster center #Constraint: Edges and Nodes cannot be repeated #Constraint: Symmetric probes are not added *The path from source node of the complete graph to the cluster center is added to each probe of that cluster. *IterativeBFS is used to search the graph for paths ''' import math import numpy as np import collections import networkx as nx #from networkx import Graph import matplotlib.pyplot as plt from dijkstras import Graph from collections import deque from termcolor import colored import copy import timeit import os from KMeans import KMeans class AllPaths: def __init__(self, Graph): self.num_of_nodes = Graph.number_of_nodes() self.G = Graph self.DependencyMatrix = [] self.AllProbes = [] self.probeno = 0 self.MaxdepthSubGraph = -1 self.Node = collections.namedtuple('Node', 'NodeNum, parents') self.ProbeToClus = [] distToNode = {} for n in range(0,self.num_of_nodes): distToNode[n] = nx.shortest_path_length(self.G,0,n) self.Maxdepth = max(distToNode.itervalues()) self.initialization() def initialization(self): self.ProbeSet = [] self.NumberOfProbesSelected = 0 self.DecompositionSet = [] self.DecompositionSet.append([]) self.ProbeCount = {} for x in range(1,self.num_of_nodes): self.DecompositionSet[0].append(x) self.ProbeCount[x] = 0 self.DecompositionSet[0].append(self.num_of_nodes) #print self.DecompositionSet def compute_dependency_Matrix(self,currnode,clusters): for clus, nodes in clusters.items(): if currnode in nodes: nodes.remove(currnode) #print clus, nodes SubGraph = self.G.subgraph(nodes) #self.PrintGraph(SubGraph) if clus != currnode: #source node is not one of the clusters self.ProbeToClus = nx.shortest_path(self.G,source=currnode,target=clus) #AllProbesToClus = nx.all_simple_paths(self.G,source=currnode,target=clus,cutoff=self.Maxdepth) #cutoff = self.Maxdepth AllProbesToClus = [] AllProbesToClus.append(self.ProbeToClus) for probe in list(AllProbesToClus): DependencyMatrixRow = [0]*(self.num_of_nodes + 1) for node in probe: DependencyMatrixRow[int(node)] += 1 if DependencyMatrixRow not in self.DependencyMatrix: self.DependencyMatrix.append(DependencyMatrixRow) self.AllProbes.append(probe + [self.probeno]) self.probeno += 1 self.ProbeToClus.pop() #print "ProbeToClus = %s"%self.ProbeToClus distToNode = {} for n in SubGraph.nodes(): try: distToNode[n] = nx.shortest_path_length(SubGraph,n,clus) except: print "EXCEPTION!!!! 
Disconnected Graph" #print SubGraph.nodes() #print SubGraph.edges() #print distToNode #print n #print self.G.edges(n) self.MaxdepthSubGraph = max(distToNode.itervalues()) for node in nodes: if node != clus and node != currnode: self.iterBFS(clus, node, SubGraph) return self.DependencyMatrix def iterBFS(self, currnode, endnode, SubGraph): #print "endnode=%s" % endnode startnode = self.Node(currnode, str(currnode)) #stack = [] stack = deque() stack.append(startnode) #d = 0 while (stack): #d += 1 top = stack.popleft() nbrs = SubGraph.neighbors(top.NodeNum) for nbr in nbrs: #if str(top.NodeNum)+"-"+str(nbr) not in top.parents and str(nbr)+"-"+str(top.NodeNum) not in top.parents: if str(nbr) not in top.parents.split("-") or (nbr == currnode and str(top.NodeNum)+"-"+str(nbr) not in top.parents and str(nbr)+"-"+str(top.NodeNum) not in top.parents): if nbr == endnode: probe = self.ProbeToClus + top.parents.split("-") + [str(nbr)] DependencyMatrixRow = [0]*(self.num_of_nodes + 1) for node in probe: DependencyMatrixRow[int(node)] += 1 if DependencyMatrixRow not in self.DependencyMatrix: self.DependencyMatrix.append(DependencyMatrixRow) self.AllProbes.append(map(int, probe + [str(self.probeno)])) self.probeno += 1 #self.ProbeCount[endnode] += 1 #print "top.NodeNum=%d" % top.NodeNum #print "Probe=%s" % str(probe) else: #if len((top.parents+"-"+str(nbr)).split("-")) <= distToEndnode[currnode]: #shortest length probe if len((top.parents+"-"+str(nbr)).split("-")) <= self.MaxdepthSubGraph: #maxdepth of the sub graph #if len((top.parents+"-"+str(nbr)).split("-")) <= MaxDepth: #maxDepth of the node #if len((top.parents+"-"+str(nbr)).split("-")) <= self.num_of_nodes: stack.append(self.Node(nbr, top.parents+"-"+str(nbr))) #print stack #print nodeDegree #if d <= 10: # print stack def select_probes_path_cost(self): candidate_set = list(self.AllProbes) candidate_set.sort(key = lambda x: (len(x) -2) if x[0] == x[len(x)-2] else (len(x)-2)*2) DM = list(self.DependencyMatrix) while True: for probe in candidate_set: probeno = int(probe[-1]) #int(probe.pop()) #print probeno #print "DecompositionSet=" + str(self.DecompositionSet) DecomposedSet = [] RemovedSet = [] for nodeset in self.DecompositionSet: if len(nodeset) > 1: #print "nodeset=" + str(nodeset) set1 = [] set0 = [] for node in nodeset: #print "node=" + str(node) if DM[probeno][int(node)] > 0: set1.append(node) else: set0.append(node) if len(set1) > 0 and len(set0) > 0: #self.DecompositionSet.remove(nodeset) RemovedSet.append(nodeset) DecomposedSet.append(set1) DecomposedSet.append(set0) if probe not in self.ProbeSet: #print "DecompositionSet=" + str(self.DecompositionSet) #print probe self.ProbeSet.append(probe) for s in RemovedSet: self.DecompositionSet.remove(s) for s in DecomposedSet: self.DecompositionSet.append(s) if len(self.DecompositionSet) == (self.num_of_nodes): self.NumberOfProbesSelected = len(self.ProbeSet) candidate_set = None DM = None return #self.NumberOfProbesSelected = len(self.ProbeSet) print colored("Undecomposed Set=" + str(self.DecompositionSet),'green') print colored("Total number of probes with undecomposed set = %d" % len(self.ProbeSet), 'blue') f = lambda x: (len(x) -2) if x[0] == x[len(x)-2] else (len(x)-2)*2 print colored("Total length of all probes with undecomposed set = %d" % sum(f(lst) for lst in self.ProbeSet), 'blue') #print "Candidat Set=%s" % str(candidate_set) candidate_set = [] for ds in self.DecompositionSet: if len(ds) > 1: for node in ds: if self.G.__contains__(int(node)): newprobe = 
nx.shortest_path(self.G,0,int(node))+[self.probeno] candidate_set.append(newprobe) self.probeno += 1 DependencyMatrixRow = [0]*(self.num_of_nodes + 1) for node in newprobe[:-1]: DependencyMatrixRow[int(node)] += 1 DM.append(DependencyMatrixRow) #self.AllProbes.append(newprobe) print "New Candidate Set=%s" % str(candidate_set) def get_probe_entropy (self, probe, DecompositionSet, get_DecompositionSet = False): if get_DecompositionSet: ds = DecompositionSet else: ds = list(DecompositionSet) DecomposedSet = [] RemovedSet = [] for nodeset in ds: if len(nodeset) > 1: set1 = [] set0 = [] for node in nodeset: if node in probe: set1.append(node) else: set0.append(node) if len(set1) > 0 and len(set0) > 0: RemovedSet.append(nodeset) DecomposedSet.append(set1) DecomposedSet.append(set0) for s in RemovedSet: ds.remove(s) for s in DecomposedSet: ds.append(s) if get_DecompositionSet: return ds else: if len(ds) > len(DecompositionSet): return self.get_entropy_ds(ds) else: return 10000 def get_entropy_ds (self, DecompositionSet): ds = list(DecompositionSet) entropy = 0 for nodeset in ds: length = len(nodeset) entropy = entropy + (float(length)/(self.num_of_nodes+1)) * math.log(length,2) return entropy def select_probes_entropy (self): candidate_set = list(self.AllProbes) f = lambda x: (len(x) -2) if x[0] == x[len(x)-2] else (len(x)-2)*2 #print "DecompositionSet=" + str(self.DecompositionSet) while True: minentropy = 9999 entropy = 0 probe_selected = -1 minProbeLen = 9999 ProbeLen = -1 if len(self.DecompositionSet) == (self.num_of_nodes): self.NumberOfProbesSelected = len(self.ProbeSet) return for probe in candidate_set: #print "Probe= %s"%probe #print "DecompositionSet=%s"%self.DecompositionSet entropy = self.get_probe_entropy(probe[:-1],self.DecompositionSet,False) #ProbeLen = len(probe[:-1])-1 ProbeLen = f(probe) if entropy == minentropy and ProbeLen < minProbeLen: #print "minentropy=%s"%str(minentropy) #print "ProbeLen= %d"%ProbeLen #print "minProbeLen= %d"%minProbeLen minentropy = entropy minProbeLen = ProbeLen probe_selected = probe if entropy < minentropy: #print "entropy=%s"%str(entropy) #print "ProbeLen= %d"%ProbeLen #print "minProbeLen= %d"%minProbeLen minentropy = entropy minProbeLen = ProbeLen probe_selected = probe if probe_selected != -1: #print "minentropy="+str(minentropy) #print "ProbeSelected=%s " % str(probe_selected[:-1]) self.ProbeSet.append(probe_selected) candidate_set.remove(probe_selected) self.DecompositionSet = self.get_probe_entropy(probe_selected[:-1],self.DecompositionSet,True) #print "DecompositionSet=" + str(self.DecompositionSet) else: #self.NumberOfProbesSelected = len(self.ProbeSet) print colored("Undecomposed Set=" + str(self.DecompositionSet),'green') print colored("Total number of probes with undecomposed set = %d" % len(self.ProbeSet), 'blue') #f = lambda x: (len(x) -1) if x[0] == x[len(x)-1] else (len(x)-1)*2 print colored("Total length of all probes with undecomposed set = %d" % sum(f(lst) for lst in self.ProbeSet), 'blue') #print "Candidat Set=%s" % str(candidate_set) candidate_set = [] for ds in self.DecompositionSet: if len(ds) > 1: for node in ds: if self.G.__contains__(int(node)): newprobe = nx.shortest_path(self.G,0,int(node))+[self.probeno] candidate_set.append(newprobe) self.probeno += 1 DependencyMatrixRow = [0]*(self.num_of_nodes + 1) for node in newprobe[:-1]: DependencyMatrixRow[int(node)] += 1 self.DependencyMatrix.append(DependencyMatrixRow) self.AllProbes.append(newprobe) print "New Candidate Set=%s" % str(candidate_set) def PrintGraph(self,Graph): 
labels = {} plt.figure(figsize=(12,12)) pox=nx.spring_layout(Graph) for node in Graph.nodes(): labels[node]='$'+str(node)+'$' nx.draw_networkx(Graph,pox,labels) plt.show() def main(): #filename = "Topologies/Node"+str(nodes)+"/result-GraphDepth"+str(nodes)+".txt" filename = "Topologies/ClusterResults/GraphDepth.txt" if os.path.exists(filename): os.remove(filename) with open(filename,'a') as fR: fR.write("Nodes\t"+"AvgDeg\t"+"TotPrbs\t"+"MaxDepth\t"+"PCExecTime\t"+"No.ofPrbs\t"+"TotLenOfPrbs\t"+"PEExecTime\t"+"No.ofPrbs\t"+"TotLenOfPrbs\n") for nodes in range(10,11,10): for j in range(0,1): print "nodes = %d" % nodes, " j=%d" % j G = nx.Graph() #labels = {} ResultRow = [] ResultRow.append(nodes) for x in range(0,nodes): G.add_node(x) #labels[x]='$'+str(x)+'$' #print colored("Number of nodes = %d" % nodes,'red') mat = np.loadtxt("Topologies/Node"+str(nodes)+"/out"+str(nodes)+"-"+str(j)+".txt",delimiter="\t") for i in mat: G.add_edges_from([(int(i[0]),int(i[1])), (int(i[1]),int(i[0]))]) mat = None obj = AllPaths(G) startnode = 0 clusters = KMeans(G,5) #print "K = 2" print "clusters = %s"%clusters obj.compute_dependency_Matrix(startnode,clusters) #for row in obj.compute_dependency_Matrix(startnode): # print row #+ [row.count(1)] #for probe in obj.AllProbes: #print probe[:-1] ResultRow.append(float(sum(nx.degree(G).values()))/nodes) ResultRow.append(obj.Maxdepth) #print colored("Total Probes = %s" % len(obj.AllProbes),'red') #print "----------------------------------------------------------------------------------" #print colored("Probe selection using PathCost/ProbeLength", 'white') start = timeit.default_timer() obj.select_probes_path_cost() stop = timeit.default_timer() ResultRow.append(str(stop - start)) #for probe in obj.ProbeSet: #print probe[:-1] ResultRow.append(obj.NumberOfProbesSelected) #print colored("Total number of probes = %d" % obj.NumberOfProbesSelected, 'red') f = lambda x: (len(x) - 2) if x[0] == x[len(x)-2] else (len(x)-2)*2 #print colored("Total length of all probes = %d" % sum(f(lst) for lst in obj.ProbeSet), 'red') #print obj.ProbeCount #print "----------------------------------------------------------------------------------" ResultRow.append(sum(f(lst) for lst in obj.ProbeSet)) obj.initialization() #print colored("Probe selection using entropy", 'white') start = timeit.default_timer() obj.select_probes_entropy() stop = timeit.default_timer() ResultRow.insert(2,len(obj.AllProbes)) ResultRow.append(str(stop - start)) #for probe in obj.ProbeSet: # print probe[:-1] ResultRow.append(obj.NumberOfProbesSelected) #print colored("Total number of probes = %d" % obj.NumberOfProbesSelected, 'red') #print colored("Total length of all probes = %d" % sum(f(lst) for lst in obj.ProbeSet), 'red') #print obj.ProbeCount print colored("Total Probes after adding new probes= %s" % len(obj.AllProbes),'red') #print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" ResultRow.append(sum(f(lst) for lst in obj.ProbeSet)) with open(filename,'a') as fR: for res in ResultRow: fR.write(str(res)+"\t") fR.write("\n") #obj.PrintGraph(G) if __name__ == '__main__': main()
apache-2.0
-8,318,516,807,553,912,000
36.917453
177
0.577595
false
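A standalone sketch of the entropy criterion that select_probes_entropy in the record above minimizes. The function and variable names here are illustrative, not taken from the module; only the formula, the sum over node sets of (|S| / (n_nodes + 1)) * log2|S| as computed in get_entropy_ds, comes from the source. A probe splits every set it partially covers, and the split that drives the entropy down the most is preferred.

import math

def decomposition_entropy(decomposition_set, n_nodes):
    # Each undecomposed node set S contributes (|S| / (n_nodes + 1)) * log2(|S|);
    # a fully decomposed state (all singleton sets) therefore scores 0.
    return sum((len(s) / (n_nodes + 1)) * math.log(len(s), 2)
               for s in decomposition_set)

def split_by_probe(decomposition_set, probe_nodes):
    # A probe splits every set into the nodes it passes through and the rest.
    new_sets = []
    for s in decomposition_set:
        inside = [n for n in s if n in probe_nodes]
        outside = [n for n in s if n not in probe_nodes]
        new_sets.extend(part for part in (inside, outside) if part)
    return new_sets

before = [[1, 2, 3, 4, 5]]               # nothing diagnosed yet
after = split_by_probe(before, {1, 2})   # a probe covering nodes 1 and 2
print(decomposition_entropy(before, n_nodes=5))  # ~1.935
print(decomposition_entropy(after, n_nodes=5))   # ~1.126, i.e. more informative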
jsilter/scipy
scipy/misc/common.py
2
11606
""" Functions which are common and require SciPy Base and Level 1 SciPy (special, linalg) """ from __future__ import division, print_function, absolute_import import numpy import numpy as np from numpy import (exp, log, asarray, arange, newaxis, hstack, product, array, zeros, eye, poly1d, r_, sum, fromstring, isfinite, squeeze, amax, reshape) from scipy.lib._version import NumpyVersion __all__ = ['logsumexp', 'central_diff_weights', 'derivative', 'pade', 'lena', 'ascent', 'face'] _NUMPY_170 = (NumpyVersion(numpy.__version__) >= NumpyVersion('1.7.0')) def logsumexp(a, axis=None, b=None, keepdims=False): """Compute the log of the sum of exponentials of input elements. Parameters ---------- a : array_like Input array. axis : None or int or tuple of ints, optional Axis or axes over which the sum is taken. By default `axis` is None, and all elements are summed. Tuple of ints is not accepted if NumPy version is lower than 1.7.0. .. versionadded:: 0.11.0 keepdims: bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array. .. versionadded:: 0.15.0 b : array-like, optional Scaling factor for exp(`a`) must be of the same shape as `a` or broadcastable to `a`. .. versionadded:: 0.12.0 Returns ------- res : ndarray The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))`` is returned. See Also -------- numpy.logaddexp, numpy.logaddexp2 Notes ----- Numpy has a logaddexp function which is very similar to `logsumexp`, but only handles two arguments. `logaddexp.reduce` is similar to this function, but may be less stable. Examples -------- >>> from scipy.misc import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 With weights >>> a = np.arange(10) >>> b = np.arange(10, 0, -1) >>> logsumexp(a, b=b) 9.9170178533034665 >>> np.log(np.sum(b*np.exp(a))) 9.9170178533034647 """ a = asarray(a) # keepdims is available in numpy.sum and numpy.amax since NumPy 1.7.0 # # Because SciPy supports versions earlier than 1.7.0, we have to handle # those old versions differently if not _NUMPY_170: # When support for Numpy < 1.7.0 is dropped, this implementation can be # removed. This implementation is a bit hacky. Similarly to old NumPy's # sum and amax functions, 'axis' must be an integer or None, tuples and # lists are not supported. Although 'keepdims' is not supported by these # old NumPy's functions, this function supports it. 
# Solve the shape of the reduced array if axis is None: sh_keepdims = (1,) * a.ndim else: sh_keepdims = list(a.shape) sh_keepdims[axis] = 1 a_max = amax(a, axis=axis) if a_max.ndim > 0: a_max[~isfinite(a_max)] = 0 elif not isfinite(a_max): a_max = 0 if b is not None: b = asarray(b) tmp = b * exp(a - reshape(a_max, sh_keepdims)) else: tmp = exp(a - reshape(a_max, sh_keepdims)) # suppress warnings about log of zero with np.errstate(divide='ignore'): out = log(sum(tmp, axis=axis)) out += a_max if keepdims: # Put back the reduced axes with size one out = reshape(out, sh_keepdims) else: # This is a more elegant implementation, requiring NumPy >= 1.7.0 a_max = amax(a, axis=axis, keepdims=True) if a_max.ndim > 0: a_max[~isfinite(a_max)] = 0 elif not isfinite(a_max): a_max = 0 if b is not None: b = asarray(b) tmp = b * exp(a - a_max) else: tmp = exp(a - a_max) # suppress warnings about log of zero with np.errstate(divide='ignore'): out = log(sum(tmp, axis=axis, keepdims=keepdims)) if not keepdims: a_max = squeeze(a_max, axis=axis) out += a_max return out def central_diff_weights(Np, ndiv=1): """ Return weights for an Np-point central derivative. Assumes equally-spaced function points. If weights are in the vector w, then derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx) Parameters ---------- Np : int Number of points for the central derivative. ndiv : int, optional Number of divisions. Default is 1. Notes ----- Can be inaccurate for large number of points. """ if Np < ndiv + 1: raise ValueError("Number of points must be at least the derivative order + 1.") if Np % 2 == 0: raise ValueError("The number of points must be odd.") from scipy import linalg ho = Np >> 1 x = arange(-ho,ho+1.0) x = x[:,newaxis] X = x**0.0 for k in range(1,Np): X = hstack([X,x**k]) w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv] return w def derivative(func, x0, dx=1.0, n=1, args=(), order=3): """ Find the n-th derivative of a function at a point. Given a function, use a central difference formula with spacing `dx` to compute the `n`-th derivative at `x0`. Parameters ---------- func : function Input function. x0 : float The point at which `n`-th derivative is found. dx : int, optional Spacing. n : int, optional Order of the derivative. Default is 1. args : tuple, optional Arguments order : int, optional Number of points to use, must be odd. Notes ----- Decreasing the step size too small can result in round-off error. Examples -------- >>> def f(x): ... return x**3 + x**2 ... >>> derivative(f, 1.0, dx=1e-6) 4.9999999999217337 """ if order < n + 1: raise ValueError("'order' (the number of points used to compute the derivative), " "must be at least the derivative order 'n' + 1.") if order % 2 == 0: raise ValueError("'order' (the number of points used to compute the derivative) " "must be odd.") # pre-computed for n=1 and 2 and low-order for speed. 
if n == 1: if order == 3: weights = array([-1,0,1])/2.0 elif order == 5: weights = array([1,-8,0,8,-1])/12.0 elif order == 7: weights = array([-1,9,-45,0,45,-9,1])/60.0 elif order == 9: weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0 else: weights = central_diff_weights(order,1) elif n == 2: if order == 3: weights = array([1,-2.0,1]) elif order == 5: weights = array([-1,16,-30,16,-1])/12.0 elif order == 7: weights = array([2,-27,270,-490,270,-27,2])/180.0 elif order == 9: weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0 else: weights = central_diff_weights(order,2) else: weights = central_diff_weights(order, n) val = 0.0 ho = order >> 1 for k in range(order): val += weights[k]*func(x0+(k-ho)*dx,*args) return val / product((dx,)*n,axis=0) def pade(an, m): """ Return Pade approximation to a polynomial as the ratio of two polynomials. Parameters ---------- an : (N,) array_like Taylor series coefficients. m : int The order of the returned approximating polynomials. Returns ------- p, q : Polynomial class The pade approximation of the polynomial defined by `an` is `p(x)/q(x)`. Examples -------- >>> from scipy import misc >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0] >>> p, q = misc.pade(e_exp, 2) >>> e_exp.reverse() >>> e_poly = np.poly1d(e_exp) Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)`` >>> e_poly(1) 2.7166666666666668 >>> p(1)/q(1) 2.7179487179487181 """ from scipy import linalg an = asarray(an) N = len(an) - 1 n = N - m if n < 0: raise ValueError("Order of q <m> must be smaller than len(an)-1.") Akj = eye(N+1, n+1) Bkj = zeros((N+1, m), 'd') for row in range(1, m+1): Bkj[row,:row] = -(an[:row])[::-1] for row in range(m+1, N+1): Bkj[row,:] = -(an[row-m:row])[::-1] C = hstack((Akj, Bkj)) pq = linalg.solve(C, an) p = pq[:n+1] q = r_[1.0, pq[n+1:]] return poly1d(p[::-1]), poly1d(q[::-1]) def lena(): """ Get classic image processing example image, Lena, at 8-bit grayscale bit-depth, 512 x 512 size. Parameters ---------- None Returns ------- lena : ndarray Lena image Examples -------- >>> import scipy.misc >>> lena = scipy.misc.lena() >>> lena.shape (512, 512) >>> lena.max() 245 >>> lena.dtype dtype('int32') >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(lena) >>> plt.show() """ import pickle import os fname = os.path.join(os.path.dirname(__file__),'lena.dat') f = open(fname,'rb') lena = array(pickle.load(f)) f.close() return lena def ascent(): """ Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos The image is derived from accent-to-the-top.jpg at http://www.public-domain-image.com/people-public-domain-images-pictures/ Parameters ---------- None Returns ------- ascent : ndarray convenient image to use for testing and demonstration Examples -------- >>> import scipy.misc >>> ascent = scipy.misc.ascent() >>> ascent.shape (512, 512) >>> ascent.max() 255 >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(ascent) >>> plt.show() """ import pickle import os fname = os.path.join(os.path.dirname(__file__),'ascent.dat') with open(fname, 'rb') as f: ascent = array(pickle.load(f)) return ascent def face(gray=False): """ Get a 1024 x 768, color image of a raccoon face. 
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com Parameters ---------- gray : bool, optional If True then return an 8-bit grey-scale image, otherwise return a color image Returns ------- face : ndarray image of a raccoon face Examples -------- >>> import scipy.misc >>> face = scipy.misc.face() >>> face.shape (768, 1024, 3) >>> face.max() 230 >>> face.dtype dtype('uint8') >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(face) >>> plt.show() """ import bz2 import os with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f: rawdata = f.read() data = bz2.decompress(rawdata) face = fromstring(data, dtype='uint8') face.shape = (768, 1024, 3) if gray is True: face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8') return face
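As a quick, self-contained illustration of the central-difference formula that `central_diff_weights` and `derivative` implement in the file above (plain NumPy only; the function `f` and the step size are made up for the example, this is a sketch rather than the library code itself):

import numpy as np

def f(x):
    return x**3 + x**2            # f'(1) = 3*1**2 + 2*1 = 5

# 3-point central-difference weights for the first derivative (order=3, n=1),
# the same pre-computed array([-1, 0, 1]) / 2.0 branch used by derivative() above.
weights = np.array([-1.0, 0.0, 1.0]) / 2.0
x0, dx, order = 1.0, 1e-6, 3
ho = order >> 1
estimate = sum(weights[k] * f(x0 + (k - ho) * dx) for k in range(order)) / dx
print(estimate)                   # approximately 5.0, cf. the docstring example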
bsd-3-clause
-4,139,392,418,925,590,500
25.558352
93
0.554713
false
ntucllab/striatum
simulation/simulation_exp4p.py
1
3289
import six from six.moves import range, zip from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression from sklearn.multiclass import OneVsRestClassifier import numpy as np import matplotlib.pyplot as plt from striatum.storage import MemoryHistoryStorage, MemoryModelStorage from striatum.bandit import Exp4P from striatum.bandit.bandit import Action from striatum import simulation def train_expert(history_context, history_action): n_round = len(history_context) history_context = np.array([history_context[t] for t in range(n_round)]) history_action = np.array([history_action[t] for t in range(n_round)]) logreg = OneVsRestClassifier(LogisticRegression()) mnb = OneVsRestClassifier(MultinomialNB()) logreg.fit(history_context, history_action) mnb.fit(history_context, history_action) return [logreg, mnb] def get_advice(context, action_ids, experts): advice = {} for t, context_t in six.viewitems(context): advice[t] = {} for exp_i, expert in enumerate(experts): prob = expert.predict_proba(context_t[np.newaxis, :])[0] advice[t][exp_i] = {} for action_id, action_prob in zip(action_ids, prob): advice[t][exp_i][action_id] = action_prob return advice def main(): # pylint: disable=too-many-locals n_rounds = 1000 context_dimension = 5 actions = [Action(i) for i in range(5)] action_ids = [0, 1, 2, 3, 4] context1, desired_actions1 = simulation.simulate_data( 3000, context_dimension, actions, "Exp4P", random_state=0) experts = train_expert(context1, desired_actions1) # Parameter tuning tuning_region = np.arange(0.01, 1, 0.05) ctr_tuning = np.empty(len(tuning_region)) advice1 = get_advice(context1, action_ids, experts) for delta_i, delta in enumerate(tuning_region): historystorage = MemoryHistoryStorage() modelstorage = MemoryModelStorage() policy = Exp4P(actions, historystorage, modelstorage, delta=delta, p_min=None) cum_regret = simulation.evaluate_policy(policy, advice1, desired_actions1) ctr_tuning[delta_i] = n_rounds - cum_regret[-1] ctr_tuning /= n_rounds delta_opt = tuning_region[np.argmax(ctr_tuning)] simulation.plot_tuning_curve(tuning_region, ctr_tuning, label="delta changes") # Regret Analysis n_rounds = 10000 context2, desired_actions2 = simulation.simulate_data( n_rounds, context_dimension, actions, "Exp4P", random_state=1) advice2 = get_advice(context2, action_ids, experts) historystorage = MemoryHistoryStorage() modelstorage = MemoryModelStorage() policy = Exp4P(actions, historystorage, modelstorage, delta=delta_opt, p_min=None) for t in range(n_rounds): history_id, action = policy.get_action(advice2[t], 1) action_id = action[0]['action'].action_id if desired_actions2[t] != action_id: policy.reward(history_id, {action_id: 0}) else: policy.reward(history_id, {action_id: 1}) policy.plot_avg_regret() plt.show() if __name__ == '__main__': main()
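The nested advice structure built by `get_advice` above (time step -> expert index -> action id -> probability) can be sketched without the striatum simulation; this standalone example uses scikit-learn on toy data, with all sizes and random data chosen purely for illustration:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier

rng = np.random.RandomState(0)
X = rng.rand(100, 5)                         # toy contexts
y = rng.randint(0, 5, size=100)              # toy "desired actions"
experts = [OneVsRestClassifier(LogisticRegression()).fit(X, y)]

action_ids = [0, 1, 2, 3, 4]
context_t = rng.rand(5)
advice_t = {}
for exp_i, expert in enumerate(experts):
    prob = expert.predict_proba(context_t[np.newaxis, :])[0]
    advice_t[exp_i] = dict(zip(action_ids, prob))
print(advice_t)                              # {0: {0: p0, 1: p1, ...}}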
bsd-2-clause
-5,611,123,558,188,196,000
35.955056
76
0.655822
false
buck06191/ABROAD
abroad/data_creation/merge_data.py
1
2303
""" .. module:: merge_data :platform: Unix, Windows :synopsis: This module contains the :func:`merge_data.df_concat`. .. moduleauthor:: Joshua Russell-Buckland <joshua.russell-buckland.15@ucl.ac.uk> """ import pandas as pd from . import data_creation as dc import os from .. import utility as utility import copy BASEDIR = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) assert utility.base_dir_check(BASEDIR, "ABROAD", verbose=True), "incorrect BASEDIR in merge_data" HDF_FILE = os.path.join(BASEDIR,'data','subject_data.h5') def df_concat(sensor_num, subject_number): """ Create a pandas dataframe for each subject and then concatenate to generate appropriate data structures for use in machine learning. :param int sensor_num: The sensor number. :param subject_number: either the max subject number or a list of numbers. :type subject_number: list or int or float """ df_list = [] if type(subject_number) == int: subject_list = list(range(1, subject_number+1)) elif type(subject_number) == float: subject_list = list(range(1, int(round(subject_number))+1)) elif type(subject_number) == list: subject_list = subject_number else: print('Not a valid sensor number specification. See help(df_concat)') for i in subject_list: print('Subject %d' % i) data = dc.SubjectData(HDF_FILE, i, sensor_num) data.artefact_row_extraction() data.group_labels() data.label_raw_data() data.label_conc_data() df_list.append(data.raw_df) data.conc_df.to_csv(os.path.join(BASEDIR,'data','conc_subject_%d.csv' % i), index=False) all_df = pd.concat(df_list) print('\nWriting data to file\n') all_df.to_csv(os.path.join(BASEDIR,'data','raw_sensor_%s.csv' % sensor_num), index=False) print('\nraw_sensor_%s.csv written to file.\n' % sensor_num) return None def merge_main(sensor_num, hdf_file=HDF_FILE): """ Main function to call in order to merge hdf into csv :param sensor_num: Sensor number :param hdf_file: hdf file location. (Default: data/subject_data.h5) :return: None - will write to file """ df_concat(sensor_num, 8) return None
mit
4,312,798,417,506,994,700
31.9
97
0.654364
false
HWal/paparazzi
sw/ground_segment/python/gvf/gvfframe.py
1
13600
import wx import time from scipy import linalg as la from matplotlib.path import Path from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas import matplotlib.pyplot as pl import matplotlib.patches as patches import numpy as np import sys from os import path, getenv PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../../'))) sys.path.append(PPRZ_SRC + "/sw/lib/python") sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python") from pprzlink.ivy import IvyMessagesInterface from pprzlink.message import PprzMessage from settings_xml_parse import PaparazziACSettings WIDTH = 800 HEIGHT = 800 class GVFFrame(wx.Frame): def __init__(self, ac_id): wx.Frame.__init__(self, id=-1, parent=None, \ name=u'GVF', size=wx.Size(WIDTH, HEIGHT), \ style=wx.DEFAULT_FRAME_STYLE, title=u'Guidance Vector Field') # Vehicle variables self.ac_id = ac_id self.course = 0 self.yaw = 0 self.XY = np.array([0, 0]) # Desired trajectory self.timer_traj = 0 # We do not update the traj every time we receive a msg self.timer_traj_lim = 7 # (7+1) * 0.25secs self.s = 0 self.ke = 0 self.map_gvf = map2d(np.array([0, 0]), 150000) self.traj = None # Frame self.canvas = FigureCanvas(self, -1, self.map_gvf.fig) self.Bind(wx.EVT_CLOSE, self.OnClose) self.redraw_timer = wx.Timer(self) self.Bind(wx.EVT_TIMER, self.OnRedrawTimer, self.redraw_timer) self.redraw_timer.Start(100) # Ivy self.interface = IvyMessagesInterface("GVF") self.interface.subscribe(self.message_recv) settings = PaparazziACSettings(ac_id) def message_recv(self, ac_id, msg): if int(ac_id) == self.ac_id: if msg.name == 'GPS': self.course = int(msg.get_field(3))*np.pi/1800 if msg.name == 'NAVIGATION': self.XY[0] = float(msg.get_field(2)) self.XY[1] = float(msg.get_field(3)) if msg.name == 'ATTITUDE': self.yaw = float(msg.get_field(1)) if msg.name == 'GVF': self.gvf_error = float(msg.get_field(0)) # Straight line if int(msg.get_field(1)) == 0 \ and self.timer_traj == self.timer_traj_lim: self.s = int(msg.get_field(2)) self.ke = float(msg.get_field(3)) param = [float(x) for x in msg.get_field(4)] a = param[0] b = param[1] c = param[2] self.traj = traj_line(np.array([-100,100]), a, b, c) self.traj.vector_field(self.traj.XYoff, self.map_gvf.area, \ self.s, self.ke) # Ellipse if int(msg.get_field(1)) == 1 \ and self.timer_traj == self.timer_traj_lim: self.s = int(msg.get_field(2)) self.ke = float(msg.get_field(3)) param = [float(x) for x in msg.get_field(4)] ex = param[0] ey = param[1] ea = param[2] eb = param[3] ealpha = param[4] self.traj = traj_ellipse(np.array([ex, ey]), ealpha, ea, eb) self.traj.vector_field(self.traj.XYoff, \ self.map_gvf.area, self.s, self.ke) # Sin if int(msg.get_field(1)) == 2 \ and self.timer_traj == self.timer_traj_lim: self.s = int(msg.get_field(2)) self.ke = float(msg.get_field(3)) param = [float(x) for x in msg.get_field(4)] a = param[0] b = param[1] alpha = param[2] w = param[3] off = param[4] A = param[5] self.traj = traj_sin(np.array([-100, 100]), a, b, alpha, \ w, off, A) self.traj.vector_field(self.traj.XYoff, \ self.map_gvf.area, self.s, self.ke) self.timer_traj = self.timer_traj + 1 if self.timer_traj > self.timer_traj_lim: self.timer_traj = 0 def draw_gvf(self, XY, yaw, course): if self.traj is not None: self.map_gvf.draw(XY, yaw, course, self.traj) def OnClose(self, event): self.interface.shutdown() self.Destroy() def OnRedrawTimer(self, event): self.draw_gvf(self.XY, self.yaw, self.course) self.canvas.draw() class map2d: def __init__(self, XYoff, area): 
self.XYoff = XYoff self.area = area self.fig, self.ax = pl.subplots() self.ax.set_xlabel('South [m]') self.ax.set_ylabel('West [m]') self.ax.set_title('2D Map') self.ax.annotate('HOME', xy = (0, 0)) self.ax.set_xlim(XYoff[0]-0.5*np.sqrt(area), XYoff[0]+0.5*np.sqrt(area)) self.ax.set_ylim(XYoff[1]-0.5*np.sqrt(area), XYoff[1]+0.5*np.sqrt(area)) self.ax.axis('equal') def vehicle_patch(self, XY, yaw): Rot = np.array([[np.cos(yaw), np.sin(yaw)],[-np.sin(yaw), np.cos(yaw)]]) apex = 45*np.pi/180 # 30 degrees apex angle b = np.sqrt(2*(self.area/2000) / np.sin(apex)) a = b*np.sin(apex/2) h = b*np.cos(apex/2) z1 = np.array([a/2, -h*0.3]) z2 = np.array([-a/2, -h*0.3]) z3 = np.array([0, h*0.6]) z1 = Rot.dot(z1) z2 = Rot.dot(z2) z3 = Rot.dot(z3) verts = [(XY[0]+z1[0], XY[1]+z1[1]), \ (XY[0]+z2[0], XY[1]+z2[1]), \ (XY[0]+z3[0], XY[1]+z3[1]), \ (0, 0)] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY] path = Path(verts, codes) return patches.PathPatch(path, facecolor='red', lw=2) def draw(self, XY, yaw, course, traj): self.ax.clear() self.ax.plot(traj.traj_points[0, :], traj.traj_points[1, :]) self.ax.quiver(traj.mapgrad_X, traj.mapgrad_Y, \ traj.mapgrad_U, traj.mapgrad_V, color='Teal', \ pivot='mid', width=0.002) self.ax.add_patch(self.vehicle_patch(XY, yaw)) # In radians apex = 45*np.pi/180 # 30 degrees apex angle b = np.sqrt(2*(self.area/2000) / np.sin(apex)) h = b*np.cos(apex/2) self.ax.arrow(XY[0], XY[1], \ h*np.sin(course), h*np.cos(course),\ head_width=5, head_length=10, fc='k', ec='k') self.ax.annotate('HOME', xy = (0, 0)) if isinstance(traj, traj_ellipse): self.ax.annotate('ELLIPSE', xy = (traj.XYoff[0], traj.XYoff[1])) self.ax.plot(0, 0, 'kx', ms=10, mew=2) self.ax.plot(traj.XYoff[0], traj.XYoff[1], 'kx', ms=10, mew=2) elif isinstance(traj, traj_sin): self.ax.annotate('SIN', xy = (traj.XYoff[0], traj.XYoff[1])) self.ax.plot(0, 0, 'kx', ms=10, mew=2) self.ax.plot(traj.XYoff[0], traj.XYoff[1], 'kx', ms=10, mew=2) elif isinstance(traj, traj_line): self.ax.annotate('LINE', xy = (traj.XYoff[0], traj.XYoff[1])) self.ax.plot(0, 0, 'kx', ms=10, mew=2) self.ax.plot(traj.XYoff[0], traj.XYoff[1], 'kx', ms=10, mew=2) self.ax.set_xlabel('South [m]') self.ax.set_ylabel('West [m]') self.ax.set_title('2D Map') self.ax.set_xlim(self.XYoff[0]-0.5*np.sqrt(self.area), \ self.XYoff[0]+0.5*np.sqrt(self.area)) self.ax.set_ylim(self.XYoff[1]-0.5*np.sqrt(self.area), \ self.XYoff[1]+0.5*np.sqrt(self.area)) self.ax.axis('equal') self.ax.grid() class traj_line: def float_range(self, start, end, step): while start <= end: yield start start += step def __init__(self, Xminmax, a, b, alpha): self.XYoff = np.array([a, b]) self.Xminmax = Xminmax self.a, self.b, self.alpha = a, b, alpha self.traj_points = np.zeros((2, 200)) self.mapgrad_X = [] self.mapgrad_Y = [] self.mapgrad_U = [] self.mapgrad_V = [] i = 0 for t in self.float_range(0, 1, 0.005): x = (self.Xminmax[1]-self.Xminmax[0])*t + self.Xminmax[0] i = i + 1 xtr = np.linspace(-200, 200, 400) xl = xtr*np.sin(self.alpha) + a yl = xtr*np.cos(self.alpha) + b self.traj_points = np.vstack((xl, yl)) def param_point(self, t): i = 0 def vector_field(self, XYoff, area, s, ke): self.mapgrad_X, self.mapgrad_Y = np.mgrid[XYoff[0]-0.5*np.sqrt(area):\ XYoff[0]+0.5*np.sqrt(area):30j, \ XYoff[1]-0.5*np.sqrt(area):\ XYoff[1]+0.5*np.sqrt(area):30j] nx = -np.cos(self.alpha) ny = np.sin(self.alpha) tx = s*ny ty = -s*nx ke = 1e-2*ke e = (self.mapgrad_X-self.a)*nx + (self.mapgrad_Y-self.b)*ny self.mapgrad_U = tx -ke*e*nx self.mapgrad_V = ty -ke*e*ny norm = np.sqrt(self.mapgrad_U**2 
+ self.mapgrad_V**2) self.mapgrad_U = self.mapgrad_U/norm self.mapgrad_V = self.mapgrad_V/norm class traj_ellipse: def float_range(self, start, end, step): while start <= end: yield start start += step def __init__(self, XYoff, rot, a, b): self.XYoff = XYoff self.a, self.b = a, b self.rot = rot self.traj_points = np.zeros((2, 200)) self.mapgrad_X = [] self.mapgrad_Y = [] self.mapgrad_U = [] self.mapgrad_V = [] i = 0 for t in self.float_range(0, 1, 0.005): self.traj_points[:, i] = self.param_point(t) i = i + 1 def param_point(self, t): angle = 2*np.pi*t return self.XYoff \ + np.array([self.a*np.cos(angle)*np.cos(-self.rot) - \ self.b*np.sin(angle)*np.sin(-self.rot), \ self.a*np.cos(angle)*np.sin(-self.rot) + \ self.b*np.sin(angle)*np.cos(-self.rot)]) def vector_field(self, XYoff, area, s, ke): self.mapgrad_X, self.mapgrad_Y = np.mgrid[XYoff[0]-0.5*np.sqrt(area):\ XYoff[0]+0.5*np.sqrt(area):30j, \ XYoff[1]-0.5*np.sqrt(area):\ XYoff[1]+0.5*np.sqrt(area):30j] Xel = (self.mapgrad_X-self.XYoff[0])*np.cos(self.rot) \ - (self.mapgrad_Y-self.XYoff[1])*np.sin(self.rot) Yel = (self.mapgrad_X-self.XYoff[0])*np.sin(self.rot) \ + (self.mapgrad_Y-self.XYoff[1])*np.cos(self.rot) nx = 2*Xel*np.cos(self.rot)/self.a**2 \ + 2*Yel*np.sin(self.rot)/self.b**2 ny = -2*Xel*np.sin(self.rot)/self.a**2 \ + 2*Yel*np.cos(self.rot)/self.b**2 tx = s*ny ty = -s*nx e = (Xel/self.a)**2 + (Yel/self.b)**2 - 1 self.mapgrad_U = tx -ke*e*nx self.mapgrad_V = ty -ke*e*ny norm = np.sqrt(self.mapgrad_U**2 + self.mapgrad_V**2) self.mapgrad_U = self.mapgrad_U/norm self.mapgrad_V = self.mapgrad_V/norm class traj_sin: def float_range(self, start, end, step): while start <= end: yield start start += step def __init__(self, Xminmax, a, b, alpha, w, off, A): self.XYoff = np.array([a, b]) self.Xminmax = Xminmax self.a, self.b, self.alpha, self.w, self.off, self.A = \ a, b, alpha, w, off, A self.traj_points = np.zeros((2, 200)) self.mapgrad_X = [] self.mapgrad_Y = [] self.mapgrad_U = [] self.mapgrad_V = [] i = 0 for t in self.float_range(0, 1, 0.005): x = (self.Xminmax[1]-self.Xminmax[0])*t + self.Xminmax[0] i = i + 1 xtr = np.linspace(-200, 200, 400) ytr = self.A*np.sin(self.w*xtr + self.off) xsin = -xtr*np.sin(self.alpha) + ytr*np.cos(self.alpha) + a ysin = xtr*np.cos(self.alpha) + ytr*np.sin(self.alpha) + b self.traj_points = np.vstack((xsin, ysin)) def param_point(self, t): i = 0 def vector_field(self, XYoff, area, s, ke): self.mapgrad_X, self.mapgrad_Y = np.mgrid[XYoff[0]-0.5*np.sqrt(area):\ XYoff[0]+0.5*np.sqrt(area):30j, \ XYoff[1]-0.5*np.sqrt(area):\ XYoff[1]+0.5*np.sqrt(area):30j] xs = (self.mapgrad_X-self.XYoff[0])*np.sin(self.alpha) \ - (self.mapgrad_Y-self.XYoff[1])*np.cos(self.alpha) ys = -(self.mapgrad_X-self.XYoff[0])*np.cos(self.alpha) \ - (self.mapgrad_Y-self.XYoff[1])*np.sin(self.alpha) ang = self.w*xs + self.off nx = -np.cos(self.alpha) - \ self.A*self.w*np.cos(ang)*np.sin(self.alpha) ny = -np.sin(self.alpha) + \ self.A*self.w*np.cos(ang)*np.cos(self.alpha) tx = s*ny ty = -s*nx ke = 1e-2*ke e = ys - self.A*np.sin(ang) self.mapgrad_U = tx -ke*e*nx self.mapgrad_V = ty -ke*e*ny norm = np.sqrt(self.mapgrad_U**2 + self.mapgrad_V**2) self.mapgrad_U = self.mapgrad_U/norm self.mapgrad_V = self.mapgrad_V/norm
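The correction rule applied by the `vector_field` methods above (tangent direction plus an error-proportional pull toward the path, then normalisation) can be evaluated at a single point for the straight-line case; this sketch uses NumPy only, with arbitrary example parameters rather than values from a real flight:

import numpy as np

alpha, a, b = np.pi / 4, 0.0, 0.0        # line orientation and offset (example values)
s, ke = 1, 1e-2                          # travel direction and (already scaled) gain
x, y = 10.0, -5.0                        # query point

nx, ny = -np.cos(alpha), np.sin(alpha)   # normal to the line, as in traj_line.vector_field
tx, ty = s * ny, -s * nx                 # tangent, oriented by s
e = (x - a) * nx + (y - b) * ny          # signed error of the point w.r.t. the line
u, v = tx - ke * e * nx, ty - ke * e * ny
u, v = u / np.hypot(u, v), v / np.hypot(u, v)   # unit guidance vector
print(u, v)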
gpl-2.0
6,854,179,692,631,883,000
34.509138
114
0.505588
false
GraphProcessor/CommunityDetectionCodes
NonOverlappingCodes/2009-Community-Infomap-MapEquation/examples/python/example-networkx.py
1
2373
#!/usr/bin/env python import networkx as nx import matplotlib.pyplot as plt import matplotlib.colors as colors from infomap import infomap """ Generate and draw a network with NetworkX, colored according to the community structure found by Infomap. """ def findCommunities(G): """ Partition network with the Infomap algorithm. Annotates nodes with 'community' id and return number of communities found. """ conf = infomap.init("--two-level"); # Input data network = infomap.Network(conf); # Output data tree = infomap.HierarchicalNetwork(conf) print("Building network...") for e in G.edges_iter(): network.addLink(*e) network.finalizeAndCheckNetwork(True, nx.number_of_nodes(G)); # Cluster network infomap.run(network, tree); print("Found %d top modules with codelength: %f" % (tree.numTopModules(), tree.codelength())) communities = {} clusterIndexLevel = 1 # 1, 2, ... or -1 for top, second, ... or lowest cluster level for node in tree.leafIter(clusterIndexLevel): communities[node.originalLeafIndex] = node.clusterIndex() nx.set_node_attributes(G, 'community', communities) return tree.numTopModules() def drawNetwork(G): # position map pos = nx.spring_layout(G) # community ids communities = [v for k,v in nx.get_node_attributes(G, 'community').items()] numCommunities = max(communities) + 1 # color map from http://colorbrewer2.org/ cmapLight = colors.ListedColormap(['#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6'], 'indexed', numCommunities) cmapDark = colors.ListedColormap(['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a'], 'indexed', numCommunities) # edges nx.draw_networkx_edges(G, pos) # nodes nodeCollection = nx.draw_networkx_nodes(G, pos = pos, node_color = communities, cmap = cmapLight ) # set node border color to the darker shade darkColors = [cmapDark(v) for v in communities] nodeCollection.set_edgecolor(darkColors) # Print node labels separately instead for n in G.nodes_iter(): plt.annotate(n, xy = pos[n], textcoords = 'offset points', horizontalalignment = 'center', verticalalignment = 'center', xytext = [0, 2], color = cmapDark(communities[n]) ) plt.axis('off') # plt.savefig("karate.png") plt.show() G=nx.karate_club_graph() numCommunities = findCommunities(G) print("Number of communities found: %d" % numCommunities) drawNetwork(G)
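Independent of the Infomap bindings used above, the annotate-then-colour pattern from `findCommunities`/`drawNetwork` looks roughly like this with plain NetworkX (2.x API; the community ids here are placeholders rather than real Infomap modules):

import networkx as nx

G = nx.karate_club_graph()
communities = {n: n % 3 for n in G.nodes()}           # placeholder ids instead of Infomap's
nx.set_node_attributes(G, communities, name='community')

node_colors = [G.nodes[n]['community'] for n in G.nodes()]
print(max(node_colors) + 1, 'communities (placeholder)')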
gpl-2.0
-8,834,627,640,442,599,000
25.366667
118
0.712179
false
apache/beam
sdks/python/apache_beam/dataframe/frames.py
3
174092
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Analogs for :class:`pandas.DataFrame` and :class:`pandas.Series`: :class:`DeferredDataFrame` and :class:`DeferredSeries`. These classes are effectively wrappers around a `schema-aware`_ :class:`~apache_beam.pvalue.PCollection` that provide a set of operations compatible with the `pandas`_ API. Note that we aim for the Beam DataFrame API to be completely compatible with the pandas API, but there are some features that are currently unimplemented for various reasons. Pay particular attention to the **'Differences from pandas'** section for each operation to understand where we diverge. .. _schema-aware: https://beam.apache.org/documentation/programming-guide/#what-is-a-schema .. _pandas: https://pandas.pydata.org/ """ import collections import inspect import itertools import math import re import warnings from typing import List from typing import Optional import numpy as np import pandas as pd from pandas.core.groupby.generic import DataFrameGroupBy from apache_beam.dataframe import expressions from apache_beam.dataframe import frame_base from apache_beam.dataframe import io from apache_beam.dataframe import partitionings __all__ = [ 'DeferredSeries', 'DeferredDataFrame', ] def populate_not_implemented(pd_type): def wrapper(deferred_type): for attr in dir(pd_type): # Don't auto-define hidden methods or dunders if attr.startswith('_'): continue if not hasattr(deferred_type, attr): pd_value = getattr(pd_type, attr) if isinstance(pd_value, property) or inspect.isclass(pd_value): # Some of the properties on pandas types (cat, dt, sparse), are # actually attributes with class values, not properties setattr( deferred_type, attr, property( frame_base.not_implemented_method(attr, base_type=pd_type))) elif callable(pd_value): setattr( deferred_type, attr, frame_base.not_implemented_method(attr, base_type=pd_type)) return deferred_type return wrapper def _fillna_alias(method): def wrapper(self, *args, **kwargs): return self.fillna(*args, method=method, **kwargs) wrapper.__name__ = method wrapper.__doc__ = ( f'{method} is only supported for axis="columns". 
' 'axis="index" is order-sensitive.') return frame_base.with_docs_from(pd.DataFrame)( frame_base.args_to_kwargs(pd.DataFrame)( frame_base.populate_defaults(pd.DataFrame)(wrapper))) LIFTABLE_AGGREGATIONS = ['all', 'any', 'max', 'min', 'prod', 'sum'] LIFTABLE_WITH_SUM_AGGREGATIONS = ['size', 'count'] UNLIFTABLE_AGGREGATIONS = [ 'mean', 'median', 'quantile', 'describe', # TODO: The below all have specialized distributed # implementations, but they require tracking # multiple intermediate series, which is difficult # to lift in groupby 'std', 'var', 'corr', 'cov', 'nunique' ] ALL_AGGREGATIONS = ( LIFTABLE_AGGREGATIONS + LIFTABLE_WITH_SUM_AGGREGATIONS + UNLIFTABLE_AGGREGATIONS) def _agg_method(base, func): def wrapper(self, *args, **kwargs): return self.agg(func, *args, **kwargs) if func in UNLIFTABLE_AGGREGATIONS: wrapper.__doc__ = ( f"``{func}`` cannot currently be parallelized. It will " "require collecting all data on a single node.") wrapper.__name__ = func return frame_base.with_docs_from(base)(wrapper) # Docstring to use for head and tail (commonly used to peek at datasets) _PEEK_METHOD_EXPLANATION = ( "because it is `order-sensitive " "<https://s.apache.org/dataframe-order-sensitive-operations>`_.\n\n" "If you want to peek at a large dataset consider using interactive Beam's " ":func:`ib.collect " "<apache_beam.runners.interactive.interactive_beam.collect>` " "with ``n`` specified, or :meth:`sample`. If you want to find the " "N largest elements, consider using :meth:`DeferredDataFrame.nlargest`.") class DeferredDataFrameOrSeries(frame_base.DeferredFrame): def _render_indexes(self): if self.index.nlevels == 1: return 'index=' + ( '<unnamed>' if self.index.name is None else repr(self.index.name)) else: return 'indexes=[' + ', '.join( '<unnamed>' if ix is None else repr(ix) for ix in self.index.names) + ']' __array__ = frame_base.wont_implement_method( pd.Series, '__array__', reason="non-deferred-result") @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def drop(self, labels, axis, index, columns, errors, **kwargs): """drop is not parallelizable when dropping from the index and ``errors="raise"`` is specified. It requires collecting all data on a single node in order to detect if one of the index values is missing.""" if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") if axis in (0, 'index'): index = labels columns = None elif axis in (1, 'columns'): index = None columns = labels else: raise ValueError( "axis must be one of (0, 1, 'index', 'columns'), " "got '%s'" % axis) if columns is not None: # Compute the proxy based on just the columns that are dropped. proxy = self._expr.proxy().drop(columns=columns, errors=errors) else: proxy = self._expr.proxy() if index is not None and errors == 'raise': # In order to raise an error about missing index values, we'll # need to collect the entire dataframe. # TODO: This could be parallelized by putting index values in a # ConstantExpression and partitioning by index. requires = partitionings.Singleton( reason=( "drop(errors='raise', axis='index') is not currently " "parallelizable. 
This requires collecting all data on a single " f"node in order to detect if one of {index!r} is missing.")) else: requires = partitionings.Arbitrary() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'drop', lambda df: df.drop( axis=axis, index=index, columns=columns, errors=errors, **kwargs), [self._expr], proxy=proxy, requires_partition_by=requires)) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def droplevel(self, level, axis): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'droplevel', lambda df: df.droplevel(level, axis=axis), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary() if axis in (1, 'column') else partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def fillna(self, value, method, axis, limit, **kwargs): """When ``axis="index"``, both ``method`` and ``limit`` must be ``None``. otherwise this operation is order-sensitive.""" # Default value is None, but is overriden with index. axis = axis or 'index' if axis in (0, 'index'): if method is not None: raise frame_base.WontImplementError( f"fillna(method={method!r}, axis={axis!r}) is not supported " "because it is order-sensitive. Only fillna(method=None) is " f"supported with axis={axis!r}.", reason="order-sensitive") if limit is not None: raise frame_base.WontImplementError( f"fillna(limit={method!r}, axis={axis!r}) is not supported because " "it is order-sensitive. Only fillna(limit=None) is supported with " f"axis={axis!r}.", reason="order-sensitive") if isinstance(self, DeferredDataFrame) and isinstance(value, DeferredSeries): # If self is a DataFrame and value is a Series we want to broadcast value # to all partitions of self. # This is OK, as its index must be the same size as the columns set of # self, so cannot be too large. 
class AsScalar(object): def __init__(self, value): self.value = value with expressions.allow_non_parallel_operations(): value_expr = expressions.ComputedExpression( 'as_scalar', lambda df: AsScalar(df), [value._expr], requires_partition_by=partitionings.Singleton()) get_value = lambda x: x.value requires = partitionings.Arbitrary() elif isinstance(value, frame_base.DeferredBase): # For other DeferredBase combinations, use Index partitioning to # co-locate on the Index value_expr = value._expr get_value = lambda x: x requires = partitionings.Index() else: # Default case, pass value through as a constant, no particular # partitioning requirement value_expr = expressions.ConstantExpression(value) get_value = lambda x: x requires = partitionings.Arbitrary() return frame_base.DeferredFrame.wrap( # yapf: disable expressions.ComputedExpression( 'fillna', lambda df, value: df.fillna( get_value(value), method=method, axis=axis, limit=limit, **kwargs), [self._expr, value_expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=requires)) ffill = _fillna_alias('ffill') bfill = _fillna_alias('bfill') backfill = _fillna_alias('backfill') pad = _fillna_alias('pad') @frame_base.with_docs_from(pd.DataFrame) def first(self, offset): per_partition = expressions.ComputedExpression( 'first-per-partition', lambda df: df.sort_index().first(offset=offset), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'first', lambda df: df.sort_index().first(offset=offset), [per_partition], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) def last(self, offset): per_partition = expressions.ComputedExpression( 'last-per-partition', lambda df: df.sort_index().last(offset=offset), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'last', lambda df: df.sort_index().last(offset=offset), [per_partition], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def groupby(self, by, level, axis, as_index, group_keys, **kwargs): """``as_index`` and ``group_keys`` must both be ``True``. Aggregations grouping by a categorical column with ``observed=False`` set are not currently parallelizable (`BEAM-11190 <https://issues.apache.org/jira/browse/BEAM-11190>`_). 
""" if not as_index: raise NotImplementedError('groupby(as_index=False)') if not group_keys: raise NotImplementedError('groupby(group_keys=False)') if axis in (1, 'columns'): return _DeferredGroupByCols( expressions.ComputedExpression( 'groupbycols', lambda df: df.groupby(by, axis=axis, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") elif level is not None: if isinstance(level, (list, tuple)): grouping_indexes = level else: grouping_indexes = [level] grouping_columns = [] index = self._expr.proxy().index # Translate to level numbers only grouping_indexes = [ l if isinstance(l, int) else index.names.index(l) for l in grouping_indexes ] if index.nlevels == 1: to_group_with_index = self._expr to_group = self._expr else: levels_to_drop = [ i for i in range(index.nlevels) if i not in grouping_indexes ] # Reorder so the grouped indexes are first to_group_with_index = self.reorder_levels( grouping_indexes + levels_to_drop) grouping_indexes = list(range(len(grouping_indexes))) levels_to_drop = list(range(len(grouping_indexes), index.nlevels)) if levels_to_drop: to_group = to_group_with_index.droplevel(levels_to_drop)._expr else: to_group = to_group_with_index._expr to_group_with_index = to_group_with_index._expr elif callable(by): def map_index(df): df = df.copy() df.index = df.index.map(by) return df to_group = expressions.ComputedExpression( 'map_index', map_index, [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) orig_nlevels = self._expr.proxy().index.nlevels def prepend_mapped_index(df): df = df.copy() index = df.index.to_frame() index.insert(0, None, df.index.map(by)) df.index = pd.MultiIndex.from_frame( index, names=[None] + list(df.index.names)) return df to_group_with_index = expressions.ComputedExpression( 'map_index_keep_orig', prepend_mapped_index, [self._expr], requires_partition_by=partitionings.Arbitrary(), # Partitioning by the original indexes is preserved preserves_partition_by=partitionings.Index( list(range(1, orig_nlevels + 1)))) grouping_columns = [] # The index we need to group by is the last one grouping_indexes = [0] elif isinstance(by, DeferredSeries): if isinstance(self, DeferredSeries): def set_index(s, by): df = pd.DataFrame(s) df, by = df.align(by, axis=0, join='inner') return df.set_index(by).iloc[:, 0] def prepend_index(s, by): df = pd.DataFrame(s) df, by = df.align(by, axis=0, join='inner') return df.set_index([by, df.index]).iloc[:, 0] else: def set_index(df, by): # type: ignore df, by = df.align(by, axis=0, join='inner') return df.set_index(by) def prepend_index(df, by): # type: ignore df, by = df.align(by, axis=0, join='inner') return df.set_index([by, df.index]) to_group = expressions.ComputedExpression( 'set_index', set_index, [self._expr, by._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Singleton()) orig_nlevels = self._expr.proxy().index.nlevels to_group_with_index = expressions.ComputedExpression( 'prependindex', prepend_index, [self._expr, by._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Index( list(range(1, orig_nlevels + 1)))) grouping_columns = [] grouping_indexes = [0] elif isinstance(by, np.ndarray): raise frame_base.WontImplementError( "Grouping by a concrete ndarray is order sensitive.", reason="order-sensitive") elif 
isinstance(self, DeferredDataFrame): if not isinstance(by, list): by = [by] # Find the columns that we need to move into the index so we can group by # them column_names = self._expr.proxy().columns grouping_columns = list(set(by).intersection(column_names)) index_names = self._expr.proxy().index.names for label in by: if label not in index_names and label not in self._expr.proxy().columns: raise KeyError(label) grouping_indexes = list(set(by).intersection(index_names)) if grouping_indexes: if set(by) == set(index_names): to_group = self._expr elif set(by).issubset(index_names): to_group = self.droplevel(index_names.difference(by))._expr else: to_group = self.reset_index(grouping_indexes).set_index(by)._expr else: to_group = self.set_index(by)._expr if grouping_columns: # TODO(BEAM-11711): It should be possible to do this without creating an # expression manually, by using DeferredDataFrame.set_index, i.e.: # to_group_with_index = self.set_index([self.index] + # grouping_columns)._expr to_group_with_index = expressions.ComputedExpression( 'move_grouped_columns_to_index', lambda df: df.set_index([df.index] + grouping_columns, drop=False), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Index( list(range(self._expr.proxy().index.nlevels)))) else: to_group_with_index = self._expr else: raise NotImplementedError(by) return DeferredGroupBy( expressions.ComputedExpression( 'groupbyindex', lambda df: df.groupby( level=list(range(df.index.nlevels)), **kwargs), [to_group], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary()), kwargs, to_group, to_group_with_index, grouping_columns=grouping_columns, grouping_indexes=grouping_indexes) @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def loc(self): return _DeferredLoc(self) @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def iloc(self): """Position-based indexing with `iloc` is order-sensitive in almost every case. Beam DataFrame users should prefer label-based indexing with `loc`. """ return _DeferredILoc(self) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def reset_index(self, level=None, **kwargs): """Dropping the entire index (e.g. with ``reset_index(level=None)``) is not parallelizable. It is also only guaranteed that the newly generated index values will be unique. The Beam DataFrame API makes no guarantee that the same index values as the equivalent pandas operation will be generated, because that implementation is order-sensitive.""" if level is not None and not isinstance(level, (tuple, list)): level = [level] if level is None or len(level) == self._expr.proxy().index.nlevels: # TODO(BEAM-12182): Could do distributed re-index with offsets. 
requires_partition_by = partitionings.Singleton( reason=( f"reset_index(level={level!r}) drops the entire index and " "creates a new one, so it cannot currently be parallelized " "(BEAM-12182).")) else: requires_partition_by = partitionings.Arbitrary() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'reset_index', lambda df: df.reset_index(level=level, **kwargs), [self._expr], preserves_partition_by=partitionings.Singleton(), requires_partition_by=requires_partition_by)) abs = frame_base._elementwise_method('abs', base=pd.core.generic.NDFrame) @frame_base.with_docs_from(pd.core.generic.NDFrame) @frame_base.args_to_kwargs(pd.core.generic.NDFrame) @frame_base.populate_defaults(pd.core.generic.NDFrame) def astype(self, dtype, copy, errors): """astype is not parallelizable when ``errors="ignore"`` is specified. ``copy=False`` is not supported because it relies on memory-sharing semantics. ``dtype="category`` is not supported because the type of the output column depends on the data. Please use ``pd.CategoricalDtype`` with explicit categories instead. """ requires = partitionings.Arbitrary() if errors == "ignore": # We need all data in order to ignore errors and propagate the original # data. requires = partitionings.Singleton( reason=( f"astype(errors={errors!r}) is currently not parallelizable, " "because all data must be collected on one node to determine if " "the original data should be propagated instead.")) if not copy: raise frame_base.WontImplementError( f"astype(copy={copy!r}) is not supported because it relies on " "memory-sharing semantics that are not compatible with the Beam " "model.") if dtype == 'category': raise frame_base.WontImplementError( "astype(dtype='category') is not supported because the type of the " "output column depends on the data. Please use pd.CategoricalDtype " "with explicit categories instead.", reason="non-deferred-columns") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'astype', lambda df: df.astype(dtype=dtype, copy=copy, errors=errors), [self._expr], requires_partition_by=requires, preserves_partition_by=partitionings.Arbitrary())) copy = frame_base._elementwise_method('copy', base=pd.core.generic.NDFrame) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def replace(self, to_replace, value, limit, method, **kwargs): """``method`` is not supported in the Beam DataFrame API because it is order-sensitive. It cannot be specified. If ``limit`` is specified this operation is not parallelizable.""" if method is not None and not isinstance(to_replace, dict) and value is None: # pandas only relies on method if to_replace is not a dictionary, and # value is None raise frame_base.WontImplementError( f"replace(method={method!r}) is not supported because it is " "order sensitive. Only replace(method=None) is supported.", reason="order-sensitive") if limit is None: requires_partition_by = partitionings.Arbitrary() else: requires_partition_by = partitionings.Singleton( reason=( f"replace(limit={limit!r}) cannot currently be parallelized. 
It " "requires collecting all data on a single node.")) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'replace', lambda df: df.replace( to_replace=to_replace, value=value, limit=limit, method=method, **kwargs), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=requires_partition_by)) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def tz_localize(self, ambiguous, **kwargs): """``ambiguous`` cannot be set to ``"infer"`` as its semantics are order-sensitive. Similarly, specifying ``ambiguous`` as an :class:`~numpy.ndarray` is order-sensitive, but you can achieve similar functionality by specifying ``ambiguous`` as a Series.""" if isinstance(ambiguous, np.ndarray): raise frame_base.WontImplementError( "tz_localize(ambiguous=ndarray) is not supported because it makes " "this operation sensitive to the order of the data. Please use a " "DeferredSeries instead.", reason="order-sensitive") elif isinstance(ambiguous, frame_base.DeferredFrame): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'tz_localize', lambda df, ambiguous: df.tz_localize(ambiguous=ambiguous, **kwargs), [self._expr, ambiguous._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Singleton())) elif ambiguous == 'infer': # infer attempts to infer based on the order of the timestamps raise frame_base.WontImplementError( f"tz_localize(ambiguous={ambiguous!r}) is not allowed because it " "makes this operation sensitive to the order of the data.", reason="order-sensitive") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'tz_localize', lambda df: df.tz_localize(ambiguous=ambiguous, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton())) @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def size(self): sizes = expressions.ComputedExpression( 'get_sizes', # Wrap scalar results in a Series for easier concatenation later lambda df: pd.Series(df.size), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'sum_sizes', lambda sizes: sizes.sum(), [sizes], requires_partition_by=partitionings.Singleton(), preserves_partition_by=partitionings.Singleton())) def length(self): """Alternative to ``len(df)`` which returns a deferred result that can be used in arithmetic with :class:`DeferredSeries` or :class:`DeferredDataFrame` instances.""" lengths = expressions.ComputedExpression( 'get_lengths', # Wrap scalar results in a Series for easier concatenation later lambda df: pd.Series(len(df)), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'sum_lengths', lambda lengths: lengths.sum(), [lengths], requires_partition_by=partitionings.Singleton(), preserves_partition_by=partitionings.Singleton())) def __len__(self): raise frame_base.WontImplementError( "len(df) is not currently supported because it produces a non-deferred " "result. 
Consider using df.length() instead.", reason="non-deferred-result") @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def empty(self): empties = expressions.ComputedExpression( 'get_empties', # Wrap scalar results in a Series for easier concatenation later lambda df: pd.Series(df.empty), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'check_all_empty', lambda empties: empties.all(), [empties], requires_partition_by=partitionings.Singleton(), preserves_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) def bool(self): # TODO: Documentation about DeferredScalar # Will throw if any partition has >1 element bools = expressions.ComputedExpression( 'get_bools', # Wrap scalar results in a Series for easier concatenation later lambda df: pd.Series([], dtype=bool) if df.empty else pd.Series([df.bool()]), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) with expressions.allow_non_parallel_operations(True): # Will throw if overall dataset has != 1 element return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'combine_all_bools', lambda bools: bools.bool(), [bools], proxy=bool(), requires_partition_by=partitionings.Singleton(), preserves_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) def equals(self, other): intermediate = expressions.ComputedExpression( 'equals_partitioned', # Wrap scalar results in a Series for easier concatenation later lambda df, other: pd.Series(df.equals(other)), [self._expr, other._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Singleton()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'aggregate_equals', lambda df: df.all(), [intermediate], requires_partition_by=partitionings.Singleton(), preserves_partition_by=partitionings.Singleton())) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def sort_values(self, axis, **kwargs): """``sort_values`` is not implemented. It is not implemented for ``axis=index`` because it imposes an ordering on the dataset, and it likely will not be maintained (see https://s.apache.org/dataframe-order-sensitive-operations). 
It is not implemented for ``axis=columns`` because it makes the order of the columns depend on the data (see https://s.apache.org/dataframe-non-deferred-columns).""" if axis in (0, 'index'): # axis=index imposes an ordering on the DataFrame rows which we do not # support raise frame_base.WontImplementError( "sort_values(axis=index) is not supported because it imposes an " "ordering on the dataset which likely will not be preserved.", reason="order-sensitive") else: # axis=columns will reorder the columns based on the data raise frame_base.WontImplementError( "sort_values(axis=columns) is not supported because the order of the " "columns in the result depends on the data.", reason="non-deferred-columns") @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def sort_index(self, axis, **kwargs): """``axis=index`` is not allowed because it imposes an ordering on the dataset, and we cannot guarantee it will be maintained (see https://s.apache.org/dataframe-order-sensitive-operations). Only ``axis=columns`` is allowed.""" if axis in (0, 'index'): # axis=rows imposes an ordering on the DataFrame which we do not support raise frame_base.WontImplementError( "sort_index(axis=index) is not supported because it imposes an " "ordering on the dataset which we cannot guarantee will be " "preserved.", reason="order-sensitive") # axis=columns reorders the columns by name return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'sort_index', lambda df: df.sort_index(axis, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary(), )) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def where(self, cond, other, errors, **kwargs): """where is not parallelizable when ``errors="ignore"`` is specified.""" requires = partitionings.Arbitrary() deferred_args = {} actual_args = {} # TODO(bhulette): This is very similar to the logic in # frame_base.elementwise_method, can we unify it? if isinstance(cond, frame_base.DeferredFrame): deferred_args['cond'] = cond requires = partitionings.Index() else: actual_args['cond'] = cond if isinstance(other, frame_base.DeferredFrame): deferred_args['other'] = other requires = partitionings.Index() else: actual_args['other'] = other if errors == "ignore": # We need all data in order to ignore errors and propagate the original # data. 
requires = partitionings.Singleton( reason=( f"where(errors={errors!r}) is currently not parallelizable, " "because all data must be collected on one node to determine if " "the original data should be propagated instead.")) actual_args['errors'] = errors def where_execution(df, *args): runtime_values = { name: value for (name, value) in zip(deferred_args.keys(), args) } return df.where(**runtime_values, **actual_args, **kwargs) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( "where", where_execution, [self._expr] + [df._expr for df in deferred_args.values()], requires_partition_by=requires, preserves_partition_by=partitionings.Index(), )) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def mask(self, cond, **kwargs): """mask is not parallelizable when ``errors="ignore"`` is specified.""" return self.where(~cond, **kwargs) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def xs(self, key, axis, level, **kwargs): """Note that ``xs(axis='index')`` will raise a ``KeyError`` at execution time if the key does not exist in the index.""" if axis in ('columns', 1): # Special case for axis=columns. This is a simple project that raises a # KeyError at construction time for missing columns. return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'xs', lambda df: df.xs(key, axis=axis, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) elif axis not in ('index', 0): # Make sure that user's axis is valid raise ValueError( "axis must be one of ('index', 0, 'columns', 1). " f"got {axis!r}.") if not isinstance(key, tuple): key = (key, ) key_size = len(key) key_series = pd.Series([key], pd.MultiIndex.from_tuples([key])) key_expr = expressions.ConstantExpression( key_series, proxy=key_series.iloc[:0]) if level is None: reindexed = self else: if not isinstance(level, list): level = [level] # If user specifed levels, reindex so those levels are at the beginning. # Keep the others and preserve their order. level = [ l if isinstance(l, int) else list(self.index.names).index(l) for l in level ] reindexed = self.reorder_levels( level + [i for i in range(self.index.nlevels) if i not in level]) def xs_partitioned(frame, key): if not len(key): # key is not in this partition, return empty dataframe return frame.iloc[:0].droplevel(list(range(key_size))) # key should be in this partition, call xs. Will raise KeyError if not # present. 
return frame.xs(key.item()) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'xs', xs_partitioned, [reindexed._expr, key_expr], requires_partition_by=partitionings.Index(list(range(key_size))), # Drops index levels, so partitioning is not preserved preserves_partition_by=partitionings.Singleton())) @property def dtype(self): return self._expr.proxy().dtype isin = frame_base._elementwise_method('isin', base=pd.DataFrame) combine_first = frame_base._elementwise_method( 'combine_first', base=pd.DataFrame) combine = frame_base._proxy_method( 'combine', base=pd.DataFrame, requires_partition_by=expressions.partitionings.Singleton( reason="combine() is not parallelizable because func might operate " "on the full dataset."), preserves_partition_by=expressions.partitionings.Singleton()) @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def ndim(self): return self._expr.proxy().ndim @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def index(self): return _DeferredIndex(self) @index.setter def _set_index(self, value): # TODO: assigning the index is generally order-sensitive, but we could # support it in some rare cases, e.g. when assigning the index from one # of a DataFrame's columns raise NotImplementedError( "Assigning an index is not yet supported. " "Consider using set_index() instead.") reindex = frame_base.wont_implement_method( pd.DataFrame, 'reindex', reason="order-sensitive") hist = frame_base.wont_implement_method( pd.DataFrame, 'hist', reason="plotting-tools") attrs = property( frame_base.wont_implement_method( pd.DataFrame, 'attrs', reason='experimental')) reorder_levels = frame_base._proxy_method( 'reorder_levels', base=pd.DataFrame, requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) resample = frame_base.wont_implement_method( pd.DataFrame, 'resample', reason='event-time-semantics') rolling = frame_base.wont_implement_method( pd.DataFrame, 'rolling', reason='event-time-semantics') sparse = property( frame_base.not_implemented_method( 'sparse', 'BEAM-12425', base_type=pd.DataFrame)) transform = frame_base._elementwise_method('transform', base=pd.DataFrame) tz_convert = frame_base._proxy_method( 'tz_convert', base=pd.DataFrame, requires_partition_by=partitionings.Arbitrary(), # Manipulates index, partitioning is not preserved preserves_partition_by=partitionings.Singleton()) @populate_not_implemented(pd.Series) @frame_base.DeferredFrame._register_for(pd.Series) class DeferredSeries(DeferredDataFrameOrSeries): def __repr__(self): return ( f'DeferredSeries(name={self.name!r}, dtype={self.dtype}, ' f'{self._render_indexes()})') @property # type: ignore @frame_base.with_docs_from(pd.Series) def name(self): return self._expr.proxy().name @name.setter def name(self, value): def fn(s): s = s.copy() s.name = value return s self._expr = expressions.ComputedExpression( 'series_set_name', fn, [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary()) @property # type: ignore @frame_base.with_docs_from(pd.Series) def dtype(self): return self._expr.proxy().dtype dtypes = dtype def __getitem__(self, key): if _is_null_slice(key) or key is Ellipsis: return self elif (isinstance(key, int) or _is_integer_slice(key) ) and self._expr.proxy().index._should_fallback_to_positional(): raise frame_base.WontImplementError( "Accessing an item by an integer key is order sensitive for this " "Series.", reason="order-sensitive") elif isinstance(key, slice) or 
callable(key): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( # yapf: disable 'getitem', lambda df: df[key], [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) elif isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool: return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( # yapf: disable 'getitem', lambda df, indexer: df[indexer], [self._expr, key._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) elif pd.core.series.is_iterator(key) or pd.core.common.is_bool_indexer(key): raise frame_base.WontImplementError( "Accessing a DeferredSeries with an iterator is sensitive to the " "order of the data.", reason="order-sensitive") else: # We could consider returning a deferred scalar, but that might # be more surprising than a clear error. raise frame_base.WontImplementError( f"Indexing a series with key of type {type(key)} is not supported " "because it produces a non-deferred result.", reason="non-deferred-result") @frame_base.with_docs_from(pd.Series) def keys(self): return self.index # Series.T == transpose. Both are a no-op T = frame_base._elementwise_method('T', base=pd.Series) transpose = frame_base._elementwise_method('transpose', base=pd.Series) shape = property( frame_base.wont_implement_method( pd.Series, 'shape', reason="non-deferred-result")) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def append(self, to_append, ignore_index, verify_integrity, **kwargs): """``ignore_index=True`` is not supported, because it requires generating an order-sensitive index.""" if not isinstance(to_append, DeferredSeries): raise frame_base.WontImplementError( "append() only accepts DeferredSeries instances, received " + str(type(to_append))) if ignore_index: raise frame_base.WontImplementError( "append(ignore_index=True) is order sensitive because it requires " "generating a new index based on the order of the data.", reason="order-sensitive") if verify_integrity: # We can verify the index is non-unique within index partitioned data. requires = partitionings.Index() else: requires = partitionings.Arbitrary() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'append', lambda s, to_append: s.append( to_append, verify_integrity=verify_integrity, **kwargs), [self._expr, to_append._expr], requires_partition_by=requires, preserves_partition_by=partitionings.Arbitrary())) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def align(self, other, join, axis, level, method, **kwargs): """Aligning per-level is not yet supported. Only the default, ``level=None``, is allowed. Filling NaN values via ``method`` is not supported, because it is `order-sensitive <https://s.apache.org/dataframe-order-sensitive-operations>`_. Only the default, ``method=None``, is allowed.""" if level is not None: raise NotImplementedError('per-level align') if method is not None: raise frame_base.WontImplementError( f"align(method={method!r}) is not supported because it is " "order sensitive. Only align(method=None) is supported.", reason="order-sensitive") # We're using pd.concat here as expressions don't yet support # multiple return values. 
aligned = frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'align', lambda x, y: pd.concat([x, y], axis=1, join='inner'), [self._expr, other._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) return aligned.iloc[:, 0], aligned.iloc[:, 1] argsort = frame_base.wont_implement_method( pd.Series, 'argsort', reason="order-sensitive") array = property( frame_base.wont_implement_method( pd.Series, 'array', reason="non-deferred-result")) # We can't reliably predict the output type, it depends on whether `key` is: # - not in the index (default_value) # - in the index once (constant) # - in the index multiple times (Series) get = frame_base.wont_implement_method( pd.Series, 'get', reason="non-deferred-columns") ravel = frame_base.wont_implement_method( pd.Series, 'ravel', reason="non-deferred-result") rename = frame_base._elementwise_method('rename', base=pd.Series) between = frame_base._elementwise_method('between', base=pd.Series) add_suffix = frame_base._proxy_method( 'add_suffix', base=pd.DataFrame, requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) add_prefix = frame_base._proxy_method( 'add_prefix', base=pd.DataFrame, requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) @frame_base.with_docs_from(pd.DataFrame) def dot(self, other): """``other`` must be a :class:`DeferredDataFrame` or :class:`DeferredSeries` instance. Computing the dot product with an array-like is not supported because it is order-sensitive.""" left = self._expr if isinstance(other, DeferredSeries): right = expressions.ComputedExpression( 'to_dataframe', pd.DataFrame, [other._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary()) right_is_series = True elif isinstance(other, DeferredDataFrame): right = other._expr right_is_series = False else: raise frame_base.WontImplementError( "other must be a DeferredDataFrame or DeferredSeries instance. " "Passing a concrete list or numpy array is not supported. Those " "types have no index and must be joined based on the order of the " "data.", reason="order-sensitive") dots = expressions.ComputedExpression( 'dot', # Transpose so we can sum across rows. (lambda left, right: pd.DataFrame(left @ right).T), [left, right], requires_partition_by=partitionings.Index()) with expressions.allow_non_parallel_operations(True): sums = expressions.ComputedExpression( 'sum', lambda dots: dots.sum(), # [dots], requires_partition_by=partitionings.Singleton()) if right_is_series: result = expressions.ComputedExpression( 'extract', lambda df: df[0], [sums], requires_partition_by=partitionings.Singleton()) else: result = sums return frame_base.DeferredFrame.wrap(result) __matmul__ = dot @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def nunique(self, **kwargs): return self.drop_duplicates(keep="any").size @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def quantile(self, q, **kwargs): """quantile is not parallelizable. 
See `BEAM-12167 <https://issues.apache.org/jira/browse/BEAM-12167>`_ tracking the possible addition of an approximate, parallelizable implementation of quantile.""" # TODO(BEAM-12167): Provide an option for approximate distributed # quantiles requires = partitionings.Singleton( reason=( "Computing quantiles across index cannot currently be " "parallelized. See BEAM-12167 tracking the possible addition of an " "approximate, parallelizable implementation of quantile.")) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'quantile', lambda df: df.quantile(q=q, **kwargs), [self._expr], requires_partition_by=requires, preserves_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.Series) def std(self, *args, **kwargs): # Compute variance (deferred scalar) with same args, then sqrt it return self.var(*args, **kwargs).apply(lambda var: math.sqrt(var)) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def var(self, axis, skipna, level, ddof, **kwargs): """Per-level aggregation is not yet supported (BEAM-11777). Only the default, ``level=None``, is allowed.""" if level is not None: raise NotImplementedError("per-level aggregation") if skipna is None or skipna: self = self.dropna() # pylint: disable=self-cls-assignment # See the online, numerically stable formulae at # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm # and # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm def compute_moments(x): n = len(x) m = x.std(ddof=0)**2 * n s = x.sum() return pd.DataFrame(dict(m=[m], s=[s], n=[n])) def combine_moments(data): m = s = n = 0.0 for datum in data.itertuples(): if datum.n == 0: continue elif n == 0: m, s, n = datum.m, datum.s, datum.n else: delta = s / n - datum.s / datum.n m += datum.m + delta**2 * n * datum.n / (n + datum.n) s += datum.s n += datum.n if n <= ddof: return float('nan') else: return m / (n - ddof) moments = expressions.ComputedExpression( 'compute_moments', compute_moments, [self._expr], requires_partition_by=partitionings.Arbitrary()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'combine_moments', combine_moments, [moments], requires_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def corr(self, other, method, min_periods): """Only ``method='pearson'`` is currently parallelizable.""" if method == 'pearson': # Note that this is the default. x, y = self.dropna().align(other.dropna(), 'inner') return x._corr_aligned(y, min_periods) else: reason = ( f"Encountered corr(method={method!r}) which cannot be " "parallelized. Only corr(method='pearson') is currently " "parallelizable.") # The rank-based correlations are not obviously parallelizable, though # perhaps an approximation could be done with a knowledge of quantiles # and custom partitioning. 
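      # For example (illustrative only), x.corr(y, method='kendall') falls
      # through to here and is evaluated on a single worker, while
      # x.corr(y, method='pearson') takes the distributed path above.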
return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'corr', lambda df, other: df.corr(other, method=method, min_periods=min_periods), [self._expr, other._expr], requires_partition_by=partitionings.Singleton(reason=reason))) def _corr_aligned(self, other, min_periods): std_x = self.std() std_y = other.std() cov = self._cov_aligned(other, min_periods) return cov.apply( lambda cov, std_x, std_y: cov / (std_x * std_y), args=[std_x, std_y]) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def cov(self, other, min_periods, ddof): x, y = self.dropna().align(other.dropna(), 'inner') return x._cov_aligned(y, min_periods, ddof) def _cov_aligned(self, other, min_periods, ddof=1): # Use the formulae from # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Covariance def compute_co_moments(x, y): n = len(x) if n <= 1: c = 0 else: c = x.cov(y) * (n - 1) sx = x.sum() sy = y.sum() return pd.DataFrame(dict(c=[c], sx=[sx], sy=[sy], n=[n])) def combine_co_moments(data): c = sx = sy = n = 0.0 for datum in data.itertuples(): if datum.n == 0: continue elif n == 0: c, sx, sy, n = datum.c, datum.sx, datum.sy, datum.n else: c += ( datum.c + (sx / n - datum.sx / datum.n) * (sy / n - datum.sy / datum.n) * n * datum.n / (n + datum.n)) sx += datum.sx sy += datum.sy n += datum.n if n < max(2, ddof, min_periods or 0): return float('nan') else: return c / (n - ddof) moments = expressions.ComputedExpression( 'compute_co_moments', compute_co_moments, [self._expr, other._expr], requires_partition_by=partitionings.Index()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'combine_co_moments', combine_co_moments, [moments], requires_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) @frame_base.maybe_inplace def dropna(self, **kwargs): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'dropna', lambda df: df.dropna(**kwargs), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary())) isnull = isna = frame_base._elementwise_method('isna', base=pd.Series) notnull = notna = frame_base._elementwise_method('notna', base=pd.Series) items = frame_base.wont_implement_method( pd.Series, 'items', reason="non-deferred-result") iteritems = frame_base.wont_implement_method( pd.Series, 'iteritems', reason="non-deferred-result") tolist = frame_base.wont_implement_method( pd.Series, 'tolist', reason="non-deferred-result") to_numpy = frame_base.wont_implement_method( pd.Series, 'to_numpy', reason="non-deferred-result") to_string = frame_base.wont_implement_method( pd.Series, 'to_string', reason="non-deferred-result") def _wrap_in_df(self): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'wrap_in_df', lambda s: pd.DataFrame(s), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary(), )) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) @frame_base.maybe_inplace def duplicated(self, keep): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. 
Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" # Re-use the DataFrame based duplcated, extract the series back out df = self._wrap_in_df() return df.duplicated(keep=keep)[df.columns[0]] @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) @frame_base.maybe_inplace def drop_duplicates(self, keep): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" # Re-use the DataFrame based drop_duplicates, extract the series back out df = self._wrap_in_df() return df.drop_duplicates(keep=keep)[df.columns[0]] @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) @frame_base.maybe_inplace def sample(self, **kwargs): """Only ``n`` and/or ``weights`` may be specified. ``frac``, ``random_state``, and ``replace=True`` are not yet supported. See `BEAM-12476 <https://issues.apache.org/jira/BEAM-12476>`_. Note that pandas will raise an error if ``n`` is larger than the length of the dataset, while the Beam DataFrame API will simply return the full dataset in that case.""" # Re-use the DataFrame based sample, extract the series back out df = self._wrap_in_df() return df.sample(**kwargs)[df.columns[0]] @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def aggregate(self, func, axis, *args, **kwargs): """Some aggregation methods cannot be parallelized, and computing them will require collecting all data on a single machine.""" if kwargs.get('skipna', False): # Eagerly generate a proxy to make sure skipna is a valid argument # for this aggregation method _ = self._expr.proxy().aggregate(func, axis, *args, **kwargs) kwargs.pop('skipna') return self.dropna().aggregate(func, axis, *args, **kwargs) if isinstance(func, list) and len(func) > 1: # level arg is ignored for multiple aggregations _ = kwargs.pop('level', None) # Aggregate with each method separately, then stick them all together. rows = [self.agg([f], *args, **kwargs) for f in func] return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'join_aggregate', lambda *rows: pd.concat(rows), [row._expr for row in rows])) else: # We're only handling a single column. It could be 'func' or ['func'], # which produce different results. 'func' produces a scalar, ['func'] # produces a single element Series. base_func = func[0] if isinstance(func, list) else func if (_is_numeric(base_func) and not pd.core.dtypes.common.is_numeric_dtype(self.dtype)): warnings.warn( f"Performing a numeric aggregation, {base_func!r}, on " f"Series {self._expr.proxy().name!r} with non-numeric type " f"{self.dtype!r}. 
This can result in runtime errors or surprising " "results.") if 'level' in kwargs: # Defer to groupby.agg for level= mode return self.groupby( level=kwargs.pop('level'), axis=axis).agg(func, *args, **kwargs) singleton_reason = None if 'min_count' in kwargs: # Eagerly generate a proxy to make sure min_count is a valid argument # for this aggregation method _ = self._expr.proxy().agg(func, axis, *args, **kwargs) singleton_reason = ( "Aggregation with min_count= requires collecting all data on a " "single node.") # We have specialized distributed implementations for these if base_func in ('quantile', 'std', 'var', 'nunique', 'corr', 'cov'): result = getattr(self, base_func)(*args, **kwargs) if isinstance(func, list): with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'wrap_aggregate', lambda x: pd.Series(x, index=[base_func]), [result._expr], requires_partition_by=partitionings.Singleton(), preserves_partition_by=partitionings.Singleton())) else: return result agg_kwargs = kwargs.copy() if ((_is_associative(base_func) or _is_liftable_with_sum(base_func)) and singleton_reason is None): intermediate = expressions.ComputedExpression( 'pre_aggregate', # Coerce to a Series, if the result is scalar we still want a Series # so we can combine and do the final aggregation next. lambda s: pd.Series(s.agg(func, *args, **kwargs)), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) allow_nonparallel_final = True if _is_associative(base_func): agg_func = func else: agg_func = ['sum'] if isinstance(func, list) else 'sum' else: intermediate = self._expr allow_nonparallel_final = None # i.e. don't change the value agg_func = func singleton_reason = ( f"Aggregation function {func!r} cannot currently be " "parallelized. 
It requires collecting all data for " "this Series on a single node.") with expressions.allow_non_parallel_operations(allow_nonparallel_final): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'aggregate', lambda s: s.agg(agg_func, *args, **agg_kwargs), [intermediate], preserves_partition_by=partitionings.Singleton(), requires_partition_by=partitionings.Singleton( reason=singleton_reason))) agg = aggregate @property # type: ignore @frame_base.with_docs_from(pd.Series) def axes(self): return [self.index] clip = frame_base._elementwise_method('clip', base=pd.Series) all = _agg_method(pd.Series, 'all') any = _agg_method(pd.Series, 'any') # TODO(BEAM-12074): Document that Series.count(level=) will drop NaN's count = _agg_method(pd.Series, 'count') describe = _agg_method(pd.Series, 'describe') min = _agg_method(pd.Series, 'min') max = _agg_method(pd.Series, 'max') prod = product = _agg_method(pd.Series, 'prod') sum = _agg_method(pd.Series, 'sum') mean = _agg_method(pd.Series, 'mean') median = _agg_method(pd.Series, 'median') argmax = frame_base.wont_implement_method( pd.Series, 'argmax', reason='order-sensitive') argmin = frame_base.wont_implement_method( pd.Series, 'argmin', reason='order-sensitive') cummax = frame_base.wont_implement_method( pd.Series, 'cummax', reason='order-sensitive') cummin = frame_base.wont_implement_method( pd.Series, 'cummin', reason='order-sensitive') cumprod = frame_base.wont_implement_method( pd.Series, 'cumprod', reason='order-sensitive') cumsum = frame_base.wont_implement_method( pd.Series, 'cumsum', reason='order-sensitive') diff = frame_base.wont_implement_method( pd.Series, 'diff', reason='order-sensitive') interpolate = frame_base.wont_implement_method( pd.Series, 'interpolate', reason='order-sensitive') searchsorted = frame_base.wont_implement_method( pd.Series, 'searchsorted', reason='order-sensitive') shift = frame_base.wont_implement_method( pd.Series, 'shift', reason='order-sensitive') head = frame_base.wont_implement_method( pd.Series, 'head', explanation=_PEEK_METHOD_EXPLANATION) tail = frame_base.wont_implement_method( pd.Series, 'tail', explanation=_PEEK_METHOD_EXPLANATION) filter = frame_base._elementwise_method('filter', base=pd.Series) memory_usage = frame_base.wont_implement_method( pd.Series, 'memory_usage', reason="non-deferred-result") # In Series __contains__ checks the index __contains__ = frame_base.wont_implement_method( pd.Series, '__contains__', reason="non-deferred-result") @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def nlargest(self, keep, **kwargs): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" # TODO(robertwb): Document 'any' option. # TODO(robertwb): Consider (conditionally) defaulting to 'any' if no # explicit keep parameter is requested. if keep == 'any': keep = 'first' elif keep != 'all': raise frame_base.WontImplementError( f"nlargest(keep={keep!r}) is not supported because it is " "order sensitive. 
Only keep=\"all\" is supported.", reason="order-sensitive") kwargs['keep'] = keep per_partition = expressions.ComputedExpression( 'nlargest-per-partition', lambda df: df.nlargest(**kwargs), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'nlargest', lambda df: df.nlargest(**kwargs), [per_partition], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def nsmallest(self, keep, **kwargs): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" if keep == 'any': keep = 'first' elif keep != 'all': raise frame_base.WontImplementError( f"nsmallest(keep={keep!r}) is not supported because it is " "order sensitive. Only keep=\"all\" is supported.", reason="order-sensitive") kwargs['keep'] = keep per_partition = expressions.ComputedExpression( 'nsmallest-per-partition', lambda df: df.nsmallest(**kwargs), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'nsmallest', lambda df: df.nsmallest(**kwargs), [per_partition], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Singleton())) @property # type: ignore @frame_base.with_docs_from(pd.Series) def is_unique(self): def set_index(s): s = s[:] s.index = s return s self_index = expressions.ComputedExpression( 'set_index', set_index, [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton()) is_unique_distributed = expressions.ComputedExpression( 'is_unique_distributed', lambda s: pd.Series(s.is_unique), [self_index], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Singleton()) with expressions.allow_non_parallel_operations(): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'combine', lambda s: s.all(), [is_unique_distributed], requires_partition_by=partitionings.Singleton(), preserves_partition_by=partitionings.Singleton())) plot = frame_base.wont_implement_method( pd.Series, 'plot', reason="plotting-tools") pop = frame_base.wont_implement_method( pd.Series, 'pop', reason="non-deferred-result") rename_axis = frame_base._elementwise_method('rename_axis', base=pd.Series) round = frame_base._elementwise_method('round', base=pd.Series) take = frame_base.wont_implement_method( pd.Series, 'take', reason='deprecated') to_dict = frame_base.wont_implement_method( pd.Series, 'to_dict', reason="non-deferred-result") to_frame = frame_base._elementwise_method('to_frame', base=pd.Series) @frame_base.with_docs_from(pd.Series) def unique(self, as_series=False): """unique is not supported by default because it produces a non-deferred result: an :class:`~numpy.ndarray`. 
    You can use the Beam-specific argument ``unique(as_series=True)`` to
    get the result as a :class:`DeferredSeries`"""
    if not as_series:
      raise frame_base.WontImplementError(
          "unique() is not supported by default because it produces a "
          "non-deferred result: a numpy array. You can use the Beam-specific "
          "argument unique(as_series=True) to get the result as a "
          "DeferredSeries",
          reason="non-deferred-result")

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'unique',
            lambda df: pd.Series(df.unique()), [self._expr],
            preserves_partition_by=partitionings.Singleton(),
            requires_partition_by=partitionings.Singleton(
                reason="unique() cannot currently be parallelized.")))

  @frame_base.with_docs_from(pd.Series)
  def update(self, other):
    self._expr = expressions.ComputedExpression(
        'update',
        lambda df, other: df.update(other) or df,
        [self._expr, other._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Index())

  unstack = frame_base.wont_implement_method(
      pd.Series, 'unstack', reason='non-deferred-columns')

  @frame_base.with_docs_from(pd.Series)
  def value_counts(
      self,
      sort=False,
      normalize=False,
      ascending=False,
      bins=None,
      dropna=True):
    """``sort`` is ``False`` by default, and ``sort=True`` is not supported
    because it imposes an ordering on the dataset which likely will not be
    preserved.

    When ``bins`` is specified this operation is not parallelizable. See
    `BEAM-12441 <https://issues.apache.org/jira/browse/BEAM-12441>`_ tracking
    the possible addition of a distributed implementation."""

    if sort:
      raise frame_base.WontImplementError(
          "value_counts(sort=True) is not supported because it imposes an "
          "ordering on the dataset which likely will not be preserved.",
          reason="order-sensitive")

    if bins is not None:
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'value_counts',
              lambda s: s.value_counts(
                  normalize=normalize, bins=bins, dropna=dropna),
              [self._expr],
              requires_partition_by=partitionings.Singleton(
                  reason=(
                      "value_counts with bins specified requires collecting "
                      "the entire dataset to identify the range.")),
              preserves_partition_by=partitionings.Singleton(),
          ))

    if dropna:
      column = self.dropna()
    else:
      column = self

    result = column.groupby(column).size()

    # groupby.size() names the index, which we don't need
    result.index.name = None

    if normalize:
      return result / column.length()
    else:
      return result

  values = property(
      frame_base.wont_implement_method(
          pd.Series, 'values', reason="non-deferred-result"))

  view = frame_base.wont_implement_method(
      pd.Series,
      'view',
      explanation=(
          "because it relies on memory-sharing semantics that are "
          "not compatible with the Beam model."))

  @property  # type: ignore
  @frame_base.with_docs_from(pd.Series)
  def str(self):
    return _DeferredStringMethods(self._expr)

  @property  # type: ignore
  @frame_base.with_docs_from(pd.Series)
  def cat(self):
    return _DeferredCategoricalMethods(self._expr)

  @property  # type: ignore
  @frame_base.with_docs_from(pd.Series)
  def dt(self):
    return _DeferredDatetimeMethods(self._expr)

  @frame_base.with_docs_from(pd.Series)
  def mode(self, *args, **kwargs):
    """mode is not currently parallelizable. An approximate,
    parallelizable implementation of mode may be added in the future
    (`BEAM-12181 <https://issues.apache.org/jira/browse/BEAM-12181>`_)."""
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'mode',
            lambda df: df.mode(*args, **kwargs),
            [self._expr],
            #TODO(BEAM-12181): Can we add an approximate implementation?
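            # A single global partition is required because the mode depends
            # on counts across the entire Series: a value that is the most
            # frequent within one partition need not be the overall mode.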
requires_partition_by=partitionings.Singleton( reason=( "mode cannot currently be parallelized. See " "BEAM-12181 tracking the possble addition of " "an approximate, parallelizable implementation of mode.")), preserves_partition_by=partitionings.Singleton())) apply = frame_base._elementwise_method('apply', base=pd.Series) map = frame_base._elementwise_method('map', base=pd.Series) # TODO(BEAM-11636): Implement transform using type inference to determine the # proxy #transform = frame_base._elementwise_method('transform', base=pd.Series) @frame_base.with_docs_from(pd.Series) @frame_base.args_to_kwargs(pd.Series) @frame_base.populate_defaults(pd.Series) def repeat(self, repeats, axis): """``repeats`` must be an ``int`` or a :class:`DeferredSeries`. Lists are not supported because they make this operation order-sensitive.""" if isinstance(repeats, int): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'repeat', lambda series: series.repeat(repeats), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) elif isinstance(repeats, frame_base.DeferredBase): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'repeat', lambda series, repeats_series: series.repeat(repeats_series), [self._expr, repeats._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) elif isinstance(repeats, list): raise frame_base.WontImplementError( "repeat(repeats=) repeats must be an int or a DeferredSeries. " "Lists are not supported because they make this operation sensitive " "to the order of the data.", reason="order-sensitive") else: raise TypeError( "repeat(repeats=) value must be an int or a " f"DeferredSeries (encountered {type(repeats)}).") @populate_not_implemented(pd.DataFrame) @frame_base.DeferredFrame._register_for(pd.DataFrame) class DeferredDataFrame(DeferredDataFrameOrSeries): def __repr__(self): return ( f'DeferredDataFrame(columns={list(self.columns)}, ' f'{self._render_indexes()})') @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def columns(self): return self._expr.proxy().columns @columns.setter def columns(self, columns): def set_columns(df): df = df.copy() df.columns = columns return df return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'set_columns', set_columns, [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) @frame_base.with_docs_from(pd.DataFrame) def keys(self): return self.columns def __getattr__(self, name): # Column attribute access. if name in self._expr.proxy().columns: return self[name] else: return object.__getattribute__(self, name) def __getitem__(self, key): # TODO: Replicate pd.DataFrame.__getitem__ logic if isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool: return self.loc[key] elif isinstance(key, frame_base.DeferredBase): # Fail early if key is a DeferredBase as it interacts surprisingly with # key in self._expr.proxy().columns raise NotImplementedError( "Indexing with a non-bool deferred frame is not yet supported. " "Consider using df.loc[...]") elif isinstance(key, slice): if _is_null_slice(key): return self elif _is_integer_slice(key): # This depends on the contents of the index. raise frame_base.WontImplementError( "Integer slices are not supported as they are ambiguous. 
Please " "use iloc or loc with integer slices.") else: return self.loc[key] elif ( (isinstance(key, list) and all(key_column in self._expr.proxy().columns for key_column in key)) or key in self._expr.proxy().columns): return self._elementwise(lambda df: df[key], 'get_column') else: raise NotImplementedError(key) def __contains__(self, key): # Checks if proxy has the given column return self._expr.proxy().__contains__(key) def __setitem__(self, key, value): if isinstance( key, str) or (isinstance(key, list) and all(isinstance(c, str) for c in key)) or (isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool): # yapf: disable return self._elementwise( lambda df, key, value: df.__setitem__(key, value), 'set_column', (key, value), inplace=True) else: raise NotImplementedError(key) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def align(self, other, join, axis, copy, level, method, **kwargs): """Aligning per level is not yet supported. Only the default, ``level=None``, is allowed. Filling NaN values via ``method`` is not supported, because it is `order-sensitive <https://s.apache.org/dataframe-order-sensitive-operations>`_. Only the default, ``method=None``, is allowed. ``copy=False`` is not supported because its behavior (whether or not it is an inplace operation) depends on the data.""" if not copy: raise frame_base.WontImplementError( "align(copy=False) is not supported because it might be an inplace " "operation depending on the data. Please prefer the default " "align(copy=True).") if method is not None: raise frame_base.WontImplementError( f"align(method={method!r}) is not supported because it is " "order sensitive. Only align(method=None) is supported.", reason="order-sensitive") if kwargs: raise NotImplementedError('align(%s)' % ', '.join(kwargs.keys())) if level is not None: # Could probably get by partitioning on the used levels. requires_partition_by = partitionings.Singleton(reason=( f"align(level={level}) is not currently parallelizable. Only " "align(level=None) can be parallelized.")) elif axis in ('columns', 1): requires_partition_by = partitionings.Arbitrary() else: requires_partition_by = partitionings.Index() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'align', lambda df, other: df.align(other, join=join, axis=axis), [self._expr, other._expr], requires_partition_by=requires_partition_by, preserves_partition_by=partitionings.Arbitrary())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def append(self, other, ignore_index, verify_integrity, sort, **kwargs): """``ignore_index=True`` is not supported, because it requires generating an order-sensitive index.""" if not isinstance(other, DeferredDataFrame): raise frame_base.WontImplementError( "append() only accepts DeferredDataFrame instances, received " + str(type(other))) if ignore_index: raise frame_base.WontImplementError( "append(ignore_index=True) is order sensitive because it requires " "generating a new index based on the order of the data.", reason="order-sensitive") if verify_integrity: # We can verify the index is non-unique within index partitioned data. 
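      # (Index partitioning co-locates equal index values, so any duplicate
      # introduced by the append will surface in some partition's local
      # verify_integrity check.)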
requires = partitionings.Index() else: requires = partitionings.Arbitrary() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'append', lambda s, other: s.append(other, sort=sort, verify_integrity=verify_integrity, **kwargs), [self._expr, other._expr], requires_partition_by=requires, preserves_partition_by=partitionings.Arbitrary() ) ) # If column name exists this is a simple project, otherwise it is a constant # (default_value) @frame_base.with_docs_from(pd.DataFrame) def get(self, key, default_value=None): if key in self.columns: return self[key] else: return default_value @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def set_index(self, keys, **kwargs): """``keys`` must be a ``str`` or ``List[str]``. Passing an Index or Series is not yet supported (`BEAM-11711 <https://issues.apache.org/jira/browse/BEAM-11711>`_).""" if isinstance(keys, str): keys = [keys] if any(isinstance(k, (_DeferredIndex, frame_base.DeferredFrame)) for k in keys): raise NotImplementedError("set_index with Index or Series instances is " "not yet supported (BEAM-11711).") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'set_index', lambda df: df.set_index(keys, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton())) @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def axes(self): return (self.index, self.columns) @property # type: ignore @frame_base.with_docs_from(pd.DataFrame) def dtypes(self): return self._expr.proxy().dtypes @frame_base.with_docs_from(pd.DataFrame) def assign(self, **kwargs): """``value`` must be a ``callable`` or :class:`DeferredSeries`. Other types make this operation order-sensitive.""" for name, value in kwargs.items(): if not callable(value) and not isinstance(value, DeferredSeries): raise frame_base.WontImplementError( f"Unsupported value for new column '{name}': '{value}'. Only " "callables and DeferredSeries instances are supported. 
Other types " "make this operation sensitive to the order of the data", reason="order-sensitive") return self._elementwise( lambda df, *args, **kwargs: df.assign(*args, **kwargs), 'assign', other_kwargs=kwargs) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def explode(self, column, ignore_index): # ignoring the index will not preserve it preserves = (partitionings.Singleton() if ignore_index else partitionings.Index()) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'explode', lambda df: df.explode(column, ignore_index), [self._expr], preserves_partition_by=preserves, requires_partition_by=partitionings.Arbitrary())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def insert(self, value, **kwargs): """``value`` cannot be a ``List`` because aligning it with this DeferredDataFrame is order-sensitive.""" if isinstance(value, list): raise frame_base.WontImplementError( "insert(value=list) is not supported because it joins the input " "list to the deferred DataFrame based on the order of the data.", reason="order-sensitive") if isinstance(value, pd.core.generic.NDFrame): value = frame_base.DeferredFrame.wrap( expressions.ConstantExpression(value)) if isinstance(value, frame_base.DeferredFrame): def func_zip(df, value): df = df.copy() df.insert(value=value, **kwargs) return df inserted = frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'insert', func_zip, [self._expr, value._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) else: def func_elementwise(df): df = df.copy() df.insert(value=value, **kwargs) return df inserted = frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'insert', func_elementwise, [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) self._expr = inserted._expr @staticmethod @frame_base.with_docs_from(pd.DataFrame) def from_dict(*args, **kwargs): return frame_base.DeferredFrame.wrap( expressions.ConstantExpression(pd.DataFrame.from_dict(*args, **kwargs))) @staticmethod @frame_base.with_docs_from(pd.DataFrame) def from_records(*args, **kwargs): return frame_base.DeferredFrame.wrap( expressions.ConstantExpression(pd.DataFrame.from_records(*args, **kwargs))) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def duplicated(self, keep, subset): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" # TODO(BEAM-12074): Document keep="any" if keep == 'any': keep = 'first' elif keep is not False: raise frame_base.WontImplementError( f"duplicated(keep={keep!r}) is not supported because it is " "sensitive to the order of the data. 
Only keep=False and " "keep=\"any\" are supported.", reason="order-sensitive") by = subset or list(self.columns) # Workaround a bug where groupby.apply() that returns a single-element # Series moves index label to column return self.groupby(by).apply( lambda df: pd.DataFrame(df.duplicated(keep=keep, subset=subset), columns=[None]))[None] @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def drop_duplicates(self, keep, subset, ignore_index): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" # TODO(BEAM-12074): Document keep="any" if keep == 'any': keep = 'first' elif keep is not False: raise frame_base.WontImplementError( f"drop_duplicates(keep={keep!r}) is not supported because it is " "sensitive to the order of the data. Only keep=False and " "keep=\"any\" are supported.", reason="order-sensitive") if ignore_index is not False: raise frame_base.WontImplementError( "drop_duplicates(ignore_index=False) is not supported because it " "requires generating a new index that is sensitive to the order of " "the data.", reason="order-sensitive") by = subset or list(self.columns) return self.groupby(by).apply( lambda df: df.drop_duplicates(keep=keep, subset=subset)).droplevel(by) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def aggregate(self, func, axis, *args, **kwargs): # We have specialized implementations for these. if func in ('quantile',): return getattr(self, func)(*args, axis=axis, **kwargs) # Maps to a property, args are ignored if func in ('size',): return getattr(self, func) # We also have specialized distributed implementations for these. They only # support axis=0 (implicitly) though. axis=1 should fall through if func in ('corr', 'cov') and axis in (0, 'index'): return getattr(self, func)(*args, **kwargs) if axis is None: # Aggregate across all elements by first aggregating across columns, # then across rows. return self.agg(func, *args, **dict(kwargs, axis=1)).agg( func, *args, **dict(kwargs, axis=0)) elif axis in (1, 'columns'): # This is an easy elementwise aggregation. return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'aggregate', lambda df: df.agg(func, axis=1, *args, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary())) elif len(self._expr.proxy().columns) == 0: # For this corner case, just colocate everything. return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'aggregate', lambda df: df.agg(func, *args, **kwargs), [self._expr], requires_partition_by=partitionings.Singleton())) else: # In the general case, we will compute the aggregation of each column # separately, then recombine. # First, handle any kwargs that cause a projection, by eagerly generating # the proxy, and only including the columns that are in the output. 
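      # For example (illustrative): df.agg('mean', numeric_only=True) yields a
      # proxy indexed only by the numeric columns, so only those columns are
      # aggregated below.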
PROJECT_KWARGS = ('numeric_only', 'bool_only', 'include', 'exclude') proxy = self._expr.proxy().agg(func, axis, *args, **kwargs) if isinstance(proxy, pd.DataFrame): projected = self[list(proxy.columns)] elif isinstance(proxy, pd.Series): projected = self[list(proxy.index)] else: projected = self nonnumeric_columns = [name for (name, dtype) in projected.dtypes.items() if not pd.core.dtypes.common.is_numeric_dtype(dtype)] if _is_numeric(func) and nonnumeric_columns: if 'numeric_only' in kwargs and kwargs['numeric_only'] is False: # User has opted in to execution with non-numeric columns, they # will accept runtime errors pass else: raise frame_base.WontImplementError( f"Numeric aggregation ({func!r}) on a DataFrame containing " f"non-numeric columns ({*nonnumeric_columns,!r} is not " "supported, unless `numeric_only=` is specified.\n" "Use `numeric_only=True` to only aggregate over numeric " "columns.\nUse `numeric_only=False` to aggregate over all " "columns. Note this is not recommended, as it could result in " "execution time errors.") for key in PROJECT_KWARGS: if key in kwargs: kwargs.pop(key) if not isinstance(func, dict): col_names = list(projected._expr.proxy().columns) func_by_col = {col: func for col in col_names} else: func_by_col = func col_names = list(func.keys()) aggregated_cols = [] has_lists = any(isinstance(f, list) for f in func_by_col.values()) for col in col_names: funcs = func_by_col[col] if has_lists and not isinstance(funcs, list): # If any of the columns do multiple aggregations, they all must use # "list" style output funcs = [funcs] aggregated_cols.append(projected[col].agg(funcs, *args, **kwargs)) # The final shape is different depending on whether any of the columns # were aggregated by a list of aggregators. with expressions.allow_non_parallel_operations(): if isinstance(proxy, pd.Series): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'join_aggregate', lambda *cols: pd.Series( {col: value for col, value in zip(col_names, cols)}), [col._expr for col in aggregated_cols], requires_partition_by=partitionings.Singleton())) elif isinstance(proxy, pd.DataFrame): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'join_aggregate', lambda *cols: pd.DataFrame( {col: value for col, value in zip(col_names, cols)}), [col._expr for col in aggregated_cols], requires_partition_by=partitionings.Singleton())) else: raise AssertionError("Unexpected proxy type for " f"DataFrame.aggregate!: proxy={proxy!r}, " f"type(proxy)={type(proxy)!r}") agg = aggregate applymap = frame_base._elementwise_method('applymap', base=pd.DataFrame) add_prefix = frame_base._elementwise_method('add_prefix', base=pd.DataFrame) add_suffix = frame_base._elementwise_method('add_suffix', base=pd.DataFrame) memory_usage = frame_base.wont_implement_method( pd.DataFrame, 'memory_usage', reason="non-deferred-result") info = frame_base.wont_implement_method( pd.DataFrame, 'info', reason="non-deferred-result") @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def clip(self, axis, **kwargs): """``lower`` and ``upper`` must be :class:`DeferredSeries` instances, or constants. 
Array-like arguments are not supported because they are order-sensitive.""" if any(isinstance(kwargs.get(arg, None), frame_base.DeferredFrame) for arg in ('upper', 'lower')) and axis not in (0, 'index'): raise frame_base.WontImplementError( "axis must be 'index' when upper and/or lower are a DeferredFrame", reason='order-sensitive') return frame_base._elementwise_method('clip', base=pd.DataFrame)(self, axis=axis, **kwargs) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def corr(self, method, min_periods): """Only ``method="pearson"`` can be parallelized. Other methods require collecting all data on a single worker (see https://s.apache.org/dataframe-non-parallel-operations for details). """ if method == 'pearson': proxy = self._expr.proxy().corr() columns = list(proxy.columns) args = [] arg_indices = [] for col1, col2 in itertools.combinations(columns, 2): arg_indices.append((col1, col2)) args.append(self[col1].corr(self[col2], method=method, min_periods=min_periods)) def fill_matrix(*args): data = collections.defaultdict(dict) for col in columns: data[col][col] = 1.0 for ix, (col1, col2) in enumerate(arg_indices): data[col1][col2] = data[col2][col1] = args[ix] return pd.DataFrame(data, columns=columns, index=columns) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'fill_matrix', fill_matrix, [arg._expr for arg in args], requires_partition_by=partitionings.Singleton(), proxy=proxy)) else: reason = (f"Encountered corr(method={method!r}) which cannot be " "parallelized. Only corr(method='pearson') is currently " "parallelizable.") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'corr', lambda df: df.corr(method=method, min_periods=min_periods), [self._expr], requires_partition_by=partitionings.Singleton(reason=reason))) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def cov(self, min_periods, ddof): proxy = self._expr.proxy().corr() columns = list(proxy.columns) args = [] arg_indices = [] for col in columns: arg_indices.append((col, col)) std = self[col].std(ddof) args.append(std.apply(lambda x: x*x, 'square')) for ix, col1 in enumerate(columns): for col2 in columns[ix+1:]: arg_indices.append((col1, col2)) # Note that this set may be different for each pair. 
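        # (This mirrors pandas' pairwise handling of missing data: each
        # covariance entry only uses rows where both columns are non-NA.)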
        no_na = self.loc[self[col1].notna() & self[col2].notna()]
        args.append(no_na[col1]._cov_aligned(no_na[col2], min_periods, ddof))

    def fill_matrix(*args):
      data = collections.defaultdict(dict)
      for ix, (col1, col2) in enumerate(arg_indices):
        data[col1][col2] = data[col2][col1] = args[ix]
      return pd.DataFrame(data, columns=columns, index=columns)

    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'fill_matrix',
              fill_matrix, [arg._expr for arg in args],
              requires_partition_by=partitionings.Singleton(),
              proxy=proxy))

  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def corrwith(self, other, axis, drop, method):
    if axis in (1, 'columns'):
      return self._elementwise(
          lambda df, other: df.corrwith(
              other, axis=axis, drop=drop, method=method),
          'corrwith',
          other_args=(other,))

    if not isinstance(other, frame_base.DeferredFrame):
      other = frame_base.DeferredFrame.wrap(
          expressions.ConstantExpression(other))

    if isinstance(other, DeferredSeries):
      proxy = self._expr.proxy().corrwith(
          other._expr.proxy(), axis=axis, drop=drop, method=method)
      self, other = self.align(other, axis=0, join='inner')
      col_names = proxy.index
      other_cols = [other] * len(col_names)
    elif isinstance(other, DeferredDataFrame):
      proxy = self._expr.proxy().corrwith(
          other._expr.proxy(), axis=axis, method=method, drop=drop)
      self, other = self.align(other, axis=0, join='inner')
      col_names = list(
          set(self.columns)
          .intersection(other.columns)
          .intersection(proxy.index))
      other_cols = [other[col_name] for col_name in col_names]
    else:
      # Raise the right error.
      self._expr.proxy().corrwith(
          other._expr.proxy(), axis=axis, drop=drop, method=method)
      # Just in case something else becomes valid.
      # (Report the type of the proxy value itself, not the bound method.)
      raise NotImplementedError('corrwith(%s)' % type(other._expr.proxy()))

    # Generate expressions to compute the actual correlations.
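    # Each entry reuses the distributed Series.corr defined above, so the
    # per-column correlations run in parallel; only the final assembly below
    # is a non-parallel step.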
corrs = [ self[col_name].corr(other_col, method) for col_name, other_col in zip(col_names, other_cols)] # Combine the results def fill_dataframe(*args): result = proxy.copy(deep=True) for col, value in zip(proxy.index, args): result[col] = value return result with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'fill_dataframe', fill_dataframe, [corr._expr for corr in corrs], requires_partition_by=partitionings.Singleton(), proxy=proxy)) cummax = frame_base.wont_implement_method(pd.DataFrame, 'cummax', reason='order-sensitive') cummin = frame_base.wont_implement_method(pd.DataFrame, 'cummin', reason='order-sensitive') cumprod = frame_base.wont_implement_method(pd.DataFrame, 'cumprod', reason='order-sensitive') cumsum = frame_base.wont_implement_method(pd.DataFrame, 'cumsum', reason='order-sensitive') # TODO(BEAM-12071): Consider adding an order-insensitive implementation for # diff that relies on the index diff = frame_base.wont_implement_method(pd.DataFrame, 'diff', reason='order-sensitive') interpolate = frame_base.wont_implement_method(pd.DataFrame, 'interpolate', reason='order-sensitive') head = frame_base.wont_implement_method(pd.DataFrame, 'head', explanation=_PEEK_METHOD_EXPLANATION) tail = frame_base.wont_implement_method(pd.DataFrame, 'tail', explanation=_PEEK_METHOD_EXPLANATION) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def sample(self, n, frac, replace, weights, random_state, axis): """When ``axis='index'``, only ``n`` and/or ``weights`` may be specified. ``frac``, ``random_state``, and ``replace=True`` are not yet supported. See `BEAM-12476 <https://issues.apache.org/jira/BEAM-12476>`_. Note that pandas will raise an error if ``n`` is larger than the length of the dataset, while the Beam DataFrame API will simply return the full dataset in that case. sample is fully supported for axis='columns'.""" if axis in (1, 'columns'): # Sampling on axis=columns just means projecting random columns # Eagerly generate proxy to determine the set of columns at construction # time proxy = self._expr.proxy().sample(n=n, frac=frac, replace=replace, weights=weights, random_state=random_state, axis=axis) # Then do the projection return self[list(proxy.columns)] # axis='index' if frac is not None or random_state is not None or replace: raise NotImplementedError( f"When axis={axis!r}, only n and/or weights may be specified. " "frac, random_state, and replace=True are not yet supported " f"(got frac={frac!r}, random_state={random_state!r}, " f"replace={replace!r}). 
See BEAM-12476.") if n is None: n = 1 if isinstance(weights, str): weights = self[weights] tmp_weight_column_name = "___Beam_DataFrame_weights___" if weights is None: self_with_randomized_weights = frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'randomized_weights', lambda df: df.assign(**{tmp_weight_column_name: np.random.rand(len(df))}), [self._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) else: # See "Fast Parallel Weighted Random Sampling" by Efraimidis and Spirakis # https://www.cti.gr/images_gr/reports/99-06-02.ps def assign_randomized_weights(df, weights): non_zero_weights = (weights > 0) | pd.Series(dtype=bool, index=df.index) df = df.loc[non_zero_weights] weights = weights.loc[non_zero_weights] random_weights = np.log(np.random.rand(len(weights))) / weights return df.assign(**{tmp_weight_column_name: random_weights}) self_with_randomized_weights = frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'randomized_weights', assign_randomized_weights, [self._expr, weights._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) return self_with_randomized_weights.nlargest( n=n, columns=tmp_weight_column_name, keep='any').drop( tmp_weight_column_name, axis=1) @frame_base.with_docs_from(pd.DataFrame) def dot(self, other): # We want to broadcast the right hand side to all partitions of the left. # This is OK, as its index must be the same size as the columns set of self, # so cannot be too large. class AsScalar(object): def __init__(self, value): self.value = value if isinstance(other, frame_base.DeferredFrame): proxy = other._expr.proxy() with expressions.allow_non_parallel_operations(): side = expressions.ComputedExpression( 'as_scalar', lambda df: AsScalar(df), [other._expr], requires_partition_by=partitionings.Singleton()) else: proxy = pd.DataFrame(columns=range(len(other[0]))) side = expressions.ConstantExpression(AsScalar(other)) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'dot', lambda left, right: left @ right.value, [self._expr, side], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary(), proxy=proxy)) __matmul__ = dot @frame_base.with_docs_from(pd.DataFrame) def mode(self, axis=0, *args, **kwargs): """mode with axis="columns" is not implemented because it produces non-deferred columns. mode with axis="index" is not currently parallelizable. An approximate, parallelizable implementation of mode may be added in the future (`BEAM-12181 <https://issues.apache.org/jira/BEAM-12181>`_).""" if axis == 1 or axis == 'columns': # Number of columns is max(number mode values for each row), so we can't # determine how many there will be before looking at the data. raise frame_base.WontImplementError( "mode(axis=columns) is not supported because it produces a variable " "number of columns depending on the data.", reason="non-deferred-columns") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'mode', lambda df: df.mode(*args, **kwargs), [self._expr], #TODO(BEAM-12181): Can we add an approximate implementation? requires_partition_by=partitionings.Singleton(reason=( "mode(axis='index') cannot currently be parallelized. See " "BEAM-12181 tracking the possble addition of an approximate, " "parallelizable implementation of mode." 
)), preserves_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) @frame_base.maybe_inplace def dropna(self, axis, **kwargs): """dropna with axis="columns" specified cannot be parallelized.""" # TODO(robertwb): This is a common pattern. Generalize? if axis in (1, 'columns'): requires_partition_by = partitionings.Singleton(reason=( "dropna(axis=1) cannot currently be parallelized. It requires " "checking all values in each column for NaN values, to determine " "if that column should be dropped." )) else: requires_partition_by = partitionings.Arbitrary() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'dropna', lambda df: df.dropna(axis=axis, **kwargs), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=requires_partition_by)) def _eval_or_query(self, name, expr, inplace, **kwargs): for key in ('local_dict', 'global_dict', 'level', 'target', 'resolvers'): if key in kwargs: raise NotImplementedError(f"Setting '{key}' is not yet supported") # look for '@<py identifier>' if re.search(r'\@[^\d\W]\w*', expr, re.UNICODE): raise NotImplementedError("Accessing locals with @ is not yet supported " "(BEAM-11202)") result_expr = expressions.ComputedExpression( name, lambda df: getattr(df, name)(expr, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary()) if inplace: self._expr = result_expr else: return frame_base.DeferredFrame.wrap(result_expr) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def eval(self, expr, inplace, **kwargs): """Accessing local variables with ``@<varname>`` is not yet supported (`BEAM-11202 <https://issues.apache.org/jira/browse/BEAM-11202>`_). Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and ``resolvers`` are not yet supported.""" return self._eval_or_query('eval', expr, inplace, **kwargs) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def query(self, expr, inplace, **kwargs): """Accessing local variables with ``@<varname>`` is not yet supported (`BEAM-11202 <https://issues.apache.org/jira/browse/BEAM-11202>`_). 
Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and ``resolvers`` are not yet supported.""" return self._eval_or_query('query', expr, inplace, **kwargs) isnull = isna = frame_base._elementwise_method('isna', base=pd.DataFrame) notnull = notna = frame_base._elementwise_method('notna', base=pd.DataFrame) items = frame_base.wont_implement_method(pd.DataFrame, 'items', reason="non-deferred-result") itertuples = frame_base.wont_implement_method(pd.DataFrame, 'itertuples', reason="non-deferred-result") iterrows = frame_base.wont_implement_method(pd.DataFrame, 'iterrows', reason="non-deferred-result") iteritems = frame_base.wont_implement_method(pd.DataFrame, 'iteritems', reason="non-deferred-result") def _cols_as_temporary_index(self, cols, suffix=''): original_index_names = list(self._expr.proxy().index.names) new_index_names = [ '__apache_beam_temp_%d_%s' % (ix, suffix) for (ix, _) in enumerate(original_index_names)] def reindex(df): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'reindex', lambda df: df.rename_axis(index=new_index_names, copy=False) .reset_index().set_index(cols), [df._expr], preserves_partition_by=partitionings.Singleton(), requires_partition_by=partitionings.Arbitrary())) def revert(df): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'join_restoreindex', lambda df: df.reset_index().set_index(new_index_names) .rename_axis(index=original_index_names, copy=False), [df._expr], preserves_partition_by=partitionings.Singleton(), requires_partition_by=partitionings.Arbitrary())) return reindex, revert @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def join(self, other, on, **kwargs): if on is not None: reindex, revert = self._cols_as_temporary_index(on) return revert(reindex(self).join(other, **kwargs)) if isinstance(other, list): other_is_list = True else: other = [other] other_is_list = False placeholder = object() other_exprs = [ df._expr for df in other if isinstance(df, frame_base.DeferredFrame)] const_others = [ placeholder if isinstance(df, frame_base.DeferredFrame) else df for df in other] def fill_placeholders(values): values = iter(values) filled = [ next(values) if df is placeholder else df for df in const_others] if other_is_list: return filled else: return filled[0] return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'join', lambda df, *deferred_others: df.join( fill_placeholders(deferred_others), **kwargs), [self._expr] + other_exprs, preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Index())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def merge( self, right, on, left_on, right_on, left_index, right_index, suffixes, **kwargs): """merge is not parallelizable unless ``left_index`` or ``right_index`` is ``True`, because it requires generating an entirely new unique index. See notes on :meth:`DeferredDataFrame.reset_index`. It is recommended to move the join key for one of your columns to the index to avoid this issue. For an example see the enrich pipeline in :mod:`apache_beam.examples.dataframe.taxiride`. ``how="cross"`` is not yet supported. """ self_proxy = self._expr.proxy() right_proxy = right._expr.proxy() # Validate with a pandas call. 
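    # (Running the merge on the empty proxies surfaces argument errors, e.g.
    # conflicting on=/left_on= settings, at pipeline construction time rather
    # than at execution time.)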
_ = self_proxy.merge( right_proxy, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, **kwargs) if kwargs.get('how', None) == 'cross': raise NotImplementedError("cross join is not yet implemented (BEAM-9547)") if not any([on, left_on, right_on, left_index, right_index]): on = [col for col in self_proxy.columns if col in right_proxy.columns] if not left_on: left_on = on if left_on and not isinstance(left_on, list): left_on = [left_on] if not right_on: right_on = on if right_on and not isinstance(right_on, list): right_on = [right_on] if left_index: indexed_left = self else: indexed_left = self.set_index(left_on, drop=False) if right_index: indexed_right = right else: indexed_right = right.set_index(right_on, drop=False) if left_on and right_on: common_cols = set(left_on).intersection(right_on) if len(common_cols): # When merging on the same column name from both dfs, we need to make # sure only one df has the column. Otherwise we end up with # two duplicate columns, one with lsuffix and one with rsuffix. # It's safe to drop from either because the data has already been duped # to the index. indexed_right = indexed_right.drop(columns=common_cols) merged = frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'merge', lambda left, right: left.merge(right, left_index=True, right_index=True, suffixes=suffixes, **kwargs), [indexed_left._expr, indexed_right._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Index())) if left_index or right_index: return merged else: return merged.reset_index(drop=True) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def nlargest(self, keep, **kwargs): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" if keep == 'any': keep = 'first' elif keep != 'all': raise frame_base.WontImplementError( f"nlargest(keep={keep!r}) is not supported because it is " "order sensitive. Only keep=\"all\" is supported.", reason="order-sensitive") kwargs['keep'] = keep per_partition = expressions.ComputedExpression( 'nlargest-per-partition', lambda df: df.nlargest(**kwargs), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'nlargest', lambda df: df.nlargest(**kwargs), [per_partition], preserves_partition_by=partitionings.Singleton(), requires_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def nsmallest(self, keep, **kwargs): """Only ``keep=False`` and ``keep="any"`` are supported. Other values of ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is a Beam-specific option that guarantees only one duplicate will be kept, but unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_ duplicate element is kept.""" if keep == 'any': keep = 'first' elif keep != 'all': raise frame_base.WontImplementError( f"nsmallest(keep={keep!r}) is not supported because it is " "order sensitive. 
Only keep=\"all\" is supported.", reason="order-sensitive") kwargs['keep'] = keep per_partition = expressions.ComputedExpression( 'nsmallest-per-partition', lambda df: df.nsmallest(**kwargs), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary()) with expressions.allow_non_parallel_operations(True): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'nsmallest', lambda df: df.nsmallest(**kwargs), [per_partition], preserves_partition_by=partitionings.Singleton(), requires_partition_by=partitionings.Singleton())) plot = frame_base.wont_implement_method(pd.DataFrame, 'plot', reason="plotting-tools") @frame_base.with_docs_from(pd.DataFrame) def pop(self, item): result = self[item] self._expr = expressions.ComputedExpression( 'popped', lambda df: df.drop(columns=[item]), [self._expr], preserves_partition_by=partitionings.Arbitrary(), requires_partition_by=partitionings.Arbitrary()) return result @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def quantile(self, q, axis, **kwargs): """``quantile(axis="index")`` is not parallelizable. See `BEAM-12167 <https://issues.apache.org/jira/browse/BEAM-12167>`_ tracking the possible addition of an approximate, parallelizable implementation of quantile. When using quantile with ``axis="columns"`` only a single ``q`` value can be specified.""" if axis in (1, 'columns'): if isinstance(q, list): raise frame_base.WontImplementError( "quantile(axis=columns) with multiple q values is not supported " "because it transposes the input DataFrame. Note computing " "an individual quantile across columns (e.g. " f"df.quantile(q={q[0]!r}, axis={axis!r}) is supported.", reason="non-deferred-columns") else: requires = partitionings.Arbitrary() else: # axis='index' # TODO(BEAM-12167): Provide an option for approximate distributed # quantiles requires = partitionings.Singleton(reason=( "Computing quantiles across index cannot currently be parallelized. " "See BEAM-12167 tracking the possible addition of an approximate, " "parallelizable implementation of quantile." )) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'quantile', lambda df: df.quantile(q=q, axis=axis, **kwargs), [self._expr], requires_partition_by=requires, preserves_partition_by=partitionings.Singleton())) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.maybe_inplace def rename(self, **kwargs): """rename is not parallelizable when ``axis="index"`` and ``errors="raise"``. It requires collecting all data on a single node in order to detect if one of the index values is missing.""" rename_index = ( 'index' in kwargs or kwargs.get('axis', None) in (0, 'index') or ('columns' not in kwargs and 'axis' not in kwargs)) rename_columns = ( 'columns' in kwargs or kwargs.get('axis', None) in (1, 'columns')) if rename_index: # Technically, it's still partitioned by index, but it's no longer # partitioned by the hash of the index. preserves_partition_by = partitionings.Singleton() else: preserves_partition_by = partitionings.Index() if kwargs.get('errors', None) == 'raise' and rename_index: # TODO: We could do this in parallel by creating a ConstantExpression # with a series created from the mapper dict. Then Index() partitioning # would co-locate the necessary index values and we could raise # individually within each partition. 
Execution time errors are # discouraged anyway so probably not worth the effort. requires_partition_by = partitionings.Singleton(reason=( "rename(errors='raise', axis='index') requires collecting all " "data on a single node in order to detect missing index values." )) else: requires_partition_by = partitionings.Arbitrary() proxy = None if rename_index: # The proxy can't be computed by executing rename, it will error # renaming the index. if rename_columns: # Note if both are being renamed, index and columns must be specified # (not axis) proxy = self._expr.proxy().rename(**{k: v for (k, v) in kwargs.items() if not k == 'index'}) else: # No change in columns, reuse proxy proxy = self._expr.proxy() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'rename', lambda df: df.rename(**kwargs), [self._expr], proxy=proxy, preserves_partition_by=preserves_partition_by, requires_partition_by=requires_partition_by)) rename_axis = frame_base._elementwise_method('rename_axis', base=pd.DataFrame) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def round(self, decimals, *args, **kwargs): if isinstance(decimals, frame_base.DeferredFrame): # Disallow passing a deferred Series in, our current partitioning model # prevents us from using it correctly. raise NotImplementedError("Passing a deferred series to round() is not " "supported, please use a concrete pd.Series " "instance or a dictionary") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'round', lambda df: df.round(decimals, *args, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Index() ) ) select_dtypes = frame_base._elementwise_method('select_dtypes', base=pd.DataFrame) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def shift(self, axis, freq, **kwargs): """shift with ``axis="index" is only supported with ``freq`` specified and ``fill_value`` undefined. Other configurations make this operation order-sensitive.""" if axis in (1, 'columns'): preserves = partitionings.Arbitrary() proxy = None else: if freq is None or 'fill_value' in kwargs: fill_value = kwargs.get('fill_value', 'NOT SET') raise frame_base.WontImplementError( f"shift(axis={axis!r}) is only supported with freq defined, and " f"fill_value undefined (got freq={freq!r}," f"fill_value={fill_value!r}). Other configurations are sensitive " "to the order of the data because they require populating shifted " "rows with `fill_value`.", reason="order-sensitive") # proxy generation fails in pandas <1.2 # Seems due to https://github.com/pandas-dev/pandas/issues/14811, # bug with shift on empty indexes. # Fortunately the proxy should be identical to the input. proxy = self._expr.proxy().copy() # index is modified, so no partitioning is preserved. 
preserves = partitionings.Singleton() return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'shift', lambda df: df.shift(axis=axis, freq=freq, **kwargs), [self._expr], proxy=proxy, preserves_partition_by=preserves, requires_partition_by=partitionings.Arbitrary())) shape = property(frame_base.wont_implement_method( pd.DataFrame, 'shape', reason="non-deferred-result")) stack = frame_base._elementwise_method('stack', base=pd.DataFrame) all = _agg_method(pd.DataFrame, 'all') any = _agg_method(pd.DataFrame, 'any') count = _agg_method(pd.DataFrame, 'count') describe = _agg_method(pd.DataFrame, 'describe') max = _agg_method(pd.DataFrame, 'max') min = _agg_method(pd.DataFrame, 'min') prod = product = _agg_method(pd.DataFrame, 'prod') sum = _agg_method(pd.DataFrame, 'sum') mean = _agg_method(pd.DataFrame, 'mean') median = _agg_method(pd.DataFrame, 'median') nunique = _agg_method(pd.DataFrame, 'nunique') std = _agg_method(pd.DataFrame, 'std') var = _agg_method(pd.DataFrame, 'var') take = frame_base.wont_implement_method(pd.DataFrame, 'take', reason='deprecated') to_records = frame_base.wont_implement_method(pd.DataFrame, 'to_records', reason="non-deferred-result") to_dict = frame_base.wont_implement_method(pd.DataFrame, 'to_dict', reason="non-deferred-result") to_numpy = frame_base.wont_implement_method(pd.DataFrame, 'to_numpy', reason="non-deferred-result") to_string = frame_base.wont_implement_method(pd.DataFrame, 'to_string', reason="non-deferred-result") to_sparse = frame_base.wont_implement_method(pd.DataFrame, 'to_sparse', reason="non-deferred-result") transpose = frame_base.wont_implement_method( pd.DataFrame, 'transpose', reason='non-deferred-columns') T = property(frame_base.wont_implement_method( pd.DataFrame, 'T', reason='non-deferred-columns')) @frame_base.with_docs_from(pd.DataFrame) def unstack(self, *args, **kwargs): """unstack cannot be used on :class:`DeferredDataFrame` instances with multiple index levels, because the columns in the output depend on the data.""" if self._expr.proxy().index.nlevels == 1: return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'unstack', lambda df: df.unstack(*args, **kwargs), [self._expr], requires_partition_by=partitionings.Index())) else: raise frame_base.WontImplementError( "unstack() is not supported on DataFrames with a multiple indexes, " "because the columns in the output depend on the input data.", reason="non-deferred-columns") update = frame_base._proxy_method( 'update', inplace=True, base=pd.DataFrame, requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary()) values = property(frame_base.wont_implement_method( pd.DataFrame, 'values', reason="non-deferred-result")) style = property(frame_base.wont_implement_method( pd.DataFrame, 'style', reason="non-deferred-result")) @frame_base.with_docs_from(pd.DataFrame) @frame_base.args_to_kwargs(pd.DataFrame) @frame_base.populate_defaults(pd.DataFrame) def melt(self, ignore_index, **kwargs): """``ignore_index=True`` is not supported, because it requires generating an order-sensitive index.""" if ignore_index: raise frame_base.WontImplementError( "melt(ignore_index=True) is order sensitive because it requires " "generating a new index based on the order of the data.", reason="order-sensitive") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'melt', lambda df: df.melt(ignore_index=False, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton())) 
@frame_base.with_docs_from(pd.DataFrame) def value_counts(self, subset=None, sort=False, normalize=False, ascending=False): """``sort`` is ``False`` by default, and ``sort=True`` is not supported because it imposes an ordering on the dataset which likely will not be preserved.""" if sort: raise frame_base.WontImplementError( "value_counts(sort=True) is not supported because it imposes an " "ordering on the dataset which likely will not be preserved.", reason="order-sensitive") columns = subset or list(self.columns) result = self.groupby(columns).size() if normalize: return result/self.dropna().length() else: return result for io_func in dir(io): if io_func.startswith('to_'): setattr(DeferredDataFrame, io_func, getattr(io, io_func)) setattr(DeferredSeries, io_func, getattr(io, io_func)) for meth in ('filter', ): setattr(DeferredDataFrame, meth, frame_base._elementwise_method(meth, base=pd.DataFrame)) @populate_not_implemented(DataFrameGroupBy) class DeferredGroupBy(frame_base.DeferredFrame): def __init__(self, expr, kwargs, ungrouped: expressions.Expression[pd.core.generic.NDFrame], ungrouped_with_index: expressions.Expression[pd.core.generic.NDFrame], # pylint: disable=line-too-long grouping_columns, grouping_indexes, projection=None): """This object represents the result of:: ungrouped.groupby(level=[grouping_indexes + grouping_columns], **kwargs)[projection] :param expr: An expression to compute a pandas GroupBy object. Convenient for unliftable aggregations. :param ungrouped: An expression to compute the DataFrame pre-grouping, the (Multi)Index contains only the grouping columns/indexes. :param ungrouped_with_index: Same as ungrouped, except the index includes all of the original indexes as well as any grouping columns. This is important for operations that expose the original index, e.g. .apply(), but we only use it when necessary to avoid unnessary data transfer and GBKs. :param grouping_columns: list of column labels that were in the original groupby(..) ``by`` parameter. Only relevant for grouped DataFrames. :param grouping_indexes: list of index names (or index level numbers) to be grouped. :param kwargs: Keywords args passed to the original groupby(..) 
call.""" super(DeferredGroupBy, self).__init__(expr) self._ungrouped = ungrouped self._ungrouped_with_index = ungrouped_with_index self._projection = projection self._grouping_columns = grouping_columns self._grouping_indexes = grouping_indexes self._kwargs = kwargs if (self._kwargs.get('dropna', True) is False and self._ungrouped.proxy().index.nlevels > 1): raise NotImplementedError( "dropna=False does not work as intended in the Beam DataFrame API " "when grouping on multiple columns or indexes (See BEAM-12495).") def __getattr__(self, name): return DeferredGroupBy( expressions.ComputedExpression( 'groupby_project', lambda gb: getattr(gb, name), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary()), self._kwargs, self._ungrouped, self._ungrouped_with_index, self._grouping_columns, self._grouping_indexes, projection=name) def __getitem__(self, name): return DeferredGroupBy( expressions.ComputedExpression( 'groupby_project', lambda gb: gb[name], [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary()), self._kwargs, self._ungrouped, self._ungrouped_with_index, self._grouping_columns, self._grouping_indexes, projection=name) @frame_base.with_docs_from(DataFrameGroupBy) def agg(self, fn, *args, **kwargs): if _is_associative(fn): return _liftable_agg(fn)(self, *args, **kwargs) elif _is_liftable_with_sum(fn): return _liftable_agg(fn, postagg_meth='sum')(self, *args, **kwargs) elif _is_unliftable(fn): return _unliftable_agg(fn)(self, *args, **kwargs) elif callable(fn): return DeferredDataFrame( expressions.ComputedExpression( 'agg', lambda gb: gb.agg(fn, *args, **kwargs), [self._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Singleton())) else: raise NotImplementedError(f"GroupBy.agg(func={fn!r})") @property def ndim(self): return self._expr.proxy().ndim @frame_base.with_docs_from(DataFrameGroupBy) def apply(self, func, *args, **kwargs): """Note that ``func`` will be called once during pipeline construction time with an empty pandas object, so take care if ``func`` has a side effect. When called with an empty pandas object, ``func`` is expected to return an object of the same type as what will be returned when the pipeline is processing actual data. If the result is a pandas object it should have the same type and name (for a Series) or column types and names (for a DataFrame) as the actual results.""" project = _maybe_project_func(self._projection) grouping_indexes = self._grouping_indexes grouping_columns = self._grouping_columns # Unfortunately pandas does not execute func to determine the right proxy. # We run user func on a proxy here to detect the return type and generate # the proxy. fn_input = project(self._ungrouped_with_index.proxy().reset_index( grouping_columns, drop=True)) result = func(fn_input) if isinstance(result, pd.core.generic.NDFrame): if result.index is fn_input.index: proxy = result else: proxy = result[:0] def index_to_arrays(index): return [index.get_level_values(level) for level in range(index.nlevels)] # The final result will have the grouped indexes + the indexes from the # result proxy.index = pd.MultiIndex.from_arrays( index_to_arrays(self._ungrouped.proxy().index) + index_to_arrays(proxy.index), names=self._ungrouped.proxy().index.names + proxy.index.names) else: # The user fn returns some non-pandas type. The expected result is a # Series where each element is the result of one user fn call. 
dtype = pd.Series([result]).dtype proxy = pd.Series([], dtype=dtype, index=self._ungrouped.proxy().index) def do_partition_apply(df): # Remove columns from index, we only needed them there for partitioning df = df.reset_index(grouping_columns, drop=True) gb = df.groupby(level=grouping_indexes or None, by=grouping_columns or None) gb = project(gb) return gb.apply(func, *args, **kwargs) return DeferredDataFrame( expressions.ComputedExpression( 'apply', do_partition_apply, [self._ungrouped_with_index], proxy=proxy, requires_partition_by=partitionings.Index(grouping_indexes + grouping_columns), preserves_partition_by=partitionings.Index(grouping_indexes))) @frame_base.with_docs_from(DataFrameGroupBy) def transform(self, fn, *args, **kwargs): """Note that ``func`` will be called once during pipeline construction time with an empty pandas object, so take care if ``func`` has a side effect. When called with an empty pandas object, ``func`` is expected to return an object of the same type as what will be returned when the pipeline is processing actual data. The result should have the same type and name (for a Series) or column types and names (for a DataFrame) as the actual results.""" if not callable(fn): raise NotImplementedError( "String functions are not yet supported in transform.") if self._grouping_columns and not self._projection: grouping_columns = self._grouping_columns def fn_wrapper(x, *args, **kwargs): x = x.droplevel(grouping_columns) return fn(x, *args, **kwargs) else: fn_wrapper = fn project = _maybe_project_func(self._projection) # pandas cannot execute fn to determine the right proxy. # We run user fn on a proxy here to detect the return type and generate the # proxy. result = fn_wrapper(project(self._ungrouped_with_index.proxy())) parent_frame = self._ungrouped.args()[0].proxy() if isinstance(result, pd.core.generic.NDFrame): proxy = result[:0] else: # The user fn returns some non-pandas type. The expected result is a # Series where each element is the result of one user fn call. dtype = pd.Series([result]).dtype proxy = pd.Series([], dtype=dtype, name=project(parent_frame).name) if not isinstance(self._projection, list): proxy.name = self._projection # The final result will have the original indexes proxy.index = parent_frame.index levels = self._grouping_indexes + self._grouping_columns return DeferredDataFrame( expressions.ComputedExpression( 'transform', lambda df: project(df.groupby(level=levels)).transform( fn_wrapper, *args, **kwargs).droplevel(self._grouping_columns), [self._ungrouped_with_index], proxy=proxy, requires_partition_by=partitionings.Index(levels), preserves_partition_by=partitionings.Index(self._grouping_indexes))) @frame_base.with_docs_from(DataFrameGroupBy) def filter(self, func=None, dropna=True): if func is None or not callable(func): raise TypeError("func must be specified and it must be callable") def apply_fn(df): if func(df): return df elif not dropna: result = df.copy() result.iloc[:, :] = np.nan return result else: return df.iloc[:0] return self.apply(apply_fn).droplevel(self._grouping_columns) @property # type: ignore @frame_base.with_docs_from(DataFrameGroupBy) def dtypes(self): grouping_columns = self._grouping_columns return self.apply(lambda df: df.drop(grouping_columns, axis=1).dtypes) fillna = frame_base.wont_implement_method( DataFrameGroupBy, 'fillna', explanation=( "df.fillna() should be used instead. Only method=None is supported " "because other methods are order-sensitive. 
df.groupby(..).fillna() " "without a method is equivalent to df.fillna().")) ffill = frame_base.wont_implement_method(DataFrameGroupBy, 'ffill', reason="order-sensitive") bfill = frame_base.wont_implement_method(DataFrameGroupBy, 'bfill', reason="order-sensitive") pad = frame_base.wont_implement_method(DataFrameGroupBy, 'pad', reason="order-sensitive") backfill = frame_base.wont_implement_method(DataFrameGroupBy, 'backfill', reason="order-sensitive") aggregate = agg hist = frame_base.wont_implement_method(DataFrameGroupBy, 'hist', reason="plotting-tools") plot = frame_base.wont_implement_method(DataFrameGroupBy, 'plot', reason="plotting-tools") boxplot = frame_base.wont_implement_method(DataFrameGroupBy, 'boxplot', reason="plotting-tools") head = frame_base.wont_implement_method( DataFrameGroupBy, 'head', explanation=_PEEK_METHOD_EXPLANATION) tail = frame_base.wont_implement_method( DataFrameGroupBy, 'tail', explanation=_PEEK_METHOD_EXPLANATION) first = frame_base.not_implemented_method('first', base_type=DataFrameGroupBy) last = frame_base.not_implemented_method('last', base_type=DataFrameGroupBy) nth = frame_base.wont_implement_method( DataFrameGroupBy, 'nth', reason='order-sensitive') cumcount = frame_base.wont_implement_method( DataFrameGroupBy, 'cumcount', reason='order-sensitive') cummax = frame_base.wont_implement_method( DataFrameGroupBy, 'cummax', reason='order-sensitive') cummin = frame_base.wont_implement_method( DataFrameGroupBy, 'cummin', reason='order-sensitive') cumsum = frame_base.wont_implement_method( DataFrameGroupBy, 'cumsum', reason='order-sensitive') cumprod = frame_base.wont_implement_method( DataFrameGroupBy, 'cumprod', reason='order-sensitive') diff = frame_base.wont_implement_method(DataFrameGroupBy, 'diff', reason='order-sensitive') shift = frame_base.wont_implement_method(DataFrameGroupBy, 'shift', reason='order-sensitive') # TODO(BEAM-12169): Consider allowing this for categorical keys. __len__ = frame_base.wont_implement_method( DataFrameGroupBy, '__len__', reason="non-deferred-result") groups = property(frame_base.wont_implement_method( DataFrameGroupBy, 'groups', reason="non-deferred-result")) indices = property(frame_base.wont_implement_method( DataFrameGroupBy, 'indices', reason="non-deferred-result")) resample = frame_base.wont_implement_method( DataFrameGroupBy, 'resample', reason='event-time-semantics') rolling = frame_base.wont_implement_method( DataFrameGroupBy, 'rolling', reason='event-time-semantics') def _maybe_project_func(projection: Optional[List[str]]): """ Returns identity func if projection is empty or None, else returns a function that projects the specified columns. 
""" if projection: return lambda df: df[projection] else: return lambda x: x def _liftable_agg(meth, postagg_meth=None): agg_name, _ = frame_base.name_and_func(meth) if postagg_meth is None: post_agg_name = agg_name else: post_agg_name, _ = frame_base.name_and_func(postagg_meth) @frame_base.with_docs_from(DataFrameGroupBy, name=agg_name) def wrapper(self, *args, **kwargs): assert isinstance(self, DeferredGroupBy) if 'min_count' in kwargs: return _unliftable_agg(meth)(self, *args, **kwargs) to_group = self._ungrouped.proxy().index is_categorical_grouping = any(to_group.get_level_values(i).is_categorical() for i in self._grouping_indexes) groupby_kwargs = self._kwargs # Don't include un-observed categorical values in the preagg preagg_groupby_kwargs = groupby_kwargs.copy() preagg_groupby_kwargs['observed'] = True project = _maybe_project_func(self._projection) pre_agg = expressions.ComputedExpression( 'pre_combine_' + agg_name, lambda df: getattr( project( df.groupby(level=list(range(df.index.nlevels)), **preagg_groupby_kwargs) ), agg_name)(**kwargs), [self._ungrouped], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary()) post_agg = expressions.ComputedExpression( 'post_combine_' + post_agg_name, lambda df: getattr( df.groupby(level=list(range(df.index.nlevels)), **groupby_kwargs), post_agg_name)(**kwargs), [pre_agg], requires_partition_by=(partitionings.Singleton(reason=( "Aggregations grouped by a categorical column are not currently " "parallelizable (BEAM-11190)." )) if is_categorical_grouping else partitionings.Index()), preserves_partition_by=partitionings.Arbitrary()) return frame_base.DeferredFrame.wrap(post_agg) return wrapper def _unliftable_agg(meth): agg_name, _ = frame_base.name_and_func(meth) @frame_base.with_docs_from(DataFrameGroupBy, name=agg_name) def wrapper(self, *args, **kwargs): assert isinstance(self, DeferredGroupBy) to_group = self._ungrouped.proxy().index is_categorical_grouping = any(to_group.get_level_values(i).is_categorical() for i in self._grouping_indexes) groupby_kwargs = self._kwargs project = _maybe_project_func(self._projection) post_agg = expressions.ComputedExpression( agg_name, lambda df: getattr(project( df.groupby(level=list(range(df.index.nlevels)), **groupby_kwargs), ), agg_name)(**kwargs), [self._ungrouped], requires_partition_by=(partitionings.Singleton(reason=( "Aggregations grouped by a categorical column are not currently " "parallelizable (BEAM-11190)." )) if is_categorical_grouping else partitionings.Index()), # Some aggregation methods (e.g. corr/cov) add additional index levels. # We only preserve the ones that existed _before_ the groupby. 
preserves_partition_by=partitionings.Index( list(range(self._ungrouped.proxy().index.nlevels)))) return frame_base.DeferredFrame.wrap(post_agg) return wrapper for meth in LIFTABLE_AGGREGATIONS: setattr(DeferredGroupBy, meth, _liftable_agg(meth)) for meth in LIFTABLE_WITH_SUM_AGGREGATIONS: setattr(DeferredGroupBy, meth, _liftable_agg(meth, postagg_meth='sum')) for meth in UNLIFTABLE_AGGREGATIONS: setattr(DeferredGroupBy, meth, _unliftable_agg(meth)) def _check_str_or_np_builtin(agg_func, func_list): return agg_func in func_list or ( getattr(agg_func, '__name__', None) in func_list and agg_func.__module__ in ('numpy', 'builtins')) def _is_associative(agg_func): return _check_str_or_np_builtin(agg_func, LIFTABLE_AGGREGATIONS) def _is_liftable_with_sum(agg_func): return _check_str_or_np_builtin(agg_func, LIFTABLE_WITH_SUM_AGGREGATIONS) def _is_unliftable(agg_func): return _check_str_or_np_builtin(agg_func, UNLIFTABLE_AGGREGATIONS) NUMERIC_AGGREGATIONS = ['max', 'min', 'prod', 'sum', 'mean', 'median', 'std', 'var'] def _is_numeric(agg_func): return _check_str_or_np_builtin(agg_func, NUMERIC_AGGREGATIONS) @populate_not_implemented(DataFrameGroupBy) class _DeferredGroupByCols(frame_base.DeferredFrame): # It's not clear that all of these make sense in Pandas either... agg = aggregate = frame_base._elementwise_method('agg', base=DataFrameGroupBy) any = frame_base._elementwise_method('any', base=DataFrameGroupBy) all = frame_base._elementwise_method('all', base=DataFrameGroupBy) boxplot = frame_base.wont_implement_method( DataFrameGroupBy, 'boxplot', reason="plotting-tools") describe = frame_base.not_implemented_method('describe', base_type=DataFrameGroupBy) diff = frame_base._elementwise_method('diff', base=DataFrameGroupBy) fillna = frame_base._elementwise_method('fillna', base=DataFrameGroupBy) filter = frame_base._elementwise_method('filter', base=DataFrameGroupBy) first = frame_base._elementwise_method('first', base=DataFrameGroupBy) get_group = frame_base._elementwise_method('get_group', base=DataFrameGroupBy) head = frame_base.wont_implement_method( DataFrameGroupBy, 'head', explanation=_PEEK_METHOD_EXPLANATION) hist = frame_base.wont_implement_method( DataFrameGroupBy, 'hist', reason="plotting-tools") idxmax = frame_base._elementwise_method('idxmax', base=DataFrameGroupBy) idxmin = frame_base._elementwise_method('idxmin', base=DataFrameGroupBy) last = frame_base._elementwise_method('last', base=DataFrameGroupBy) mad = frame_base._elementwise_method('mad', base=DataFrameGroupBy) max = frame_base._elementwise_method('max', base=DataFrameGroupBy) mean = frame_base._elementwise_method('mean', base=DataFrameGroupBy) median = frame_base._elementwise_method('median', base=DataFrameGroupBy) min = frame_base._elementwise_method('min', base=DataFrameGroupBy) nunique = frame_base._elementwise_method('nunique', base=DataFrameGroupBy) plot = frame_base.wont_implement_method( DataFrameGroupBy, 'plot', reason="plotting-tools") prod = frame_base._elementwise_method('prod', base=DataFrameGroupBy) quantile = frame_base._elementwise_method('quantile', base=DataFrameGroupBy) shift = frame_base._elementwise_method('shift', base=DataFrameGroupBy) size = frame_base._elementwise_method('size', base=DataFrameGroupBy) skew = frame_base._elementwise_method('skew', base=DataFrameGroupBy) std = frame_base._elementwise_method('std', base=DataFrameGroupBy) sum = frame_base._elementwise_method('sum', base=DataFrameGroupBy) tail = frame_base.wont_implement_method( DataFrameGroupBy, 'tail', 
explanation=_PEEK_METHOD_EXPLANATION) take = frame_base.wont_implement_method( DataFrameGroupBy, 'take', reason='deprecated') tshift = frame_base._elementwise_method('tshift', base=DataFrameGroupBy) var = frame_base._elementwise_method('var', base=DataFrameGroupBy) @property # type: ignore @frame_base.with_docs_from(DataFrameGroupBy) def groups(self): return self._expr.proxy().groups @property # type: ignore @frame_base.with_docs_from(DataFrameGroupBy) def indices(self): return self._expr.proxy().indices @property # type: ignore @frame_base.with_docs_from(DataFrameGroupBy) def ndim(self): return self._expr.proxy().ndim @property # type: ignore @frame_base.with_docs_from(DataFrameGroupBy) def ngroups(self): return self._expr.proxy().ngroups @populate_not_implemented(pd.core.indexes.base.Index) class _DeferredIndex(object): def __init__(self, frame): self._frame = frame @property def names(self): return self._frame._expr.proxy().index.names @names.setter def names(self, value): def set_index_names(df): df = df.copy() df.index.names = value return df self._frame._expr = expressions.ComputedExpression( 'set_index_names', set_index_names, [self._frame._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary()) @property def name(self): return self._frame._expr.proxy().index.name @name.setter def name(self, value): self.names = [value] @property def ndim(self): return self._frame._expr.proxy().index.ndim @property def dtype(self): return self._frame._expr.proxy().index.dtype @property def nlevels(self): return self._frame._expr.proxy().index.nlevels def __getattr__(self, name): raise NotImplementedError('index.%s' % name) @populate_not_implemented(pd.core.indexing._LocIndexer) class _DeferredLoc(object): def __init__(self, frame): self._frame = frame def __getitem__(self, key): if isinstance(key, tuple): rows, cols = key return self[rows][cols] elif isinstance(key, list) and key and isinstance(key[0], bool): # Aligned by numerical key. raise NotImplementedError(type(key)) elif isinstance(key, list): # Select rows, but behaves poorly on missing values. 
raise NotImplementedError(type(key)) elif isinstance(key, slice): args = [self._frame._expr] func = lambda df: df.loc[key] elif isinstance(key, frame_base.DeferredFrame): func = lambda df, key: df.loc[key] if pd.core.dtypes.common.is_bool_dtype(key._expr.proxy()): # Boolean indexer, just pass it in as-is args = [self._frame._expr, key._expr] else: # Likely a DeferredSeries of labels, overwrite the key's index with it's # values so we can colocate them with the labels they're selecting def data_to_index(s): s = s.copy() s.index = s return s reindexed_expr = expressions.ComputedExpression( 'data_to_index', data_to_index, [key._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Singleton(), ) args = [self._frame._expr, reindexed_expr] elif callable(key): def checked_callable_key(df): computed_index = key(df) if isinstance(computed_index, tuple): row_index, _ = computed_index else: row_index = computed_index if isinstance(row_index, list) and row_index and isinstance( row_index[0], bool): raise NotImplementedError(type(row_index)) elif not isinstance(row_index, (slice, pd.Series)): raise NotImplementedError(type(row_index)) return computed_index args = [self._frame._expr] func = lambda df: df.loc[checked_callable_key] else: raise NotImplementedError(type(key)) return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'loc', func, args, requires_partition_by=( partitionings.Index() if len(args) > 1 else partitionings.Arbitrary()), preserves_partition_by=partitionings.Arbitrary())) __setitem__ = frame_base.not_implemented_method( 'loc.setitem', base_type=pd.core.indexing._LocIndexer) @populate_not_implemented(pd.core.indexing._iLocIndexer) class _DeferredILoc(object): def __init__(self, frame): self._frame = frame def __getitem__(self, index): if isinstance(index, tuple): rows, _ = index if rows != slice(None, None, None): raise frame_base.WontImplementError( "Using iloc to select rows is not supported because it's " "position-based indexing is sensitive to the order of the data.", reason="order-sensitive") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'iloc', lambda df: df.iloc[index], [self._frame._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) else: raise frame_base.WontImplementError( "Using iloc to select rows is not supported because it's " "position-based indexing is sensitive to the order of the data.", reason="order-sensitive") def __setitem__(self, index, value): raise frame_base.WontImplementError( "Using iloc to mutate a frame is not supported because it's " "position-based indexing is sensitive to the order of the data.", reason="order-sensitive") class _DeferredStringMethods(frame_base.DeferredBase): @frame_base.with_docs_from(pd.core.strings.StringMethods) @frame_base.args_to_kwargs(pd.core.strings.StringMethods) @frame_base.populate_defaults(pd.core.strings.StringMethods) def cat(self, others, join, **kwargs): """If defined, ``others`` must be a :class:`DeferredSeries` or a ``list`` of ``DeferredSeries``.""" if others is None: # Concatenate series into a single String requires = partitionings.Singleton(reason=( "cat(others=None) concatenates all data in a Series into a single " "string, so it requires collecting all data on a single node." 
)) func = lambda df: df.str.cat(join=join, **kwargs) args = [self._expr] elif (isinstance(others, frame_base.DeferredBase) or (isinstance(others, list) and all(isinstance(other, frame_base.DeferredBase) for other in others))): if isinstance(others, frame_base.DeferredBase): others = [others] requires = partitionings.Index() def func(*args): return args[0].str.cat(others=args[1:], join=join, **kwargs) args = [self._expr] + [other._expr for other in others] else: raise frame_base.WontImplementError( "others must be None, DeferredSeries, or List[DeferredSeries] " f"(encountered {type(others)}). Other types are not supported " "because they make this operation sensitive to the order of the " "data.", reason="order-sensitive") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'cat', func, args, requires_partition_by=requires, preserves_partition_by=partitionings.Arbitrary())) @frame_base.with_docs_from(pd.core.strings.StringMethods) @frame_base.args_to_kwargs(pd.core.strings.StringMethods) def repeat(self, repeats): """``repeats`` must be an ``int`` or a :class:`DeferredSeries`. Lists are not supported because they make this operation order-sensitive.""" if isinstance(repeats, int): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'repeat', lambda series: series.str.repeat(repeats), [self._expr], # TODO(BEAM-11155): Defer to pandas to compute this proxy. # Currently it incorrectly infers dtype bool, may require upstream # fix. proxy=self._expr.proxy(), requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) elif isinstance(repeats, frame_base.DeferredBase): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'repeat', lambda series, repeats_series: series.str.repeat(repeats_series), [self._expr, repeats._expr], # TODO(BEAM-11155): Defer to pandas to compute this proxy. # Currently it incorrectly infers dtype bool, may require upstream # fix. proxy=self._expr.proxy(), requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) elif isinstance(repeats, list): raise frame_base.WontImplementError( "str.repeat(repeats=) repeats must be an int or a DeferredSeries. " "Lists are not supported because they make this operation sensitive " "to the order of the data.", reason="order-sensitive") else: raise TypeError("str.repeat(repeats=) value must be an int or a " f"DeferredSeries (encountered {type(repeats)}).") get_dummies = frame_base.wont_implement_method( pd.core.strings.StringMethods, 'get_dummies', reason='non-deferred-columns') split = frame_base.wont_implement_method( pd.core.strings.StringMethods, 'split', reason='non-deferred-columns') rsplit = frame_base.wont_implement_method( pd.core.strings.StringMethods, 'rsplit', reason='non-deferred-columns') ELEMENTWISE_STRING_METHODS = [ 'capitalize', 'casefold', 'contains', 'count', 'endswith', 'extract', 'extractall', 'findall', 'fullmatch', 'get', 'isalnum', 'isalpha', 'isdecimal', 'isdigit', 'islower', 'isnumeric', 'isspace', 'istitle', 'isupper', 'join', 'len', 'lower', 'lstrip', 'match', 'pad', 'partition', 'replace', 'rpartition', 'rstrip', 'slice', 'slice_replace', 'startswith', 'strip', 'swapcase', 'title', 'upper', 'wrap', 'zfill', '__getitem__', ] def make_str_func(method): def func(df, *args, **kwargs): try: df_str = df.str except AttributeError: # If there's a non-string value in a Series passed to .str method, pandas # will generally just replace it with NaN in the result. 
However if # there are _only_ non-string values, pandas will raise: # # AttributeError: Can only use .str accessor with string values! # # This can happen to us at execution time if we split a partition that is # only non-strings. This branch just replaces all those values with NaN # in that case. return df.map(lambda _: np.nan) else: return getattr(df_str, method)(*args, **kwargs) return func for method in ELEMENTWISE_STRING_METHODS: setattr(_DeferredStringMethods, method, frame_base._elementwise_method(make_str_func(method), name=method, base=pd.core.strings.StringMethods)) def make_cat_func(method): def func(df, *args, **kwargs): return getattr(df.cat, method)(*args, **kwargs) return func class _DeferredCategoricalMethods(frame_base.DeferredBase): @property # type: ignore @frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor) def categories(self): return self._expr.proxy().cat.categories @property # type: ignore @frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor) def ordered(self): return self._expr.proxy().cat.ordered @property # type: ignore @frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor) def codes(self): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'codes', lambda s: s.cat.codes, [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary(), ) ) remove_unused_categories = frame_base.wont_implement_method( pd.core.arrays.categorical.CategoricalAccessor, 'remove_unused_categories', reason="non-deferred-columns") ELEMENTWISE_CATEGORICAL_METHODS = [ 'add_categories', 'as_ordered', 'as_unordered', 'remove_categories', 'rename_categories', 'reorder_categories', 'set_categories', ] for method in ELEMENTWISE_CATEGORICAL_METHODS: setattr(_DeferredCategoricalMethods, method, frame_base._elementwise_method( make_cat_func(method), name=method, base=pd.core.arrays.categorical.CategoricalAccessor)) class _DeferredDatetimeMethods(frame_base.DeferredBase): @property # type: ignore @frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties) def tz(self): return self._expr.proxy().dt.tz @property # type: ignore @frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties) def freq(self): return self._expr.proxy().dt.freq @frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties) def tz_localize(self, *args, ambiguous='infer', **kwargs): """``ambiguous`` cannot be set to ``"infer"`` as its semantics are order-sensitive. Similarly, specifying ``ambiguous`` as an :class:`~numpy.ndarray` is order-sensitive, but you can achieve similar functionality by specifying ``ambiguous`` as a Series.""" if isinstance(ambiguous, np.ndarray): raise frame_base.WontImplementError( "tz_localize(ambiguous=ndarray) is not supported because it makes " "this operation sensitive to the order of the data. 
Please use a " "DeferredSeries instead.", reason="order-sensitive") elif isinstance(ambiguous, frame_base.DeferredFrame): return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'tz_localize', lambda s, ambiguous: s.dt.tz_localize(*args, ambiguous=ambiguous, **kwargs), [self._expr, ambiguous._expr], requires_partition_by=partitionings.Index(), preserves_partition_by=partitionings.Arbitrary())) elif ambiguous == 'infer': # infer attempts to infer based on the order of the timestamps raise frame_base.WontImplementError( f"tz_localize(ambiguous={ambiguous!r}) is not allowed because it " "makes this operation sensitive to the order of the data.", reason="order-sensitive") return frame_base.DeferredFrame.wrap( expressions.ComputedExpression( 'tz_localize', lambda s: s.dt.tz_localize(*args, ambiguous=ambiguous, **kwargs), [self._expr], requires_partition_by=partitionings.Arbitrary(), preserves_partition_by=partitionings.Arbitrary())) to_period = frame_base.wont_implement_method( pd.core.indexes.accessors.DatetimeProperties, 'to_period', reason="event-time-semantics") to_pydatetime = frame_base.wont_implement_method( pd.core.indexes.accessors.DatetimeProperties, 'to_pydatetime', reason="non-deferred-result") to_pytimedelta = frame_base.wont_implement_method( pd.core.indexes.accessors.DatetimeProperties, 'to_pytimedelta', reason="non-deferred-result") def make_dt_property(method): def func(df): return getattr(df.dt, method) return func def make_dt_func(method): def func(df, *args, **kwargs): return getattr(df.dt, method)(*args, **kwargs) return func ELEMENTWISE_DATETIME_METHODS = [ 'ceil', 'day_name', 'month_name', 'floor', 'isocalendar', 'round', 'normalize', 'strftime', 'tz_convert', ] for method in ELEMENTWISE_DATETIME_METHODS: setattr(_DeferredDatetimeMethods, method, frame_base._elementwise_method( make_dt_func(method), name=method, base=pd.core.indexes.accessors.DatetimeProperties)) ELEMENTWISE_DATETIME_PROPERTIES = [ 'date', 'day', 'dayofweek', 'dayofyear', 'days_in_month', 'daysinmonth', 'hour', 'is_leap_year', 'is_month_end', 'is_month_start', 'is_quarter_end', 'is_quarter_start', 'is_year_end', 'is_year_start', 'microsecond', 'minute', 'month', 'nanosecond', 'quarter', 'second', 'time', 'timetz', 'week', 'weekday', 'weekofyear', 'year', ] for method in ELEMENTWISE_DATETIME_PROPERTIES: setattr(_DeferredDatetimeMethods, method, property(frame_base._elementwise_method( make_dt_property(method), name=method, base=pd.core.indexes.accessors.DatetimeProperties))) for base in ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'divmod', 'pow', 'and', 'or']: for p in ['%s', 'r%s', '__%s__', '__r%s__']: # TODO: non-trivial level? name = p % base if hasattr(pd.Series, name): setattr( DeferredSeries, name, frame_base._elementwise_method(name, restrictions={'level': None}, base=pd.Series)) if hasattr(pd.DataFrame, name): setattr( DeferredDataFrame, name, frame_base._elementwise_method(name, restrictions={'level': None}, base=pd.DataFrame)) inplace_name = '__i%s__' % base if hasattr(pd.Series, inplace_name): setattr( DeferredSeries, inplace_name, frame_base._elementwise_method(inplace_name, inplace=True, base=pd.Series)) if hasattr(pd.DataFrame, inplace_name): setattr( DeferredDataFrame, inplace_name, frame_base._elementwise_method(inplace_name, inplace=True, base=pd.DataFrame)) for name in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']: for p in '%s', '__%s__': # Note that non-underscore name is used for both as the __xxx__ methods are # order-sensitive. 
setattr(DeferredSeries, p % name, frame_base._elementwise_method(name, base=pd.Series)) setattr(DeferredDataFrame, p % name, frame_base._elementwise_method(name, base=pd.DataFrame)) for name in ['__neg__', '__pos__', '__invert__']: setattr(DeferredSeries, name, frame_base._elementwise_method(name, base=pd.Series)) setattr(DeferredDataFrame, name, frame_base._elementwise_method(name, base=pd.DataFrame)) DeferredSeries.multiply = DeferredSeries.mul # type: ignore DeferredDataFrame.multiply = DeferredDataFrame.mul # type: ignore def _slice_parts(s): yield s.start yield s.stop yield s.step def _is_null_slice(s): return isinstance(s, slice) and all(x is None for x in _slice_parts(s)) def _is_integer_slice(s): return isinstance(s, slice) and all( x is None or isinstance(x, int) for x in _slice_parts(s)) and not _is_null_slice(s)
apache-2.0
3,203,481,022,032,065,500
38.638434
117
0.635785
false
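A minimal, hedged usage sketch for the Apache Beam dataframe module stored in the record above (not part of the stored file): it shows how the deferred pandas-like API defined there (DeferredDataFrame / DeferredGroupBy) is typically driven from a pipeline via the conversion helpers in apache_beam.dataframe.convert. The sample rows and column names are illustrative assumptions, not taken from the source.

import apache_beam as beam
from apache_beam.dataframe.convert import to_dataframe, to_pcollection

with beam.Pipeline() as p:
    rows = p | beam.Create([
        beam.Row(word='cat', count=1),
        beam.Row(word='dog', count=2),
        beam.Row(word='cat', count=3),
    ])
    df = to_dataframe(rows)            # DeferredDataFrame backed by the PCollection
    totals = df.groupby('word').sum()  # liftable aggregation: pre-combined per partition, then merged
    _ = to_pcollection(totals) | beam.Map(print)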
myt00seven/svrg
nips2017_mnist/draw_3_BN.py
4
9604
# This is used to draw three comparisons for SGD+BN, SVRG+BN and Streaming SVRG +BN import matplotlib import matplotlib.pyplot as plt plt.switch_backend('agg') import pylab import numpy as np import sys # all the methods are with BN layers!!! PATH_DATA_adagrad = "data_3_BN/" PATH_DATA_SVRG = "data_3_BN/" PATH_DATA_Stream = "data_3_BN/" PATH_DATA = "data_3_BN/" PATH_FIGURE = "figure_3/" MAXLENGTH = 1000 STARTPOINT = 10 LINEWIDTH = 3 LOAD_SGD = True LOAD_SVRG = True DRAW_MLPBN_ADASGD = True DRAW_MLPBN_StreamingSVRG = True DRAW_MLPBN_SVRG = True Y_LIM_FINE_TUNING = True # SPEC_L1 = 'bo-' # SPEC_L1 = 'g^--' # SPEC_L3 = 'cs:' # SPEC_L4 = 'r*-.' SPEC_L1 = 'b-' SPEC_L2 = 'g:' SPEC_L3 = 'r-.' SPEC_L4 = 'c--' NUM_EPOCHS = 1000 def main(num_epochs=NUM_EPOCHS): str_epochs = str(num_epochs) # if LOAD_SGD: mlpbn_ADAsgd_acc_test= np.loadtxt(PATH_DATA +"_mlpbnTrue_adagrad_acc_test.txt") # if LOAD_SGD: mlpbn_ADAsgd_acc_train= np.loadtxt(PATH_DATA +"_mlpbnTrue_adagrad_acc_train.txt") if LOAD_SGD: mlpbn_ADAsgd_acc_val= np.loadtxt(PATH_DATA +"_mlpbnTrue_adagrad_acc_val.txt") # if LOAD_SGD: mlpbn_ADAsgd_loss_test= np.loadtxt(PATH_DATA +"_mlpbnTrue_adagrad_loss_test.txt") if LOAD_SGD: mlpbn_ADAsgd_loss_train= np.loadtxt(PATH_DATA +"_mlpbnTrue_adagrad_loss_train.txt") if LOAD_SGD: mlpbn_ADAsgd_loss_val= np.loadtxt(PATH_DATA +"_mlpbnTrue_adagrad_loss_val.txt") if LOAD_SVRG: mlpbn_Streamingsvrg_acc_train= np.loadtxt(PATH_DATA_SVRG +"_mlpbnTrue_StreamingSVRG_acc_train.txt") if LOAD_SVRG: mlpbn_Streamingsvrg_acc_val= np.loadtxt(PATH_DATA_SVRG +"_mlpbnTrue_StreamingSVRG_acc_val.txt") if LOAD_SVRG: mlpbn_Streamingsvrg_loss_train= np.loadtxt(PATH_DATA_SVRG +"_mlpbnTrue_StreamingSVRG_loss_train.txt") if LOAD_SVRG: mlpbn_Streamingsvrg_loss_val= np.loadtxt(PATH_DATA_SVRG +"_mlpbnTrue_StreamingSVRG_loss_val.txt") if LOAD_SVRG: mlpbn_Streamingsvrg_acc_test= np.loadtxt(PATH_DATA_SVRG +"_mlpbnTrue_StreamingSVRG_acc_test.txt") if LOAD_SVRG: mlpbn_Streamingsvrg_loss_test= np.loadtxt(PATH_DATA_SVRG +"_mlpbnTrue_StreamingSVRG_loss_test.txt") if LOAD_SVRG: mlpbn_svrg_acc_train= np.loadtxt(PATH_DATA +"_mlpbnTrue_SVRG_acc_train.txt") if LOAD_SVRG: mlpbn_svrg_acc_val= np.loadtxt(PATH_DATA +"_mlpbnTrue_SVRG_acc_val.txt") if LOAD_SVRG: mlpbn_svrg_loss_train= np.loadtxt(PATH_DATA +"_mlpbnTrue_SVRG_loss_train.txt") if LOAD_SVRG: mlpbn_svrg_loss_val= np.loadtxt(PATH_DATA +"_mlpbnTrue_SVRG_loss_val.txt") if LOAD_SVRG: mlpbn_svrg_acc_test= np.loadtxt(PATH_DATA +"_mlpbnTrue_SVRG_acc_test.txt") if LOAD_SVRG: mlpbn_svrg_loss_test= np.loadtxt(PATH_DATA +"_mlpbnTrue_SVRG_loss_test.txt") count_mlpbn_ADAsgd = 100 count_mlpbn_Streamingsvrg = 100 count_mlpbn_svrg = 100 if DRAW_MLPBN_ADASGD: count_mlpbn_ADAsgd = np.arange(mlpbn_ADAsgd_acc_val.shape[0])+1 if DRAW_MLPBN_StreamingSVRG: count_mlpbn_Streamingsvrg = np.arange(mlpbn_Streamingsvrg_acc_train.shape[0])+1 if DRAW_MLPBN_SVRG: count_mlpbn_svrg = np.arange(mlpbn_svrg_acc_val.shape[0])+1 # print mlp_sgd_acc_train # if (MAXLENGTH>0 or STARTPOINT>0): # if DRAW_MLPBN_ADASGD: count_mlpbn_ADAsgd = count_mlpbn_ADAsgd[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPbn_StreamingSVRG: count_mlpbn_Streamingsvrg = count_mlpbn_Streamingsvrg[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_SVRG: count_mlpbn_svrg = count_mlpbn_svrg[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_ADASGD: mlpbn_ADAsgd_acc_test = mlpbn_ADAsgd_acc_test[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPbn_StreamingSVRG: mlpbn_Streamingsvrg_acc_test = mlpbn_Streamingsvrg_acc_test[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_SVRG: mlpbn_svrg_acc_test = 
mlpbn_svrg_acc_test[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_ADASGD: mlpbn_ADAsgd_loss_test = mlpbn_ADAsgd_loss_test[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPbn_StreamingSVRG: mlpbn_Streamingsvrg_loss_test = mlpbn_Streamingsvrg_loss_test[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_SVRG: mlpbn_svrg_loss_test = mlpbn_svrg_loss_test[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_ADASGD: mlpbn_ADAsgd_acc_val = mlpbn_ADAsgd_acc_val[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPbn_StreamingSVRG: mlpbn_Streamingsvrg_acc_val = mlpbn_Streamingsvrg_acc_val[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_SVRG: mlpbn_svrg_acc_val = mlpbn_svrg_acc_val[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_ADASGD: mlpbn_ADAsgd_loss_val = mlpbn_ADAsgd_loss_val[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPbn_StreamingSVRG: mlpbn_Streamingsvrg_loss_val = mlpbn_Streamingsvrg_loss_val[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_SVRG: mlpbn_svrg_loss_val = mlpbn_svrg_loss_val[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_ADASGD: mlpbn_ADAsgd_acc_train = mlpbn_ADAsgd_acc_train[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPbn_StreamingSVRG: mlpbn_Streamingsvrg_acc_train = mlpbn_Streamingsvrg_acc_train[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_SVRG: mlpbn_svrg_acc_train = mlpbn_svrg_acc_train[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_ADASGD: mlpbn_ADAsgd_loss_train = mlpbn_ADAsgd_loss_train[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPbn_StreamingSVRG: mlpbn_Streamingsvrg_loss_train = mlpbn_Streamingsvrg_loss_train[STARTPOINT:MAXLENGTH+1] # if DRAW_MLPBN_SVRG: mlpbn_svrg_loss_train = mlpbn_svrg_loss_train[STARTPOINT:MAXLENGTH+1] #PLOT matplotlib.rcParams.update({'font.size': 16}) plt.figure(1) plt.title('Loss of Validation Set') if DRAW_MLPBN_ADASGD: plt.plot(count_mlpbn_ADAsgd, mlpbn_ADAsgd_loss_val, SPEC_L1 ,label="AdaGrad", linewidth = LINEWIDTH) if DRAW_MLPBN_StreamingSVRG: plt.plot(count_mlpbn_Streamingsvrg, mlpbn_Streamingsvrg_loss_val, SPEC_L3 ,label="Streaming SVRG", linewidth = LINEWIDTH) if DRAW_MLPBN_SVRG: plt.plot(count_mlpbn_svrg, mlpbn_svrg_loss_val, SPEC_L4 ,label="SVRG", linewidth = LINEWIDTH) plt.xlabel('# Epochs') plt.ylabel('Loss') plt.legend() # plt.show() pylab.savefig(PATH_FIGURE+'CroszsModel_Validation_Set_Loss'+'.png',bbox_inches='tight') plt.figure(2) plt.title('Predict Accuracy of Validation Set') if DRAW_MLPBN_ADASGD: plt.plot(count_mlpbn_ADAsgd, mlpbn_ADAsgd_acc_val, SPEC_L1 ,label="AdaGrad", linewidth = LINEWIDTH) if DRAW_MLPBN_StreamingSVRG: plt.plot(count_mlpbn_Streamingsvrg, mlpbn_Streamingsvrg_acc_val, SPEC_L3 ,label="Streaming SVRG", linewidth = LINEWIDTH) if DRAW_MLPBN_SVRG: plt.plot(count_mlpbn_svrg, mlpbn_svrg_acc_val, SPEC_L4 ,label="SVRG", linewidth = LINEWIDTH) plt.xlabel('# Epochs') plt.ylabel('Predict Accuracy') plt.legend(bbox_to_anchor=(1,0.4)) # plt.show() pylab.savefig(PATH_FIGURE+'CrossModel_Validation_Set_Predict_Accuracy'+'.png',bbox_inches='tight') plt.figure(3) plt.title('Loss of Training Set') if Y_LIM_FINE_TUNING: pylab.ylim([-0.01,0.25]) if DRAW_MLPBN_ADASGD: plt.plot(count_mlpbn_ADAsgd, mlpbn_ADAsgd_loss_train, SPEC_L1 ,label="AdaGrad", linewidth = LINEWIDTH) if DRAW_MLPBN_StreamingSVRG: plt.plot(count_mlpbn_Streamingsvrg, mlpbn_Streamingsvrg_loss_train, SPEC_L3 ,label="Streaming SVRG", linewidth = LINEWIDTH) if DRAW_MLPBN_SVRG: plt.plot(count_mlpbn_svrg, mlpbn_svrg_loss_train, SPEC_L4 ,label="SVRG", linewidth = LINEWIDTH) plt.xlabel('# Epochs') plt.ylabel('Loss') plt.legend() # plt.show() pylab.savefig(PATH_FIGURE+'CrossModel_Training_Set_Loss'+'.png',bbox_inches='tight') # plt.figure(4) # plt.title('Predict Accuracy of Training Set') # if 
Y_LIM_FINE_TUNING: pylab.ylim([0.93,1.01]) # if DRAW_MLPBN_ADASGD: plt.plot(count_mlpbn_ADAsgd, mlpbn_ADAsgd_acc_train, SPEC_L1 ,label="AdaGrad", linewidth = LINEWIDTH) # if DRAW_MLPbn_StreamingSVRG: plt.plot(count_mlpbn_Streamingsvrg, mlpbn_Streamingsvrg_acc_train, SPEC_L3 ,label="Streaming SVRG", linewidth = LINEWIDTH) # if DRAW_MLPBN_SVRG: plt.plot(count_mlpbn_svrg, mlpbn_svrg_acc_train, SPEC_L4 ,label="SVRG", linewidth = LINEWIDTH) # plt.xlabel('# Epochs') # plt.ylabel('Predict Accuracy') # plt.legend(bbox_to_anchor=(1,0.4)) # # plt.show() # pylab.savefig(PATH_FIGURE+'CrossModel_Training_Set_Predict_Accuracy'+'.png',bbox_inches='tight') # plt.figure(5) # plt.title('Predict Accuracy of Test Set') # if DRAW_MLPBN_ADASGD: plt.plot(count_mlpbn_ADAsgd, mlpbn_ADAsgd_acc_test, SPEC_L1 ,label="AdaGrad", linewidth = LINEWIDTH) # if DRAW_MLPbn_StreamingSVRG: plt.plot(count_mlpbn_Streamingsvrg, mlpbn_Streamingsvrg_acc_test, SPEC_L3 ,label="Streaming SVRG", linewidth = LINEWIDTH) # if DRAW_MLPBN_SVRG: plt.plot(count_mlpbn_svrg, mlpbn_svrg_acc_test, SPEC_L4 ,label="SVRG", linewidth = LINEWIDTH) # plt.xlabel('# Epochs') # plt.ylabel('Predict Accuracy') # plt.legend(bbox_to_anchor=(1,0.4)) # # plt.show() # pylab.savefig(PATH_FIGURE+'CrossModel_Test_Set_Predict_Accuracy'+'.png',bbox_inches='tight') # plt.figure(6) # plt.title('Loss of Test Set') # if DRAW_MLPBN_ADASGD: plt.plot(count_mlpbn_ADAsgd, mlpbn_ADAsgd_loss_test, SPEC_L1 ,label="AdaGrad", linewidth = LINEWIDTH) # if DRAW_MLPbn_StreamingSVRG: plt.plot(count_mlpbn_Streamingsvrg, mlpbn_Streamingsvrg_loss_test, SPEC_L3 ,label="Streaming SVRG", linewidth = LINEWIDTH) # if DRAW_MLPBN_SVRG: plt.plot(count_mlpbn_svrg, mlpbn_svrg_loss_test, SPEC_L4 ,label="SVRG", linewidth = LINEWIDTH) # plt.xlabel('# Epochs') # plt.ylabel('Loss') # plt.legend() # pylab.savefig(PATH_FIGURE+'CrossModel_Test_Set_Loss'+'.png',bbox_inches='tight') # # plt.show() print ("Finish drawing cross model plots.") if __name__ == '__main__': if ('--help' in sys.argv) or ('-h' in sys.argv) or ('help' in sys.argv): print("arg: NUM_EPOCHS") else: kwargs = {} if len(sys.argv) > 1: kwargs['num_epochs'] = int(sys.argv[1]) main(**kwargs)
mit
-733,654,721,010,098,000
45.173077
156
0.721887
false
joshloyal/Nettie
examples/otto_nn.py
1
2482
import logging
import sys

import numpy as np
import pandas as pd
import plac

np.random.seed(1337)  # for reproducibility

import nettie.backend as net

from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler

from keras.utils import np_utils

# mxnet logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# nettie logger
logging.getLogger('nettie').addHandler(logging.StreamHandler(stream=sys.stdout))


def load_data(path, train=True):
    df = pd.read_csv(path)
    X = df.values.copy()
    if train:
        np.random.shuffle(X)  # https://youtu.be/uyUXoap67N8
        X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
        return X, labels
    else:
        X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
        return X, ids


def preprocess_data(X, scaler=None):
    if not scaler:
        scaler = StandardScaler()
        scaler.fit(X)
    X = scaler.transform(X)
    return X, scaler


def preprocess_labels(labels, encoder=None, categorical=True):
    if not encoder:
        encoder = LabelEncoder()
        encoder.fit(labels)
    y = encoder.transform(labels).astype(np.int32)
    if categorical:
        y = np_utils.to_categorical(y)
    return y, encoder


def main(backend):
    print('Loading data...')
    X, labels = load_data('train.csv', train=True)
    X, scaler = preprocess_data(X)

    if backend == 'keras':
        y, encoder = preprocess_labels(labels)
        nb_classes = y.shape[1]
        print(nb_classes, 'classes')
    else:
        y, encoder = preprocess_labels(labels, categorical=False)
        nb_classes = len(np.unique(y))
        print(nb_classes, 'classes')

    dims = X.shape[1]
    print(dims, 'dims')

    nn = net.set_backend(backend)
    model = nn.Sequential()
    model.add(nn.Dense(512, input_shape=(dims,)))
    model.add(nn.PReLU())
    model.add(nn.BatchNormalization())
    model.add(nn.Dropout(p=0.5))

    model.add(nn.Dense(512))
    model.add(nn.PReLU())
    model.add(nn.BatchNormalization())
    model.add(nn.Dropout(p=0.5))

    model.add(nn.Dense(512))
    model.add(nn.PReLU())
    model.add(nn.BatchNormalization())
    model.add(nn.Dropout(p=0.5))

    model.add(nn.Dense(nb_classes))
    model.add(nn.Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)

    return model


if __name__ == '__main__':
    model = plac.call(main)
apache-2.0
-1,944,420,355,367,230,500
24.587629
80
0.646656
false
cqychen/quants
quants/loaddata/skyeye_ods_invest_refer_restrict.py
1
3314
#coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import numpy as np
import math
import sys
sys.path.append('../') #add the config directory to the import path
from common_function import *

def create_table(table_name):
    cmd='''
        create table if not exists %s
        (
         `code` VARCHAR (10) comment 'stock code'
        ,`name` VARCHAR (63) comment 'stock name'
        ,`date` VARCHAR (63) comment 'date the restricted shares are released'
        ,`count` DOUBLE comment 'number of shares released'
        ,`ratio` DOUBLE comment 'ratio of released shares to total shares'
        ,`year` INT comment 'release year'
        ,`month` INT comment 'release month'
        ,PRIMARY KEY (`code`,`date`)
        ,index(code)
        )DEFAULT CHARSET=utf8
        '''%table_name
    print (cmd)
    run_mysql_cmd(cmd,conn)

def get_data_date(year,month):
    cmd='''delete from %s where `year`=%s and `month`=%s '''%(table_name,year,month)
    print cmd
    run_mysql_cmd(cmd=cmd,conn=conn) #delete existing rows for the given year and month
    try:
        rs=ts.xsg_data(year=year,month=month)
        rs['year']=year
        rs['month']=month
        rs=rs.drop_duplicates() #drop duplicates -- surprisingly the API returns duplicate rows; this API (tushare) is not great
        pd.DataFrame.to_sql(rs, table_name, con=conn , flavor='mysql', if_exists='append',index=False)
        return rs
    except:
        print("this year has no records")
        return None

def load_data(): #download basic company information, including stock code, PE ratio, etc.
    max_year=int(get_max_year_table(table_name)) #also need to fetch the previous year's data
    (year,quarter,mon,day,hour,min,sec,wday,yday,isdst)=get_date_struct()
    while True:
        print(max_year,year)
        if(max_year<=year):
            print("getting year is %s"%max_year)
            get_data_date(year=max_year,month=1)
            get_data_date(year=max_year,month=2)
            get_data_date(year=max_year,month=3)
            get_data_date(year=max_year,month=4)
            get_data_date(year=max_year,month=5)
            get_data_date(year=max_year,month=6)
            get_data_date(year=max_year,month=7)
            get_data_date(year=max_year,month=8)
            get_data_date(year=max_year,month=9)
            get_data_date(year=max_year,month=10)
            get_data_date(year=max_year,month=11)
            get_data_date(year=max_year,month=12)
            print("\n\n")
            max_year=max_year+1
        else:
            break

if __name__ == '__main__':
    print("--------------task started-----------------------------")
    startTime=dt.time()
    iphost,user,passwd=get_mysql_conn()
    db='ods_data'
    charset='utf8'
    table_name='ods_invest_refer_restrict'
    conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
    #--------------------script execution starts here--------------------------------
    create_table(table_name=table_name) #create the table
    load_data()
    endTime=dt.time()
    print("---------------script finished, total elapsed time %sS------------------"%(endTime-startTime))
epl-1.0
6,649,690,695,077,840,000
34.395349
102
0.580815
false
boland1992/seissuite_iran
bin/split_to_days.py
2
3796
# -*- coding: utf-8 -*- """ Created on Fri May 27 08:24:22 2016 @author: iese """ import os import warnings import datetime as dt import itertools as it import pickle import obspy.signal.cross_correlation import time import glob import sqlite3 as lite import shutil import numpy as np import matplotlib.pyplot as plt from obspy import read import datetime as dt from obspy.core import UTCDateTime as utc def get_filepaths(directory): """ This function will generate the file names in a directory tree by walking the tree either top-down or bottom-up. For each directory in the tree rooted at directory top (including top itself), it yields a 3-tuple (dirpath, dirnames, filenames). """ file_paths = [] # List which will store all of the full filepaths. # Walk the tree. for root, directories, files in os.walk(directory): for filename in files: # Join the two strings in order to form the full filepath. filepath = os.path.join(root, filename) file_paths.append(filepath) # Add it to the list. return file_paths # Self-explanatory. # set epoch timestamp epoch = dt.datetime(1970, 1, 1) total_verbose = True psd = False # create a list of configuration files that will be iterated over! # must be 1 or more from seissuite.ant.psconfig import (create_config_list, run_config, remove_config) config_list = create_config_list() total_time0 = dt.datetime.now() for config_file in config_list: # global variables MUST be defined # with the function in the seissuite.ant.psconfig module run_config(config_file) from seissuite.ant import (pscrosscorr, psstation, pspreprocess, pserrors, psstationSQL) # import CONFIG class initalised in ./configs/tmp_config.pickle config_pickle = 'configs/tmp_config.pickle' f = open(name=config_pickle, mode='rb') CONFIG = pickle.load(f) f.close() # import variables from initialised CONFIG class. MSEED_DIR = CONFIG.MSEED_DIR DATABASE_DIR = CONFIG.DATABASE_DIR abs_paths = get_filepaths(MSEED_DIR) #abs_paths = ['/home/iese/Documents/local_data/SAMOA/INPUT/DATA/Stations/AFI/Data/2005/335/IU_AFI_BHZ_10_2005_335.msd'] for path in abs_paths: try: st = read(path, headonly=True) times = [] for tr in st: start_time = tr.stats.starttime # reduce the first day to the beginning time i.e. midnight. start_time = start_time - (start_time.hour * 3600) - (start_time.minute * 60) - start_time.second start_time = start_time.timestamp end_time = tr.stats.endtime.timestamp #tr.stats.endtime times.append(start_time) times.append(end_time) days = int((end_time - start_time)/86400) + 1 time_intervals = [utc(i) for i in np.linspace(min(times), max(times), days)] for i in range(1, len(time_intervals)): starttime = time_intervals[i-1] endtime = time_intervals[i] st_partial = read(path, starttime=starttime, endtime=endtime) net = st_partial[0].stats.network stat = st_partial[0].stats.station loc = st_partial[0].stats.location channel = st_partial[0].stats.channel year = starttime.year jul_day = starttime.julday write_string = '{}_{}_{}_{}_{}_{}.msd'.format(net, stat, channel, loc, year, jul_day) print write_string mseed_write = os.path.join(os.path.dirname(path), write_string) st_partial.write(mseed_write, format='MSEED') except Exception as error: print error os.remove(path) quit()
gpl-3.0
-7,134,496,521,460,275,000
26.507246
123
0.636459
false
arcyfelix/ML-DL-AI
Supervised Learning/Image Recognition/SimpleParallelCNN/import_data.py
5
3739
import pandas as pd import numpy as np from tflearn.data_utils import to_categorical from import_data import * def import_csv(file_path, shuffle = False): data = pd.read_csv(file_path) print('*' * 70) print('Import CSV file has been successful!') if shuffle == True: data.reindex(np.random.permutation(data.index)) print('The data has been shuffled!') else: print('The data has not been shuffled!') return data def labels_info(output_data): labels_names = np.unique(output_data) number_of_labels = labels_names.shape[0] print('*' * 70) print("Number of uniques categories:", number_of_labels) labels_as_numbers = np.arange(number_of_labels) print("Categories as numbers", labels_as_numbers) for _ in labels_as_numbers: print('Category ' + str(_) + ' is ' + str(labels_names[_])) return number_of_labels def labels_as_numbers(output_data): _, output_data_as_numbers = np.unique(output_data, return_inverse=True) return output_data_as_numbers # ------------------------------------------------------------------------------- # Acquiring the data def get_data_MNIST(): folder = 'Digit Recognizer' file_name = 'train.csv' specific_dataset_source = folder + '/' + file_name output_columns = ['label'] data = import_csv(specific_dataset_source, shuffle = True) # Data split into the input and output x_data = data y_data = np.array(data.pop('label')) print('Shape of the input data:', x_data.shape) print('Shape of the output data:', y_data.shape) # Standalization x_data = x_data / 255 num_samples = x_data.shape[0] input_features = x_data.shape[1] print('Number of samples:', num_samples) print('Number of the input features:', input_features) y_data_as_numbers = labels_as_numbers(y_data) # Cross validation data preparation split_percentage = 80 split_index = int(x_data.shape[0]/(100/split_percentage)) x_train = np.array(x_data[:split_index]) x_val = np.array(x_data[split_index:]) y_train = np.array(y_data_as_numbers[:split_index]) y_val = np.array(y_data_as_numbers[split_index:]) # Information about the data print(x_train.shape) print(x_val.shape) print(y_train.shape) print(y_val.shape) # Shaping data into the correct shape. x_train = x_train.reshape([-1, 28, 28, 1]) x_val = x_val.reshape([-1, 28, 28, 1]) y_train = to_categorical(y_train, nb_classes = 10) y_val = to_categorical(y_val, nb_classes = 10) return x_train, x_val, y_train, y_val def get_data_MNIST_test(): # Loading the test data file_name_test = 'test.csv' folder = 'Digit Recognizer' source = folder + '/' + file_name_test data = pd.read_csv(source) test_input = data.loc[:, :] return test_input.as_matrix() # Oxford Flowers Dataset def get_data_oxford_flowers(): import tflearn.datasets.oxflower17 as oxflower17 X, Y = oxflower17.load_data(one_hot = True, resize_pics = (227, 227)) split_percentage = 80 split_index = int(X.shape[0]/(100/split_percentage)) x_train = np.array(X[:split_index]) x_val = np.array(X[split_index:]) y_train = np.array(Y[:split_index]) y_val = np.array(Y[split_index:]) return x_train, x_val, y_train, y_val def get_data_CIFAR10(dataset = 'Train + Val'): from tflearn.datasets import cifar10 (X, Y), (X_test, Y_test) = cifar10.load_data() # Size is 32, 32, 3 split_percentage = 80 split_index = int(X.shape[0]/(100/split_percentage)) x_train = np.array(X[:split_index]) x_val = np.array(X[split_index:]) y_train = np.array(Y[:split_index]) y_val = np.array(Y[split_index:]) Y = to_categorical(Y, 10) Y_test = to_categorical(Y_test, 10) if dataset == 'Test': return x_train, x_val, y_train, y_val else: return X_test, Y_test
apache-2.0
-5,756,571,852,618,478,000
25.707143
81
0.656593
false
yarikoptic/pystatsmodels
statsmodels/datasets/template_data.py
3
1650
#! /usr/bin/env python
"""Name of dataset."""

__docformat__ = 'restructuredtext'

COPYRIGHT = """E.g., This is public domain."""
TITLE = """Title of the dataset"""
SOURCE = """
This section should provide a link to the original dataset if possible and
attribution and correspondence information for the dataset's original author
if so desired.
"""

DESCRSHORT = """A short description."""

DESCRLONG = """A longer description of the dataset."""

#suggested notes
NOTE = """
Number of observations:
Number of variables:
Variable name definitions:

Any other useful information that does not fit into the above categories.
"""

import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath

def load():
    """
    Load the data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    data = _get_data()
    ##### SET THE INDICES #####
    #NOTE: None for exog_idx is the complement of endog_idx
    return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)

def load_pandas():
    data = _get_data()
    ##### SET THE INDICES #####
    #NOTE: None for exog_idx is the complement of endog_idx
    return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
                                      dtype=float)

def _get_data():
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    data = np.recfromtxt(open(filepath + '/DatasetName.csv', 'rb'),
                         delimiter=",", names = True, dtype=float)
    return data
bsd-3-clause
-619,918,122,027,384,400
27.448276
77
0.649091
false
has2k1/plotnine
doc/conf.py
1
14918
# -*- coding: utf-8 -*- # # plotnine documentation build configuration file, created by # sphinx-quickstart on Wed Dec 23 22:32:29 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os on_rtd = os.environ.get('READTHEDOCS') == 'True' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. CUR_PATH = os.path.dirname(os.path.abspath(__file__)) PROJECT_PATH = os.path.abspath(CUR_PATH + '/../') sys.path.insert(0, CUR_PATH) sys.path.insert(0, PROJECT_PATH) if on_rtd: import mock from pprint import pprint MOCK_MODULES = [] for mod_name in MOCK_MODULES: sys.modules[mod_name] = mock.Mock() pprint(os.environ) pprint(sys.path) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '3.3.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. sys.path.insert(0, os.path.abspath('.')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.extlinks', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary', 'sphinxext.examples_and_gallery', 'sphinxext.inline_code_highlight', 'nbsphinx', 'numpydoc', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'plotnine' copyright = '2021, Hassan Kibirige' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. try: import plotnine version = plotnine.__version__ except ImportError: version = 'unknown' # readthedocs modifies the repository which messes up the version. if on_rtd: import re version = version.rstrip('.dirty') version = re.sub(r'\+0\..+', '', version) version # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ '_build', # Deprecated 'generated/plotnine.themes.themeable.facet_spacing.rst' ] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. 
function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # https://github.com/ryan-roemer/sphinx-bootstrap-theme html_theme_options = { 'navbar_title': 'plotnine', 'globaltoc_depth': 2, 'globaltoc_includehidden': 'true', 'source_link_position': 'footer', 'navbar_sidebarrel': False, 'navbar_links': [ ('API', 'api'), ('Gallery', 'gallery'), ('Tutorials', 'tutorials') ], } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['.'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'images/logo-32.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'images/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # html_sidebars = { # Default to no sidebar '**': [], # local table of contents for the API page 'api': ['localtoc.html'] } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'plotninedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 'papersize': 'a4paper', # The font size ('10pt', '11pt' or '12pt'). 'pointsize': '12pt', # Additional stuff for the LaTeX preamble. 'preamble': r""" \usepackage{charter} \usepackage[defaultsans]{lato} \usepackage{inconsolata} """, } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'plotnine.tex', 'plotnine Documentation', 'Hassan Kibirige', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'plotnine', 'plotnine Documentation', ['Hassan Kibirige'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'plotnine', 'plotnine Documentation', 'Hassan Kibirige', 'plotnine', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = 'plotnine' epub_author = 'Hassan Kibirige' epub_publisher = 'Hassan Kibirige' epub_copyright = '2021, Hassan Kibirige' # The basename for the epub file. It defaults to the project name. # epub_basename = 'plotnine' # The HTML theme for the epub output. Since the default themes are not # optimized for small screen space, using the same theme for HTML and epub # output is usually not wise. This defaults to 'epub', a theme designed to # save visual space. # epub_theme = 'epub' # The language of the text. It defaults to the language option # or en if the language is not set. # epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. # epub_scheme = '' # The unique identifier of the text. 
This can be a ISBN number # or the project homepage. # epub_identifier = '' # A unique identification for the text. # epub_uid = '' # A tuple containing the cover image and cover page html template filenames. # epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. # epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # The depth of the table of contents in toc.ncx. # epub_tocdepth = 3 # Allow duplicate toc entries. # epub_tocdup = True # Choose between 'default' and 'includehidden'. # epub_tocscope = 'default' # Fix unsupported image types using the PIL. # epub_fix_images = False # Scale large images. # epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. # epub_show_urls = 'inline' # If false, no index is generated. # epub_use_index = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'matplotlib': ('https://matplotlib.org/', None), 'numpy': ('https://numpy.org/doc/stable/', None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None), 'statsmodels': ('https://www.statsmodels.org/stable/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'sklearn': ('https://scikit-learn.org/stable/', None), 'skmisc': ('https://has2k1.github.io/scikit-misc/stable/', None), 'adjustText': ('https://adjusttext.readthedocs.io/en/latest/', None), 'patsy': ('https://patsy.readthedocs.io/en/stable', None) } # -- Extension configuration ---------------------------------------------- autodoc_member_order = 'bysource' autosummary_generate = True extlinks = { 'issue': ('https://github.com/has2k1/plotnine/issues/%s', 'GH') } # numpydoc numpydoc_show_class_members = False numpydoc_class_members_toctree = False numpydoc_xref_param_type = True numpydoc_xref_aliases = { # python 'sequence': ':term:`python:sequence`', 'iterable': ':term:`python:iterable`', 'string': 'str', 'tuples': 'tuple', 'boolean': 'bool', # numpy 'array': 'numpy.ndarray', 'np.array': 'numpy.ndarray', 'ndarray': 'numpy.ndarray', 'array-like': ':term:`array-like<numpy:array_like>`', 'array_like': ':term:`numpy:array_like`', # pandas 'dataframe': 'pandas.DataFrame', 'DataFrame': 'pandas.DataFrame', 'Series': 'pandas.Series', 'series': 'pandas.Series', # plotnine 'geom': ':term:`geom`', 'stat': ':term:`stat`', 'position': ':term:`position`', 'expression': ':term:`expression`', 'aes': 'plotnine.aes', 'ggplot': 'plotnine.ggplot', 'element_line': 'plotnine.themes.element_line', 'element_rect': 'plotnine.themes.element_rect', 'element_text': 'plotnine.themes.element_text', } numpydoc_xref_ignore = {'type', 'optional', 'default'} def link_to_tutorials(): # Linking to the directory does not work well with # nbsphinx. 
We link to the files themselves from glob import glob from plotnine_examples.tutorials import TUTPATH dest_dir = os.path.join(CUR_PATH, 'tutorials') # Unlink files from previous build for old_file in glob(dest_dir + '/*.ipynb'): os.unlink(old_file) # Link files for this build for file in glob(TUTPATH + '/*.ipynb'): basename = os.path.basename(file) dest = os.path.join(dest_dir, basename) os.symlink(file, dest) def setup(app): link_to_tutorials() app.add_css_file('custom.css')
gpl-2.0
-5,996,918,961,074,342,000
29.822314
79
0.680051
false
AnnieJumpCannon/RAVE
article/figures/plot_kordopatis_calibration_sample.py
1
3739
""" Plot giant abundances w.r.t. GES. """ import numpy as np import matplotlib.pyplot as plt from collections import OrderedDict from matplotlib.ticker import MaxNLocator from matplotlib.colors import LogNorm from mpl_toolkits.axes_grid1 import make_axes_locatable try: rave_cannon_dr1, kordopatis_comparisons except NameError: # Do you know who I am? That's Jeff Vader! from rave_io import get_cannon_dr1, get_kordopatis_comparisons rave_cannon_dr1 = get_cannon_dr1() kordopatis_comparisons = get_kordopatis_comparisons() from astropy.table import join data_table = join(rave_cannon_dr1, kordopatis_comparisons, keys=("Name", )) else: print("Warning: Using pre-loaded data.") ok = data_table["QC"]# * (data_table["R"] > 10) latex_labels = { "TEFF_2": r"$T_{\rm eff}$ $[{\rm K}]$ $({\rm Literature})$", "TEFF_1": r"$T_{\rm eff}$ $[{\rm K}]$ $({\rm \it{RAVE}}{\rm -on})$", "LOGG_1": r"$\log{g}$ $({\rm \it{RAVE}}{\rm -on})$", "LOGG_2": r"$\log{g}$ $({\rm Literature})$", "FE_H": r"$[{\rm Fe/H}]$ $({\rm \it{RAVE}}{\rm -on})$", "FEH": r"$[{\rm Fe/H}]$ $({\rm Literature})$" } cannon_labels = ("TEFF_1", "LOGG_1", "FE_H") literature_labels = ("TEFF_2", "LOGG_2", "FEH") limits = { "TEFF_1": [3500, 7500], "LOGG_1": [0, 5.5], "FE_H": [-3.5, 0.75] } kwds = dict(cmap="plasma", vmin=np.nanmin(data_table["snr"]), vmax=np.nanmax(data_table["snr"])) K = len(cannon_labels) factor = 3.5 lbdim = 0.25 * factor tdim = 0.1 * factor rdim = 0.2 * factor whspace = 0.05 yspace = factor xspace = factor * K + factor * (K - 1) * whspace + lbdim * (K - 1) xdim = lbdim + xspace + rdim ydim = lbdim + yspace + tdim fig, axes = plt.subplots(1, K, figsize=(xdim, ydim)) fig.subplots_adjust( left=lbdim/xdim, bottom=lbdim/ydim, right=(xspace + lbdim)/xdim, top=(yspace + lbdim)/ydim, wspace=whspace, hspace=whspace) for i, (ax, cannon_label, literature_label) \ in enumerate(zip(axes, cannon_labels, literature_labels)): x = data_table[literature_label] y = data_table[cannon_label] c = data_table["snr"] #xerr = data_table["e_{}".format(literature_label)] yerr = data_table["E_{}".format(cannon_label).strip("_1")] ax.errorbar(x[ok], y[ok], yerr=yerr[ok], fmt=None, ecolor="#666666", zorder=-1) scat = ax.scatter(x[ok], y[ok], c=c[ok], s=50, **kwds) _ = ax.scatter([-999], [-9999], c=[0], **kwds) for ax, cannon_label, literature_label in zip(axes, cannon_labels, literature_labels): lims = limits[cannon_label] ax.plot(lims, lims, c="#666666", zorder=-1, linestyle=":") ax.set_xlim(lims) ax.set_ylim(lims) ax.xaxis.set_major_locator(MaxNLocator(6)) ax.yaxis.set_major_locator(MaxNLocator(6)) ax.set_xlabel(latex_labels[literature_label]) ax.set_ylabel(latex_labels[cannon_label]) axes[0].set_xticks([4000, 5000, 6000, 7000]) axes[0].set_yticks([4000, 5000, 6000, 7000]) axes[-1].set_xticks([-3.5, -2.5, -1.5, -0.5, 0.5]) axes[-1].set_yticks([-3.5, -2.5, -1.5, -0.5, 0.5]) fig.tight_layout() cbar = plt.colorbar(_, cax=fig.add_axes([0.93, fig.subplotpars.bottom, 0.02, fig.subplotpars.top - fig.subplotpars.bottom])) cbar.set_label(r"${\rm S/N}$ ${\rm RAVE}$ $[{\rm pixel}^{-1}]$") fig.subplots_adjust(right=0.90) fig.savefig("kordopatis-calibration.pdf", dpi=300) fig.savefig("kordopatis-calibration.png") for ref in set(data_table["REF"]): for cannon_label, literature_label in zip(cannon_labels, literature_labels): match = data_table["REF"] == ref x = data_table[cannon_label][match] y = data_table[literature_label][match] diff = y - x print(ref, np.isfinite(diff).sum(), cannon_label, np.nanmean(diff), np.nanstd(diff))
mit
6,726,250,664,844,613,000
27.112782
105
0.625836
false
taliamo/Final_Project
organ_pitch/Scripts/pitch_munge2.py
1
2360
#T. Martz-Oberlander, 2015-11-15
#Script for wrangling pitch data into a dataframe with mean and standard dev. of sound frequencies
# To call this script: $ python Scripts/pitch_munge.py Data/[input_filename] Data/[output_pitch_dataframe_name] Figures/[output_pitch_fig_name]

#Import useful libraries
import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt

# I define my arguments (input and output)
input_filename = sys.argv[1]
output_pitch_dataframe = sys.argv[2]
output_pitch_fig = sys.argv[3]

#I open my main function
def main():

    #import pitch data file
    pitch = pd.read_table(input_filename, sep=',')

    #use date/time timestamp values
    pitch['time'] = pd.to_datetime(pitch['time'])

    #create new column for mean frequency from 9 frequency measurements
    pitch['mean_freq'] = np.mean(pitch[['freq1','freq2','freq3', 'freq4', 'freq5', 'freq6', 'freq7', 'freq8', 'freq9']], axis=1)

    #Test to see if data is a float, and useable in a plot
    def test_data_type(data):
        '''Check to see if a column contains only floats'''
        obs = data.dtype #check the dtype of the column passed into the test
        #print(obs)
        exp = 'float64'
        assert obs == exp, 'Data is not a float'
        return

    #Call the test function on the 'freq5' column in the 'pitch' dataframe
    test_data_type(pitch['freq5'])

    #do the same for standard deviation
    pitch['stdev_freq'] = np.std(pitch['mean_freq'])

    #select rows of the pitch dataframe for single div's (sections) of the chapel
    organized_pitch = pitch.groupby(['div']).get_group('choir')

    #save this dataframe as a file that can be called in later scripts
    organized_pitch.to_csv(output_pitch_dataframe, sep=',')

    #Function to plot the new dataframe for one chapel section
    def make_plot(data):
        '''Make line plot for measured pitch'''
        #Plot figure of change in pitch over time
        plt.figure(figsize=(8,5))
        #Select data
        fig = plt.plot(organized_pitch['time'], organized_pitch['mean_freq'], color = 'navy')
        #Make title and labels for plot
        plt.title('Pitch of C5 Pipe Organ Note')
        plt.ylabel('Sound Frequency (Hz)')
        plt.xlabel('Time of Sample Taken (Apr. 13, 16, and 17, 2010)')
        #Save figure in Figures
        plt.savefig(output_pitch_fig)
        #Close visualization function
        return()

    #Call visualization function
    make_plot(organized_pitch)

#close main function
main()
mit
876,836,327,803,956,400
30.052632
143
0.722458
false
mlperf/training_results_v0.7
Google/benchmarks/gnmt/implementations/gnmt-research-TF-tpu-v4-16/nmt.py
2
23361
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow NMT model implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import random import sys # import matplotlib.image as mpimg import numpy as np import six from six.moves import range import tensorflow.compat.v1 as tf # copybara:strip_begin from REDACTED import REDACTED from REDACTED.tensorflow.contrib import training as contrib_training # copybara:strip_end from REDACTED.tensorflow.contrib.training.python.training import evaluation from REDACTED.tensorflow.python.ops import control_flow_util from REDACTED.tensorflow_models.mlperf.models.rough.mlp_log import mlp_log from REDACTED.tensorflow_models.mlperf.models.rough.nmt import estimator from REDACTED.tensorflow_models.mlperf.models.rough.nmt.utils import iterator_utils from REDACTED.tensorflow_models.mlperf.models.rough.nmt.utils import misc_utils as utils from REDACTED.tensorflow_models.mlperf.models.rough.nmt.utils import vocab_utils utils.check_tensorflow_version() FLAGS = None def add_arguments(parser): """Build ArgumentParser.""" parser.register("type", "bool", lambda v: v.lower() == "true") # network parser.add_argument( "--num_units", type=int, default=1024, help="Network size.") parser.add_argument( "--num_layers", type=int, default=4, help="Network depth.") parser.add_argument("--num_encoder_layers", type=int, default=None, help="Encoder depth, equal to num_layers if None.") parser.add_argument("--num_decoder_layers", type=int, default=None, help="Decoder depth, equal to num_layers if None.") parser.add_argument("--num_embeddings_partitions", type=int, default=0, help="Number of partitions for embedding vars.") # optimizer parser.add_argument( "--optimizer", type=str, default="adam", help="sgd | adam") parser.add_argument( "--learning_rate", type=float, default=0.001, help="Learning rate. Adam: 0.001 | 0.0001") parser.add_argument( "--warmup_steps", type=int, default=200, help="How many steps we inverse-decay learning.") parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\ How to warmup learning rates. 
Options include: t2t: Tensor2Tensor's way, start with lr 100 times smaller, then exponentiate until the specified lr.\ """) parser.add_argument( "--decay_start", type=int, default=3000, help="step to start decay") parser.add_argument( "--decay_interval", type=int, default=400, help="interval steps between 2 decays") parser.add_argument( "--decay_steps", type=int, default=5, help="number of decays") parser.add_argument( "--decay_factor", type=float, default=0.66, help="decay rate") parser.add_argument( "--max_train_epochs", type=int, default=8, help="Maximum number of training epochs.") parser.add_argument("--num_examples_per_epoch", type=int, default=3442299, help="Number of examples in one epoch") parser.add_argument("--label_smoothing", type=float, default=0.1, help=("If nonzero, smooth the labels towards " "1/num_classes.")) # initializer parser.add_argument("--init_op", type=str, default="uniform", help="uniform | glorot_normal | glorot_uniform") parser.add_argument("--init_weight", type=float, default=0.1, help=("for uniform init_op, initialize weights " "between [-this, this].")) # data parser.add_argument( "--src", type=str, default="en", help="Source suffix, e.g., en.") parser.add_argument( "--tgt", type=str, default="de", help="Target suffix, e.g., de.") parser.add_argument( "--data_dir", type=str, default="", help="Training/eval data directory.") parser.add_argument( "--local_data_dir", type=str, default="", help="Training/eval data directory.") parser.add_argument( "--train_prefix", type=str, default="train.tok.clean.bpe.32000", help="Train prefix, expect files with src/tgt suffixes.") parser.add_argument( "--test_prefix", type=str, default="newstest2014", help="Test prefix, expect files with src/tgt suffixes.") parser.add_argument( "--use_preprocessed_data", type="bool", default=True, help="Whether to use preprocessed training data.") parser.add_argument( "--out_dir", type=str, default=None, help="Store log/model files.") # Vocab parser.add_argument( "--vocab_prefix", type=str, default="vocab.bpe.32000", help="""\ Vocab prefix, expect files with src/tgt suffixes.\ """) parser.add_argument("--check_special_token", type="bool", default=True, help="""\ Whether check special sos, eos, unk tokens exist in the vocab files.\ """) # Sequence lengths parser.add_argument( "--src_max_len", type=int, default=48, help="Max length of src sequences during training.") parser.add_argument( "--tgt_max_len", type=int, default=48, help="Max length of tgt sequences during training.") parser.add_argument( "--src_max_len_infer", type=int, default=160, help="Max length of src sequences during inference.") parser.add_argument( "--tgt_max_len_infer", type=int, default=160, help="""\ Max length of tgt sequences during inference. Also use to restrict the maximum decoding length.\ """) # Default settings works well (rarely need to change) parser.add_argument("--forget_bias", type=float, default=0.0, help="Forget bias for BasicLSTMCell.") parser.add_argument("--dropout", type=float, default=0.2, help="Dropout rate (not keep_prob)") parser.add_argument("--max_gradient_norm", type=float, default=5.0, help="Clip gradients to this norm.") parser.add_argument("--batch_size", type=int, default=512, help="Batch size.") parser.add_argument("--steps_per_stats", type=int, default=5, help=("How many training steps to do per stats logging." 
"Save checkpoint every 10x steps_per_stats")) parser.add_argument( "--num_buckets", type=int, default=5, help="Put data into similar-length buckets.") parser.add_argument( "--choose_buckets", type=int, default=1, help="Choose from this number of length buckets per training step.") # SPM parser.add_argument("--subword_option", type=str, default="bpe", choices=["", "bpe", "spm"], help="""\ Set to bpe or spm to activate subword desegmentation.\ """) # Misc parser.add_argument( "--num_shards", type=int, default=8, help="Number of shards (TPU cores).") parser.add_argument( "--num_shards_per_host", type=int, default=8, help="Number of shards (TPU cores) per host.") parser.add_argument( "--num_gpus", type=int, default=4, help="Number of gpus in each worker.") parser.add_argument( "--num_infeed_workers", type=int, default=1, help="Number of TPU workers used for input generation.") parser.add_argument( "--num_tpu_workers", type=int, default=1, help="Number of TPU workers; if set, uses the distributed-sync pipeline.") parser.add_argument("--hparams_path", type=str, default=None, help=("Path to standard hparams json file that overrides" "hparams values from FLAGS.")) parser.add_argument( "--random_seed", type=int, default=None, help="Random seed (>0, set a specific seed).") # Inference parser.add_argument("--ckpt", type=str, default="", help="Checkpoint file to load a model for inference.") parser.add_argument( "--infer_batch_size", type=int, default=512, help="Batch size for inference mode.") parser.add_argument( "--examples_to_infer", type=int, default=3003, help="Number of examples to infer.") parser.add_argument("--detokenizer_file", type=str, default="mosesdecoder/scripts/tokenizer/detokenizer.perl", help=("""Detokenizer script file.""")) parser.add_argument("--use_REDACTED", type=bool, default=False) parser.add_argument( "--target_bleu", type=float, default=24.0, help="Target accuracy.") # Advanced inference arguments parser.add_argument("--infer_mode", type=str, default="beam_search", choices=["greedy", "sample", "beam_search"], help="Which type of decoder to use during inference.") parser.add_argument("--beam_width", type=int, default=5, help=("""\ beam width when using beam search decoder. If 0 (default), use standard decoder with greedy helper.\ """)) parser.add_argument( "--length_penalty_weight", type=float, default=0.6, help="Length penalty for beam search.") parser.add_argument( "--coverage_penalty_weight", type=float, default=0.1, help="Coverage penalty for beam search.") # Job info parser.add_argument("--jobid", type=int, default=0, help="Task id of the worker.") # TPU parser.add_argument("--use_tpu", type=bool, default=True) parser.add_argument("--master", type=str, default="", help=("Address of the master. Either --master or " "--tpu_name must be specified.")) parser.add_argument("--tpu_name", type=str, default=None, help=("Name of the TPU for Cluster Resolvers. 
Either " "--tpu_name or --master must be specified.")) parser.add_argument("--use_dynamic_rnn", type=bool, default=False) parser.add_argument("--use_synthetic_data", type=bool, default=False) parser.add_argument( "--mode", type=str, default="train_and_eval", choices=["train", "train_and_eval", "infer", "preprocess"]) parser.add_argument( "--activation_dtype", type=str, default="bfloat16", choices=["float32", "bfloat16"]) parser.add_argument("--tpu_job_name", type=str, default=None) # copybara:strip_begin # Vizier parser.add_argument("--client_handle", type=str, default="", help=("Client_handle for the tuner.")) parser.add_argument("--study_name", type=str, default=None, help=("Name of Vizier hparams tuning study.")) parser.add_argument("--REDACTED", type=int, default=REDACTED.StudyConfig.RANDOM_SEARCH, help=("Vizier search algorithm to use.")) # copybara:strip_end def create_hparams(flags): """Create training hparams.""" train_dir = flags.local_data_dir if flags.local_data_dir else flags.data_dir return contrib_training.HParams( # Data src=flags.src, tgt=flags.tgt, train_prefix=train_dir + flags.train_prefix, test_prefix=flags.data_dir + flags.test_prefix, vocab_prefix=flags.data_dir + flags.vocab_prefix, out_dir=flags.out_dir, # Networks num_units=flags.num_units, num_encoder_layers=(flags.num_encoder_layers or flags.num_layers), num_decoder_layers=(flags.num_decoder_layers or flags.num_layers), dropout=flags.dropout, num_embeddings_partitions=flags.num_embeddings_partitions, # Train optimizer=flags.optimizer, max_train_epochs=flags.max_train_epochs, num_examples_per_epoch=flags.num_examples_per_epoch, batch_size=flags.batch_size, num_train_steps=int(flags.num_examples_per_epoch / flags.batch_size * flags.max_train_epochs), init_op=flags.init_op, init_weight=flags.init_weight, max_gradient_norm=flags.max_gradient_norm, learning_rate=flags.learning_rate, label_smoothing=flags.label_smoothing, warmup_steps=flags.warmup_steps, warmup_scheme=flags.warmup_scheme, decay_start=flags.decay_start, decay_interval=flags.decay_interval, decay_steps=flags.decay_steps, decay_factor=flags.decay_factor, # Data constraints num_buckets=flags.num_buckets, choose_buckets=flags.choose_buckets, src_max_len=flags.src_max_len, tgt_max_len=flags.tgt_max_len, use_preprocessed_data=flags.use_preprocessed_data, # Inference src_max_len_infer=flags.src_max_len_infer, tgt_max_len_infer=flags.tgt_max_len_infer, infer_batch_size=flags.infer_batch_size, examples_to_infer=flags.examples_to_infer, detokenizer_file=flags.data_dir + flags.detokenizer_file, use_REDACTED=flags.use_REDACTED, target_bleu=flags.target_bleu, # Advanced inference arguments infer_mode=flags.infer_mode, beam_width=flags.beam_width, length_penalty_weight=flags.length_penalty_weight, coverage_penalty_weight=flags.coverage_penalty_weight, # Vocab sos=vocab_utils.SOS, eos=vocab_utils.EOS, subword_option=flags.subword_option, check_special_token=flags.check_special_token, # Misc forget_bias=flags.forget_bias, num_shards=flags.num_shards, num_shards_per_host=flags.num_shards_per_host, num_gpus=flags.num_gpus, num_infeed_workers=flags.num_infeed_workers, epoch_step=0, # record where we were within an epoch. 
steps_per_stats=flags.steps_per_stats, random_seed=flags.random_seed, # TPU use_tpu=flags.use_tpu, master=flags.master, tpu_name=flags.tpu_name, use_dynamic_rnn=flags.use_dynamic_rnn, use_synthetic_data=flags.use_synthetic_data, mode=flags.mode, activation_dtype=flags.activation_dtype, tpu_job_name=flags.tpu_job_name) def _add_argument(hparams, key, value, update=True): """Add an argument to hparams; if exists, change the value if update==True.""" if hasattr(hparams, key): if update: setattr(hparams, key, value) else: hparams.add_hparam(key, value) def extend_hparams(hparams): """Add new arguments to hparams.""" # Sanity checks if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]: raise ValueError("subword option must be either spm, or bpe") if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0: raise ValueError("beam_width must greater than 0 when using beam_search" "decoder.") # Different number of encoder / decoder layers assert hparams.num_encoder_layers == hparams.num_decoder_layers # The first unidirectional layer (after the bi-directional layer) in # the GNMT encoder can't have residual connection due to the input is # the concatenation of fw_cell and bw_cell's outputs. num_encoder_residual_layers = hparams.num_encoder_layers - 2 num_decoder_residual_layers = num_encoder_residual_layers _add_argument(hparams, "num_encoder_residual_layers", num_encoder_residual_layers) _add_argument(hparams, "num_decoder_residual_layers", num_decoder_residual_layers) ## Vocab # Get vocab file names first if hparams.vocab_prefix: src_vocab_file = six.ensure_str( hparams.vocab_prefix) + "." + six.ensure_str(hparams.src) tgt_vocab_file = six.ensure_str( hparams.vocab_prefix) + "." + six.ensure_str(hparams.tgt) else: raise ValueError("hparams.vocab_prefix must be provided.") # Source vocab src_vocab_size, src_vocab_file = vocab_utils.check_vocab( src_vocab_file, hparams.out_dir, check_special_token=hparams.check_special_token, sos=hparams.sos, eos=hparams.eos, unk=vocab_utils.UNK) # Target vocab utils.print_out(" using source vocab for target") tgt_vocab_file = src_vocab_file tgt_vocab_size = src_vocab_size _add_argument(hparams, "src_vocab_size", src_vocab_size) _add_argument(hparams, "tgt_vocab_size", tgt_vocab_size) _add_argument(hparams, "src_vocab_file", src_vocab_file) _add_argument(hparams, "tgt_vocab_file", tgt_vocab_file) # Num embedding partitions _add_argument( hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions) _add_argument( hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions) # Pretrained Embeddings _add_argument(hparams, "src_embed_file", "") _add_argument(hparams, "tgt_embed_file", "") return hparams def create_or_load_hparams(default_hparams, hparams_path): """Create hparams or load hparams from out_dir.""" hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path) hparams = extend_hparams(hparams) # Print HParams utils.print_hparams(hparams) return hparams def prepare_dataset(flags): """Generate the preprocessed dataset.""" src_file = "%s.%s" % (flags.data_dir + flags.train_prefix, flags.src) tgt_file = "%s.%s" % (flags.data_dir + flags.train_prefix, flags.tgt) vocab_file = flags.data_dir + flags.vocab_prefix _, vocab_file = vocab_utils.check_vocab(vocab_file, flags.out_dir) out_file = six.ensure_str(flags.out_dir) + "preprocessed_dataset" src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(vocab_file) src_dataset = tf.data.TextLineDataset(src_file) tgt_dataset = 
tf.data.TextLineDataset(tgt_file) iterator = iterator_utils.get_iterator( src_dataset, tgt_dataset, src_vocab_table, tgt_vocab_table, batch_size=1, global_batch_size=1, sos=vocab_utils.SOS, eos=vocab_utils.EOS, random_seed=1, num_buckets=flags.num_buckets, src_max_len=flags.src_max_len, tgt_max_len=flags.tgt_max_len, filter_oversized_sequences=True, return_raw=True).make_initializable_iterator() with tf.Session() as sess: sess.run(tf.tables_initializer()) sess.run(iterator.initializer) try: i = 0 while True: with open(out_file + "_%d" % i, "wb") as f: i += 1 for _ in range(100): for j in sess.run(iterator.get_next()): tf.logging.info(j) f.write(bytearray(j)) except tf.errors.OutOfRangeError: pass def run_main(flags, default_hparams, estimator_fn): """Run main.""" # Job jobid = flags.jobid utils.print_out("# Job id %d" % jobid) # Random random_seed = flags.random_seed if random_seed is not None and random_seed > 0: utils.print_out("# Set random seed to %d" % random_seed) random.seed(random_seed + jobid) np.random.seed(random_seed + jobid) tf.set_random_seed(random_seed) mlp_log.mlperf_print("cache_clear", True) mlp_log.mlperf_print("init_start", None) mlp_log.mlperf_print("submission_benchmark", "resnet") mlp_log.mlperf_print("submission_division", "closed") mlp_log.mlperf_print("submission_org", "google") mlp_log.mlperf_print("submission_platform", "tpu-v3-%d" % FLAGS.num_shards) mlp_log.mlperf_print("submission_status", "research") mlp_log.mlperf_print("global_batch_size", FLAGS.batch_size) mlp_log.mlperf_print("opt_learning_rate_alt_decay_func", "True") mlp_log.mlperf_print("opt_base_learning_rate", FLAGS.learning_rate) mlp_log.mlperf_print("opt_learning_rate_decay_interval", FLAGS.decay_interval) mlp_log.mlperf_print("opt_learning_rate_decay_factor", FLAGS.decay_factor) mlp_log.mlperf_print("opt_learning_rate_decay_steps", FLAGS.decay_steps) mlp_log.mlperf_print("opt_learning_rate_remain_steps", FLAGS.decay_start) mlp_log.mlperf_print("opt_learning_rate_alt_warmup_func", FLAGS.warmup_scheme) mlp_log.mlperf_print("opt_learning_rate_warmup_steps", FLAGS.warmup_steps) mlp_log.mlperf_print( "max_sequence_length", FLAGS.src_max_len, metadata={"method": "discard"}) mlp_log.mlperf_print("train_samples", FLAGS.num_examples_per_epoch) mlp_log.mlperf_print("eval_samples", FLAGS.examples_to_infer) # Model output directory out_dir = flags.out_dir if out_dir and not tf.gfile.Exists(out_dir): utils.print_out("# Creating output directory %s ..." % out_dir) tf.gfile.MakeDirs(out_dir) # Load hparams. 
hparams = create_or_load_hparams(default_hparams, flags.hparams_path) # Train or Evaluation return estimator_fn(hparams) def main(unused_argv): # pylint: disable=g-long-lambda control_flow_util.ENABLE_CONTROL_FLOW_V2 = True if FLAGS.mode == "preprocess": prepare_dataset(FLAGS) elif FLAGS.mode == "train": print("Running training mode.") default_hparams = create_hparams(FLAGS) run_main(FLAGS, default_hparams, estimator.train_fn) elif FLAGS.mode == "train_and_eval": print("Running training and evaluation mode.") default_hparams = create_hparams(FLAGS) run_main(FLAGS, default_hparams, estimator.train_and_eval_with_low_level_api) else: print("Running inference mode.") default_hparams = create_hparams(FLAGS) current_epoch = 0 last_step = 0 # Run evaluation when there's a new checkpoint for ckpt in evaluation.checkpoints_iterator(FLAGS.out_dir): # Terminate eval job once target score is reached current_step = int(six.ensure_str(os.path.basename(ckpt)).split("-")[1]) if current_step <= last_step: continue last_step = current_step tf.logging.info("Starting to evaluate...%s", ckpt) try: score = run_main(FLAGS, default_hparams, estimator.eval_fn) current_epoch += 1 if score > FLAGS.target_bleu: tf.logging.info( "Evaluation finished after training step %d" % current_step) break # Terminate eval job when final checkpoint is reached max_steps = default_hparams.num_train_steps if current_step >= max_steps: tf.logging.info( "Evaluation finished but failed to reach target score.") break except tf.errors.NotFoundError: tf.logging.info( "Checkpoint %s no longer exists, skipping checkpoint" % ckpt) if __name__ == "__main__": tf.logging.set_verbosity(tf.logging.INFO) nmt_parser = argparse.ArgumentParser() add_arguments(nmt_parser) FLAGS, unparsed = nmt_parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
-4,903,790,235,716,215,000
36.080952
88
0.64719
false
jesuscript/topo-mpi
contrib/jacommands.py
1
46822
# -*- coding: utf-8 -*- import __main__ import numpy import pylab import os.path import os import copy import pdb import param import topo.pattern.basic import topo.command.analysis from math import pi, sqrt, exp, pow from numpy.oldnumeric import zeros, Float, sum from topo.projection.basic import CFProjection, SharedWeightCFProjection from topo.base.boundingregion import BoundingBox from topo import numbergen from topo.pattern.basic import Gaussian, Selector, Null from topo.transferfn.basic import DivisiveNormalizeL1, HomeostaticMaxEnt, TransferFnWithState, Sigmoid, PiecewiseLinear from topo.base.arrayutil import clip_lower from topo.sheet.lissom import LISSOM from topo.sheet.optimized import NeighborhoodMask_Opt, LISSOM_Opt from topo.plotting.plotfilesaver import * from topo.command.pylabplot import cyclic_tuning_curve, matrixplot from topo.command.analysis import save_plotgroup from param import normalize_path from topo.command.pylabplot import plot_tracked_attributes from topo.base.functionfamily import CoordinateMapperFn from topo.plotting.bitmap import MontageBitmap from topo.base.patterngenerator import PatternGenerator, Constant from topo.transferfn.basic import Sigmoid import matplotlib matplotlib.use('Agg') from pylab import * from matplotlib import * def save_tuning_curve_data(filename, sheet, x_axis, curve_label, x, y): i_value, j_value = sheet.sheet2matrixidx(x, y) x_values = sorted(sheet.curve_dict[x_axis][curve_label].keys()) y_values = [sheet.curve_dict[x_axis][curve_label][key].view()[0][i_value, j_value] for key in x_values] aa = (x_values, y_values) save(filename, aa, fmt='%.6f', delimiter=',') return aa def get_tuning_curve_data(sheet, x_axis, curve_label, x, y): i_value, j_value = sheet.sheet2matrixidx(x, y) x_values = sorted(sheet.curve_dict[x_axis][curve_label].keys()) y_values = [sheet.curve_dict[x_axis][curve_label][key].view()[0][i_value, j_value] for key in x_values] if(x_axis != "size"): x_values.pop(0);y_values.pop(0); if(x_axis == "size"): for i in xrange(len(x_values)): x_values[i] = x_values[i] / 2.0 #print [x_values,y_values] #print [x_values).pop(0).tolist(),fromlist(y_values).pop(0).tolist()] return [x_values, y_values] def save_tuning_curves(prefix): #directory = "./LateralLGNData/" directory = "./pokus/" filename = prefix + ",str=" + str(__main__.LGNLatStr) + ",freq=2.5" + ",surr_size=" + str(__main__.LGNSurroundSize) + ",lat_size=" + str(__main__.LGNLatSurroundSize) + ",const=1,tsettle=2," topo.mycommands.save_tuning_curve_data(directory + filename + "con=30%.dat", topo.sim["LGNOnSep"], "size", "Contrast = 30%", 0, 0) topo.mycommands.save_tuning_curve_data(directory + filename + "con=80%.dat", topo.sim["LGNOnSep"], "size", "Contrast = 80%", 0, 0) topo.mycommands.save_tuning_curve_data(directory + "Freq" + filename + "con=30%.dat", topo.sim["LGNOnSep"], "frequency", "Contrast = 30%", 0, 0) topo.mycommands.save_tuning_curve_data(directory + "Freq" + filename + "con=80%.dat", topo.sim["LGNOnSep"], "frequency", "Contrast = 80%", 0, 0) fit_curves(directory, filename, prefix) def fit_curves(directory, filename, prefix): stat_filename = "stats" #from mlabwrap import mlab # start a Matlab session c_asc30 = get_tuning_curve_data(topo.sim["LGNOnSep"], "size", "Contrast = 30%", 0, 0) c_asc80 = get_tuning_curve_data(topo.sim["LGNOnSep"], "size", "Contrast = 80%", 0, 0) c_freq30 = get_tuning_curve_data(topo.sim["LGNOnSep"], "frequency", "Contrast = 30%", 0, 0) c_freq80 = get_tuning_curve_data(topo.sim["LGNOnSep"], "frequency", "Contrast = 80%", 0, 0) 
#(trash1,trash2,asc30) = fmin_tnc(fitDoG, [0, 100 , 2, 0.001, 0.03], fprime= None, args = c_asc30, #approx_grad=True,bounds=[[-1,1],[0,200],[0,30],[0,0.01],[0,0.1]]) #(trash1,trash2,asc80) = fmin_tnc(fitDoG, [0, 100 , 2, 0.001, 0.03], fprime= None, args = c_asc80, #approx_grad=True,bounds=[[-1,1],[0,200],[0,30],[0,0.01],[0,0.1]]) #(trash1,trash2,dog30) = fmin_tnc(fitDoGfreq, [0,-0.2,-0.18,3, 0.7], fprime= None, args = c_freq30, #approx_grad=True,bounds=[[-1,1],[-1,0],[-1,0],[0,10],[0,1]]) #(trash1,trash2,dog80) = fmin_tnc(fitDoGfreq, [0,-0.2,-0.18,3,0.7], fprime= None, args = c_freq80, #approx_grad=True,bounds=[[-1,1],[-1,0],[-1,0],[0,10],[0,1]]) asc30 = fmin(fitDoG, [0.3, 30 , 4, 0.003, 0.015], args=c_asc30, xtol=0.00000001, ftol=0.00000001, maxiter=12000, maxfun=12000) asc80 = fmin(fitDoG, [0.3, 100 , 5, 0.002, 0.03], args=c_asc80, xtol=0.00000001, ftol=0.00000001, maxiter=12000, maxfun=12000) dog30 = fmin(fitDoGfreq, [0.3, - 0.2, - 0.18, 3, 0.7], args=c_freq30, xtol=0.000001, ftol=0.000001, maxiter=12000, maxfun=12000) dog80 = fmin(fitDoGfreq, [0.3, - 0.4, - 0.5, 3, 1], args=c_freq80, xtol=0.000001, ftol=0.000001, maxiter=12000, maxfun=12000) #save the graphs clf() plot(c_asc30[0], c_asc30[1]) plot(c_asc30[0], DoG(c_asc30[0], asc30[0], asc30[1], asc30[2], asc30[3], asc30[4])) savefig(directory + prefix + filename + "ASC30" + ".png"); clf() plot(c_asc80[0], c_asc80[1]) plot(c_asc80[0], DoG(c_asc80[0], asc80[0], asc80[1], asc80[2], asc80[3], asc80[4])) savefig(directory + prefix + filename + "ASC80" + ".png"); clf() plot(c_freq30[0], c_freq30[1]) plot(c_freq30[0], DoGfreq(c_freq30[0], dog30[0], dog30[1], dog30[2], dog30[3], dog30[4])) savefig(directory + prefix + filename + "DOG30" + ".png"); clf() plot(c_freq80[0], c_freq80[1]) plot(c_freq80[0], DoGfreq(c_freq80[0], dog80[0], dog80[1], dog80[2], dog80[3], dog80[4])) savefig(directory + prefix + filename + "DOG80" + ".png"); clf() # now save to file if(not os.path.exists(directory + stat_filename)): print "1\n" f = open(directory + stat_filename, "w") f.write(" Prefix LGNLatStr Frequency CRFSurrSize ECRFSurrSize Const Tsettle Contrast R_0 K_e K_i alpha beta\n") else: print "2\n" f = open(directory + stat_filename, "a") f.write(prefix + " " + str(__main__.LGNLatStr) + " 2.5 " + str(__main__.LGNSurroundSize) + " " + str(__main__.LGNLatSurroundSize) + " 1 2 " + " 80 " + str(asc80) + "\n") f.write(prefix + " " + str(__main__.LGNLatStr) + " 2.5 " + str(__main__.LGNSurroundSize) + " " + str(__main__.LGNLatSurroundSize) + " 1 2 " + " 30 " + str(asc30) + "\n") f.write("Freq" + prefix + " " + str(__main__.LGNLatStr) + " 2.5 " + str(__main__.LGNSurroundSize) + " " + str(__main__.LGNLatSurroundSize) + " 1 2 " + " 80 " + str(dog80) + "\n") f.write("Freq" + prefix + " " + str(__main__.LGNLatStr) + " 2.5 " + str(__main__.LGNSurroundSize) + " " + str(__main__.LGNLatSurroundSize) + " 1 2 " + " 30 " + str(dog30) + "\n") f.close() print "END" def pokus(): #f = open("a.dat","r") #data = [line.split() for line in f] #f.close() #return data data = [[0.023333, 0.046667, 0.070000, 0.093333, 0.116667, 0.140000, 0.163333, 0.186667, 0.210000, 0.233333, 0.256667, 0.280000, 0.303333, 0.326667, 0.350000, 0.373333, 0.396667, 0.420000, 0.443333, 0.466667, 0.490000, 0.513333, 0.536667, 0.560000, 0.583333, 0.606667, 0.630000, 0.653333, 0.676667, 0.700000], [0.000000, 0.140682, 0.323128, 0.381151, 0.453309, 0.495148, 0.511234, 0.503472, 0.476296, 0.452621, 0.421329, 0.391765, 0.378917, 0.358795, 0.345896, 0.327351, 0.318290, 0.308762, 0.297803, 0.292149, 0.285739, 
0.281539, 0.274593, 0.272147, 0.266158, 0.263694, 0.259160, 0.255340, 0.252024, 0.248111]] def DoG(Input, Rz, Ke, Ki, alpha, beta): A = lambda phi, r: r * exp(- (pow(r, 2.0) / alpha)) B = lambda phi, r: r * exp(- (pow(r, 2.0) / beta)) l = lambda x: 0 h = lambda x: 2 * pi x = zeros(len(Input), Float) for i in xrange(len(Input)): x[i] = Rz + Ke * dblquad(A, 0, Input[i], l, h)[0] - Ki * dblquad(B, 0, Input[i], l, h)[0] return x def DoGfreq(Input, Rz, Ke, Ki, alpha, beta): A = lambda f: 1 - exp(- pow((f / (alpha * 2)), 2)) B = lambda f: 1 - exp(- pow((f / (beta * 2)), 2)) x = zeros(len(Input), Float) for i in xrange(len(Input)): x[i] = Rz + Ke * A(Input[i]) - Ki * B(Input[i]) return x def fitDoG(x, Input, Actual_Output): Rz = x[0] Ke = x[1] Ki = x[2] alpha = x[3] beta = x[4] Fitted_Curve = DoG(Input, Rz, Ke, Ki, alpha, beta) s = 0 for i in xrange(len(Fitted_Curve)): s = s + (Fitted_Curve[i] - Actual_Output[i]) * (Fitted_Curve[i] - Actual_Output[i]) return s def fitDoGfreq(x, Input, Actual_Output): Rz = x[0] Ke = x[1] Ki = x[2] alpha = x[3] beta = x[4] Fitted_Curve = DoGfreq(Input, Rz, Ke, Ki, alpha, beta) s = 0 for i in xrange(len(Fitted_Curve)): s = s + (Fitted_Curve[i] - Actual_Output[i]) * (Fitted_Curve[i] - Actual_Output[i]) return s def save_plots(prefix): p = CFProjectionPlotGroupSaver("Projection"), pre = prefix + create_prefix(["V1afferent_lr" , "V1afferent_str", "V1afferent_lrtc", "V1afferent_size", "V2lateral_inh_size", "V2lateral_exc_size"]), p.filename_prefix = pre, p.projection_name = "V1Afferent", p.sheet_name = "V2", p.plotgroup = p.generate_plotgroup(), p.plotgroup.update_plots(True), p.save_to_disk() def AddV2(): corners = [topo.pattern.basic.Composite(operator=numpy.maximum, generators=[ #topo.pattern.basic.Gaussian(scale=1,size = 0.08838,orientation=0,aspect_ratio=5.6666,x=0.45), #topo.pattern.basic.Gaussian(scale=1,size = 0.08838,orientation=pi/2,aspect_ratio=5.6666,y=0.45)], topo.pattern.basic.Gaussian(scale=1, size=0.04, orientation=0, aspect_ratio=9, x=0.2), topo.pattern.basic.Gaussian(scale=1, size=0.04, orientation=pi / 2, aspect_ratio=9, y=0.2)], scale=1.0, bounds=BoundingBox(radius=0.5), x=numbergen.UniformRandom(lbound= - (__main__.__dict__.get('BS', 0.5)), ubound=(__main__.__dict__.get('BS', 0.5)), seed=12), y=numbergen.UniformRandom(lbound= - (__main__.__dict__.get('BS', 0.5)), ubound=(__main__.__dict__.get('BS', 0.5)), seed=34), orientation=numbergen.UniformRandom(lbound= - pi, ubound=pi, seed=56)) for i in xrange(1)] #combined_corners = topo.pattern.basic.SeparatedComposite(min_separation=2.2*0.27083,generators=corners) combined_corners = corners[0] topo.sim['Retina'].set_input_generator(combined_corners) AH = ActivityHysteresis(time_constant=0.5) HE=SimpleHomeoLinear(smoothing=0.999,eta=locals().get('V2_eta',0.001), mu=locals().get('V2MU',0.01),t_init=0.05) V2_OF = [AH,HE] topo.sim['V2'] = LISSOM(nominal_density=__main__.__dict__.get('default_density', 48.0), nominal_bounds=BoundingBox(radius=__main__.__dict__.get('CS', 0.5)), tsettle=16, output_fns=V2_OF) #make sure that activity is reset at the beginning of iteration topo.sim['V2'].beginning_of_iteration.append(AH.reset) topo.sim.connect('V1Complex', 'V2', delay=0.05, dest_port=('Activity', 'JointNormalize', 'Afferent'), connection_type=CFProjection, strength=__main__.__dict__.get('V2aff_str', 2), name='V1Afferent', weights_generator=topo.pattern.basic.Composite(operator=numpy.multiply, generators=[Gaussian(aspect_ratio=1.0, size=3), #__main__.__dict__.get('V1aff_size',30)), 
topo.pattern.random.UniformRandom()]), nominal_bounds_template=BoundingBox(radius=__main__.__dict__.get('V2aff_size', 4 * 0.27083) / 2), learning_rate=__main__.__dict__.get('V2_lr', 1.0)); topo.sim.connect('V2', 'V2', delay=0.025, name='V2LateralExcitatory', connection_type=CFProjection, strength=__main__.__dict__.get('V2lat_exc_str', 2.5), weights_generator=topo.pattern.basic.Gaussian(aspect_ratio=1.0, size=__main__.__dict__.get('V2lat_exc_size', 0.05)), nominal_bounds_template=BoundingBox(radius=__main__.__dict__.get('V2lat_exc_size', 0.104)), learning_rate=0) topo.sim.connect('V2', 'V2', delay=0.025, name='V2LateralInhibitory', connection_type=CFProjection, strength= - __main__.__dict__.get('V2lat_inh_str', 2.0), weights_generator=topo.pattern.basic.Composite(operator=numpy.multiply, generators=[Gaussian(aspect_ratio=1.0, size=__main__.__dict__.get('V2lat_inh_size', 0.15)), topo.pattern.random.UniformRandom()]), nominal_bounds_template=BoundingBox(radius=__main__.__dict__.get('V2lat_inh_size', 2 * 0.22917) / 2), learning_rate=0) #topo.sim["V1Simple"].in_connections[0].strength=3.0 #topo.sim["V1Simple"].in_connections[1].strength=3.0 #topo.sim["V1Complex"].output_fn.output_fns[1].r=7 topo.sim["V1Simple"].plastic = False topo.sim["V1Complex"].plastic = False topo.sim["V1ComplexInh"].plastic = False topo.sim["V1Simple"].output_fns[1].plastic=False topo.sim["V1Complex"].output_fns[1].plastic=False ### Lateral excitatory bounds changes #LE='topo.sim["V2"].projections()["V2LateralExcitatory"]' #topo.sim.schedule_command( 20200,LE+'.change_bounds(BoundingBox(radius=0.06250))') #topo.sim.schedule_command( 20500,LE+'.change_bounds(BoundingBox(radius=0.04375))') #topo.sim.schedule_command( 21000,LE+'.change_bounds(BoundingBox(radius=0.03500))') #topo.sim.schedule_command( 22000,LE+'.change_bounds(BoundingBox(radius=0.02800))') #topo.sim.schedule_command( 23000,LE+'.change_bounds(BoundingBox(radius=0.02240))') #topo.sim.schedule_command( 24000,LE+'.change_bounds(BoundingBox(radius=0.01344))') #topo.sim.schedule_command( 25000,LE+'.change_bounds(BoundingBox(radius=0.00806))') #topo.sim.schedule_command( 26500,LE+'.change_bounds(BoundingBox(radius=0.00484))') #topo.sim.schedule_command( 28000,LE+'.change_bounds(BoundingBox(radius=0.00290))') #topo.sim.schedule_command(40000,LE+'.change_bounds(BoundingBox(radius=0.00174))') #global parameter holding the activities activity_history = numpy.array([]) def collect_activity_statistics(): contrib.jacommands.activity_history = numpy.concatenate((contrib.jacommands.activity_history, topo.sim["V1"].activity.flatten()), axis=1) if(int(topo.sim.time()) == 10000): pylab.figure() pylab.hist(contrib.jacommands.activity_history, (numpy.arange(20.0) / 20.0)) pylab.savefig(str(topo.sim.time()) + 'activity_histogram.png') # measure_or_tuning_fullfield() # cyclic_tuning_curve_batch(filename="OrientationTC:V1:[0,0]",sheet=topo.sim["V1"],coords=[(0,0)],x_axis="orientation") save_plotgroup('Activity') def homeostatic_analysis_function(): """ Basic example of an analysis command for run_batch; users are likely to need something similar but highly customized. 
""" #plot_tracked_attributes(output_fn=topo.sim["V1"].output_fn.output_fns[0], init_time=0, final_timetopo.sim.time(), filename="Afferent", ylabel="Afferent") #plot_tracked_attributes(output_fn=topo.sim["V1"].output_fn.output_fns[2], init_time=0, final_timetopo.sim.time(), filename="V1", ylabel="V1") class SimpleHomeoSigmoid(TransferFnWithState): mu = param.Number(default=0.01, doc="Target average activity.") a_init = param.Number(default=13, doc="Multiplicative parameter controlling the exponential.") b_init = param.Number(default= - 4, doc="Additive parameter controlling the exponential.") eta = param.Number(default=0.0002, doc="Learning rate for homeostatic plasticity.") smoothing = param.Number(default=0.9997, doc="Weighting of previous activity vs. current activity when calculating the average.") randomized_init = param.Boolean(False, doc="Whether to randomize the initial B parameter") noise_magnitude = param.Number(default=0.1, doc="The magnitude of the additive noise to apply to the B parameter at initialization") def __init__(self, **params): super(SimpleHomeoSigmoid, self).__init__(**params) self.first_call = True self.__current_state_stack=[] def __call__(self, x): if self.first_call: self.first_call = False self.a = ones(x.shape, x.dtype.char) * self.a_init if self.randomized_init: self.b = ones(x.shape, x.dtype.char) * self.b_init + (topo.pattern.random.UniformRandom(seed=13)(xdensity=x.shape[0], ydensity=x.shape[1]) - 0.5) * self.noise_magnitude * 2 else: self.b = ones(x.shape, x.dtype.char) * self.b_init self.y_avg = zeros(x.shape, x.dtype.char) * self.mu x_orig = copy(x) x *= 0.0 x += 1.0 / (1.0 + exp(- (self.a * x_orig + self.b))) if self.plastic & (float(topo.sim.time()) % 1.0 >= 0.54): self.y_avg = (1.0 - self.smoothing) * x + self.smoothing * self.y_avg self.b -= self.eta * (self.y_avg - self.mu) def state_push(self): """ Save the current state of the output function to an internal stack. """ self.__current_state_stack.append((copy(self.b), copy(self.y_avg), copy(self.first_call))) super(SimpleHomeoSigmoid, self).state_push() def state_pop(self): """ Pop the most recently saved state off the stack. See state_push() for more details. """ self.b, self.y_avg, self.first_call = self.__current_state_stack.pop() super(SimpleHomeoSigmoid, self).state_pop() class SimpleHomeoLinear(TransferFnWithState): mu = param.Number(default=0.01, doc="Target average activity.") t_init = param.Number(default=0.0, doc="Threshold parameter") alpha = param.Number(default=1.0, doc="Linear slope parameter") eta = param.Number(default=0.0002, doc="Learning rate for homeostatic plasticity.") smoothing = param.Number(default=0.9997, doc="Weighting of previous activity vs. 
current activity when calculating the average.") randomized_init = param.Boolean(False, doc="Whether to randomize the initial t parameter") noise_magnitude = param.Number(default=0.1, doc="The magnitude of the additive noise to apply to the B parameter at initialization") def __init__(self, **params): super(SimpleHomeoLinear, self).__init__(**params) self.first_call = True self.__current_state_stack=[] def __call__(self, x): if self.first_call: self.first_call = False if self.randomized_init: self.t = ones(x.shape, x.dtype.char) * self.t_init + (topo.pattern.random.UniformRandom(seed=123)(xdensity=x.shape[0], ydensity=x.shape[1]) - 0.5) * self.noise_magnitude * 2 else: self.t = ones(x.shape, x.dtype.char) * self.t_init self.y_avg = ones(x.shape, x.dtype.char) * self.mu x_orig = copy(x) x -= self.t clip_lower(x, 0) x *= self.alpha if self.plastic & (float(topo.sim.time()) % 1.0 >= 0.54): self.y_avg = (1.0 - self.smoothing) * x + self.smoothing * self.y_avg self.t += self.eta * (self.y_avg - self.mu) def state_push(self): """ Save the current state of the output function to an internal stack. """ self.__current_state_stack.append((copy(self.t), copy(self.y_avg), copy(self.first_call))) super(SimpleHomeoLinear, self).state_push() def state_pop(self): """ Pop the most recently saved state off the stack. See state_push() for more details. """ self.t, self.y_avg, self.first_call = self.__current_state_stack.pop() super(SimpleHomeoLinear, self).state_pop() class Jitter(CoordinateMapperFn): scale = 0.4 rand = param.Parameter(default=None) def __call__(self, x, y): return x + (self.rand() - 0.5) * self.scale, y + (self.rand() - 0.5) * self.scale current_histogram = [] activity_queue = [] call_time = 0 def update_histogram(sheet_name="V1"): import contrib.jacommands contrib.jacommands.activity_queue.insert(0, topo.sim[sheet_name].activity) if(contrib.jacommands.call_time >= 1000): contrib.jacommands.activity_queue.pop() contrib.jacommands.call_time = contrib.jacommands.call_time + 1 contrib.jacommands.current_histogram = numpy.empty(0) for a in contrib.jacommands.activity_queue: numpy.concatenate((contrib.jacommands.current_histogram, a.flatten()), axis=1) print contrib.jacommands.current_histogram activities = [] def collect_activity(sheet_name): import contrib.jacommands contrib.jacommands.activities.insert(0, topo.sim[sheet_name].activity.copy()) def measure_histogram(iterations=1000, sheet_name="V1"): import contrib.jacommands topo.sim["V1"].plastic = False topo.sim.state_push() for i in xrange(0, iterations): topo.sim.run(1) contrib.jacommands.collect_activity(sheet_name) topo.sim.state_pop() concat_activities = [] for a in contrib.jacommands.activities: concat_activities = numpy.concatenate((concat_activities, a.flatten()), axis=1) topo.sim["V1"].plastic = True contrib.jacommands.activities = [] pylab.figure() pylab.subplot(111, yscale='log') #pylab.subplot(111) print shape(concat_activities) mu = sum(concat_activities) / len(concat_activities) print mu (bins, a, b) = pylab.hist(concat_activities, (numpy.arange(80.0) / 40.0) , visible=True) pylab.savefig(normalize_path(str(topo.sim.time()) + 'activity_bar_histogram.png')) bins_axis = numpy.arange(79.0) / 40.0 bins = bins * 1.0 / sum(bins) print sum(bins) exponential = numpy.arange(79, dtype='float32') / 40.0 # compute the mean of the actual distribution #mu=0.024 pylab.figure() pylab.subplot(111, yscale='log') print len(bins_axis) print len(bins) print bins_axis print bins print numpy.exp(- (1 / mu) * (exponential+0.025)) print numpy.exp(- (1 
/ mu) * (exponential)) exponential = - numpy.exp(- (1 / mu) * (exponential+0.025)) + numpy.exp(- (1 / mu) * (exponential)) pylab.plot(bins_axis, bins) pylab.plot(bins_axis, bins, 'ro') pylab.plot(bins_axis, exponential) pylab.plot(bins_axis, exponential, 'go') pylab.axis(ymin=0.0000000001, ymax=100) #pylab.axis("tight") print mean(exponential) print mean(bins) #pylab.show() pylab.savefig(normalize_path(str(topo.sim.time()) + 'activity_histogram.png')) return bins def enable_movie(): # Add a timecode to each movie ActivityMovie.add_timecode = True ActivityMovie.timecode_fmt = '%.2f' # The format for times in filenames ActivityMovie.filename_time_fmt = '%06.2f' # Frame filenames should be like: "frame002.30.tif" ActivityMovie.filename_fmt = 'frame%t.%T' # The directory for movie frames: ActivityMovie.filename_prefix = 'lissom_or_movie/' # Frames should be on a white background MontageBitmap.bg_color = (1, 1, 1) # Maps within each frame will fit to 200x200 pixel tiles MontageBitmap.tile_size = (200, 200) # The montages will contain 1x2 images MontageBitmap.shape = (1, 2) # Frame title parameters MontageBitmap.title_pos = (5, 5) #MontageBitmap.title_options = dict(fill='white') topo.sim['Data'] = InMemoryRecorder() topo.sim.connect('Retina', 'Data', src_port='Activity', name='Retina Activity') topo.sim.connect('V1', 'Data', src_port='Activity', name='V1 Activity') def save_movie(): # Create a movie print 'Composing movie...' movie = ActivityMovie(name='Lissom Orientation Movie', recorder=topo.sim['Data'], montage_params=dict(titles=['Retina', 'V1']), variables=['Retina Activity', 'V1 Activity'], frame_times=list(numpy.arange(0, 10.0, 0.1))) # Save the frames to files: print 'Saving movie to %s...' % ActivityMovie.filename_prefix movie.save() def randomize_V1Simple_relative_LGN_strength(sheet_name="V1Simple", prob=0.5): lgn_on_proj = topo.sim[sheet_name].in_connections[0] lgn_off_proj = topo.sim[sheet_name].in_connections[1] rand =numbergen.UniformRandom(seed=513) rows, cols = lgn_on_proj.cfs.shape for r in xrange(rows): for c in xrange(cols): cf_on = lgn_on_proj.cfs[r, c] cf_off = lgn_off_proj.cfs[r, c] cf_on._has_norm_total = False cf_off._has_norm_total = False ra = rand() ra = (ra-0.5)*2.0 * prob cf_on.weights *= 1-ra cf_off.weights *= (1 + ra) #a = prob #if ra>=0.5: a = (1-a) #cf_on.weights*=a #cf_off.weights*=(1-a) import topo.transferfn.basic ActivityHysteresis = topo.transferfn.basic.Hysteresis SimpleHomeoLinearRelative = topo.transferfn.misc.HomeostaticResponse def _divide_with_constant(x, y): y = numpy.clip(y, 0, 10000) x = numpy.clip(x, 0, 10000) return numpy.divide(x, y + __main__.__dict__.get('LGNGain',0.11)) def add_gc(sheet_name, surround_gaussian_size=0.5, strength=0.63): """ Add divisive normalization to topo.sim[sheet_name], providing contrast gain control and contrast-invariant tuning. Should be used with an LGN sheet of type LISSOM, so that it will respect the tsettle and strict_tsettle parameters. 
""" print surround_gaussian_size lgn_surroundg = Gaussian(size=surround_gaussian_size, aspect_ratio=1.0, output_fns=[DivisiveNormalizeL1()]) topo.sim.connect(sheet_name, sheet_name, delay=0.05, name='LateralGC', dest_port=('Activity'), activity_group=(0.6, _divide_with_constant), connection_type=SharedWeightCFProjection, strength=strength, weights_generator=lgn_surroundg, nominal_bounds_template=BoundingBox(radius=surround_gaussian_size)) topo.sim[sheet_name].tsettle = 2 topo.sim[sheet_name].strict_tsettle = 1 def AddGC(surround_gaussian_size=__main__.__dict__.get('SurrSize',0.5), strength=__main__.__dict__.get('LatLGNStr',0.63)): add_gc('LGNOn',surround_gaussian_size,strength) add_gc('LGNOff',surround_gaussian_size,strength) #class Habituation(TransferFnWithState): # """ # This output function allows the activity to be smoothly interpolated between # individual time step of the simulation. The time_constant paremater controls the # time scale of this interpolation. # """ # # smoothing = param.Number(default=0.99, doc="""The time constant defining the width of the window over which activity is averaged""") # alpha = param.Number(default=1.0, doc="""This parameter defines how strong influence on the output of the neuron does the habituation has """) # # def __init__(self, **params): # super(Habituation, self).__init__(**params) # self.first_call = True # self.y_avg = 0 # # def __call__(self, x): # if (self.first_call == True): # self.old_a = x.copy() * 0.0 # self.first_call = False # # x_orig = copy(x) # if self.plastic: # self.y_avg = (1.0 - self.smoothing) * x + self.smoothing * self.y_avg # # x -= self.alpha * self.y_avg # x -= x * ((x <= 0) * 1.0) class Translator(PatternGenerator): """ PatternGenerator that moves another PatternGenerator over time. To create a pattern at a new location, asks the underlying PatternGenerator to create a new pattern at a location translated by an amount based on the global time. """ generator = param.ClassSelector(default=Constant(scale=0.0), class_=PatternGenerator, doc="""Pattern to be translated.""") direction = param.Number(default=0, bounds=(- pi, pi), doc=""" The direction in which the pattern should move, in radians.""") speed = param.Number(default=1, bounds=(0.0, None), doc=""" The speed with which the pattern should move, in sheet coordinates per simulation time unit.""") reset_period = param.Number(default=1, bounds=(0.0, None), doc=""" When pattern position should be reset, usually to the value of a dynamic parameter. The pattern is reset whenever fmod(simulation_time,reset_time)==0.""") last_time = 0.0 def __init__(self, **params): super(Translator, self).__init__(**params) self.orientation = params.get('orientation', self.orientation) self.index = 0 def __call__(self, **params): """Construct new pattern out of the underlying one.""" generator = params.get('generator', self.generator) # JABALERT: This condition seems to conflict with the # docstring above; plus, the special case of 0.05 should be # documented. Maybe use a special case for last_time=0.0 # instead, to avoid depending on 0.05? xdensity = params.get('xdensity', self.xdensity) ydensity = params.get('ydensity', self.ydensity) bounds = params.get('bounds', self.bounds) # CB: are the float() calls required because the comparisons # involving FixedPoint fail otherwise? Or for some other # reason? 
if((float(topo.sim.time()) >= self.last_time + self.reset_period) or (float(topo.sim.time()) <= 0.05)): if ((float(topo.sim.time()) <= (self.last_time + self.reset_period + 1.0)) and (float(topo.sim.time()) >= 0.05)) : return Null()(xdensity=xdensity, ydensity=ydensity, bounds=bounds) self.last_time += self.reset_period # time to reset the parameter (self.x, self.y, self.scale) = (generator.x, generator.y, generator.scale) if isinstance(generator, Selector): self.index = generator.index generator.force_new_dynamic_value('x') generator.force_new_dynamic_value('y') generator.force_new_dynamic_value('scale') discards = (self.direction, self.orientation) self.direction = ((pi + self.inspect_value("orientation") + pi / 2.0) % (2 * pi)) - pi (a, b, c) = (generator.x, generator.y, generator.scale) # compute how much time elapsed from the last reset t = float(topo.sim.time()) - self.last_time ## CEBALERT: mask gets applied twice, both for the underlying ## generator and for this one. (leads to redundant ## calculations in current lissom_oo_or usage, but will lead ## to problems/limitations in the future). dirr = self.inspect_value("direction") # JAHACKALERT: I want it to move in perpendicular orientation # JAB: Does it do that now, or not? Please clarify. return generator(xdensity=xdensity, ydensity=ydensity, bounds=bounds, x=self.x + t * cos(self.inspect_value("orientation") + pi / 2) * self.speed, y=self.y + t * sin(self.inspect_value("orientation") + pi / 2) * self.speed, orientation=self.inspect_value("orientation"), index=self.inspect_value("index"))#,scale=self.inspect_value("scale")) class Expander(PatternGenerator): """ PatternGenerator that expands another PatternGenerator over time. To create a pattern at a new location, asks the underlying PatternGenerator to create a new pattern at a location expanded by an amount based on the global time. """ generator = param.ClassSelector(default=Constant(scale=0.0), class_=PatternGenerator, doc="""Pattern to be translated.""") speed = param.Number(default=1, bounds=(0.0, None), doc=""" The speed with which the pattern should move, in sheet coordinates per simulation time unit.""") reset_period = param.Number(default=1, bounds=(0.0, None), doc=""" When pattern position should be reset, usually to the value of a dynamic parameter. The pattern is reset whenever fmod(simulation_time,reset_time)==0.""") visual_field_size = param.Number(default=10e8, bounds=(0.0, None), doc=""" Sometimes we want to expand stimuli from far positions, and thus the stimulus would not intersect with our visual field. This allows us to 'skip' the simulation time when the stimulus is not in the visual_field. """) last_time = 0.0 def __init__(self, **params): super(Expander, self).__init__(**params) self.size = params.get('size', self.size) x = params.get('x', self.x) y = params.get('y', self.y) # make sure that the stimulus starts with size that intersects with our visual_field_size if (numpy.sqrt(x*x + y*y) > numpy.sqrt(2)*self.visual_field_size): self.size = (numpy.sqrt(x*x + y*y) - numpy.sqrt(2)*self.visual_field_size) self.index = 0 self.last_time=0.0 def __call__(self, **params): """Construct new pattern out of the underlying one.""" generator = params.get('generator', self.generator) xdensity = params.get('xdensity', self.xdensity) ydensity = params.get('ydensity', self.ydensity) bounds = params.get('bounds', self.bounds) # CB: are the float() calls required because the comparisons # involving FixedPoint fail otherwise? Or for some other # reason? 
if((float(topo.sim.time()) >= self.last_time + self.reset_period) or (float(topo.sim.time()) <= 0.05)): if ((float(topo.sim.time()) <= (self.last_time + self.reset_period + 1.0)) and (float(topo.sim.time()) >= 0.05)) : return Null()(xdensity=xdensity, ydensity=ydensity, bounds=bounds) if (float(topo.sim.time()) >= 0.05): self.last_time += self.reset_period # time to reset the parameter (self.x, self.y) = (generator.x, generator.y) if isinstance(generator, Selector): self.index = generator.index generator.force_new_dynamic_value('x') generator.force_new_dynamic_value('y') if (numpy.sqrt(self.x*self.x + self.y*self.y) > self.visual_field_size): self.size = 2*(numpy.sqrt(self.x*self.x + self.y*self.y) - self.visual_field_size) # compute how much time elapsed from the last reset t = float(topo.sim.time()) - self.last_time ## CEBALERT: mask gets applied twice, both for the underlying ## generator and for this one. (leads to redundant ## calculations in current lissom_oo_or usage, but will lead ## to problems/limitations in the future). #return generator(xdensity=xdensity, ydensity=ydensity, bounds=bounds, x=-2.4, y=2.47,size=6.0,index=self.index) return generator(xdensity=xdensity, ydensity=ydensity, bounds=self.bounds, x=self.x, y=self.y, size=self.size + t * self.speed,index=self.index) class Jitterer(PatternGenerator): """ PatternGenerator that moves another PatternGenerator over time. To create a pattern at a new location, asks the underlying PatternGenerator to create a new pattern at a location translated by an amount based on the global time. """ generator = param.ClassSelector(default=Constant(scale=0.0), class_=PatternGenerator, doc="""Pattern to be translated.""") jitter_magnitude = param.Number(default=0.02, bounds=(0.0, None), doc=""" The speed with which the pattern should move, in sheet coordinates per simulation time unit.""") reset_period = param.Number(default=1, bounds=(0.0, None), doc=""" When pattern position should be reset, usually to the value of a dynamic parameter. 
The pattern is reset whenever fmod(simulation_time,reset_time)==0.""") seed = param.Number(default=1023, bounds=(0.0, None), doc="""Seed of the jitterer""") last_time = 0.0 def __init__(self, **params): super(Jitterer, self).__init__(**params) self.orientation = params.get('orientation', self.orientation) self.r =numbergen.UniformRandom(seed=1023) self.index = 0 def __call__(self, **params): """Construct new pattern out of the underlying one.""" generator = params.get('generator', self.generator) xdensity = params.get('xdensity', self.xdensity) ydensity = params.get('ydensity', self.ydensity) bounds = params.get('bounds', self.bounds) if((float(topo.sim.time()) >= self.last_time + self.reset_period) or (float(topo.sim.time()) <= 0.05)): if ((float(topo.sim.time()) <= (self.last_time + self.reset_period + 1.0)) and (float(topo.sim.time()) >= 0.05)) : return Null()(xdensity=xdensity, ydensity=ydensity, bounds=bounds) self.last_time += self.reset_period # time to reset the parameter (self.x, self.y, self.scale) = (generator.x, generator.y, generator.scale) if isinstance(generator, Selector): self.index = generator.index generator.force_new_dynamic_value('x') generator.force_new_dynamic_value('y') generator.force_new_dynamic_value('scale') discards = self.orientation (a, b, c) = (generator.x, generator.y, generator.scale) return generator(xdensity=xdensity, ydensity=ydensity, bounds=bounds, x=self.x + self.jitter_magnitude * self.r(), y=self.y + self.jitter_magnitude * self.r(), orientation=self.inspect_value("orientation"), index=self.inspect_value("index")) class SequenceSelector(PatternGenerator): """ PatternGenerator that selects from a list of other PatternGenerators in a sequential order. """ generators = param.List(default=[Constant()], precedence=0.97, class_=PatternGenerator, bounds=(1, None), doc="List of patterns from which to select.") size = param.Number(default=1.0, doc="Scaling factor applied to all sub-patterns.") def __init__(self, generators, **params): super(SequenceSelector, self).__init__(**params) self.generators = generators self.index = 0 def function(self, params): """Selects and returns one of the patterns in the list.""" bounds = params['bounds'] xdensity = params['xdensity'] ydensity = params['ydensity'] x = params['x'] y = params['y'] scale = params['scale'] offset = params['offset'] size = params['size'] orientation = params['orientation'] index = params['index'] if self.index == len(self.generators): self.index = 0 pg = self.generators[self.index] self.index = self.index + 1 image_array = pg(xdensity=xdensity, ydensity=ydensity, bounds=bounds, x=x + size * (pg.x * cos(orientation) - pg.y * sin(orientation)), y=y + size * (pg.x * sin(orientation) + pg.y * cos(orientation)), orientation=pg.orientation + orientation, size=pg.size * size, scale=pg.scale * scale, offset=pg.offset + offset) return image_array def measure_ot(lat_exc, lat_inh, e, t): import topo topo.sim["V1"].in_connections[2].strength = lat_exc topo.sim["V1"].in_connections[3].strength = lat_inh topo.sim["V1"].output_fn.output_fns[1].t = t topo.sim["V1"].output_fn.output_fns[1].e = e import topo.command.analysis import topo.command.pylabplots filename = "Exc=" + str(lat_exc) + "_Inh=" + str(lat_inh) + "_E=" + str(e) + "_T=" + str(t) topo.commands.analysis.measure_or_tuning_fullfield(display=True, num_phase=4, num_orientation=80, frequencies=[2.4], curve_parameters=[{"contrast":1}, {"contrast":5}, {"contrast":10}, {"contrast":50}, {"contrast":90}]) 
topo.commands.pylabplots.cyclic_tuning_curve(suffix="GC_with_LGNGC_HR", filename=filename, sheet=topo.sim["V1"], coords=[(0, 0)], x_axis="orientation") def plot_linearized_rfs(sheet_name="V1Simple", lgn_on_projection_name="LGNOnAfferent", lgn_off_projection_name="LGNOffAfferent"): (V1x, V1y) = shape(topo.sim[sheet_name]) lgn_on = topo.sim[sheet_name].projections[lgn_on_projection_name] lgn_off = topo.sim[sheet_name].projections[lgn_off_projection_name] for x in xrange(0, V1x): for y in xrange(0, V1y): RF = numpy.zeros(shape(topo.sim["Retina"].activity)) on_cfs = lgn_on.cfs[x][y] off_cfs = lgn_on.cfs[x][y] (lgnx, lgny) = shape(numpy.zeros(shape(cfs))) for lx in xrange(0, lgnx): for ly in xrange(0, lgny): RF += on_cfs.weights[lx, ly] * topo.sim["LGNOn"].projections["Afferent"].cfs[0, 0].weights def plot_proj_activity_sum(sheet, lateral_proj=[]): li = zeros(lateral_proj[0].activity.shape) for p in lateral_proj: li += p.activity pylab.figure(figsize=(5, 5)) a = max(abs(li.max()), abs(li.min())) pylab.imshow(li, interpolation=None, aspect=None, vmin= - a, vmax=a) if (li.min() != li.max()): pylab.colorbar() pylab.show._needmain = False pylab.show() def create_prefix(variables): prefix = "" for var in variables: prefix = prefix + " " + var + "=" + str(__main__.__dict__[var]) return prefix #run_combinations_counter=0 def _run_combinations_rec(func, param, params, index): if(len(params) == index): func(*param) #run_combinations_counter+=1 #print run_combinations_counter return a = params[index] for p in a: new_param = param + [p] _run_combinations_rec(func, new_param, params, index + 1) def run_combinations(func, params): """ this function runs function func with all combinations of params defined in the array params, eg. params = [[1,2,3],[1,2,3]...] """ run_combinations_counter = 0 _run_combinations_rec(func, [], params, 0) def investigate_neuron(coordx,coordy): import pylab x,y = topo.sim["V1Simple"].sheet2matrixidx(coordx,coordy) CF1 = topo.sim["V1Simple"].in_connections[0]._cfs[x][y] CF2 = topo.sim["V1Simple"].in_connections[1]._cfs[x][y] fig = pylab.figure() pylab.title("activation:" + str(topo.sim["V1Simple"].activity[x][y])) f = fig.add_subplot(221) f.imshow(CF1.weights) f = fig.add_subplot(223) f.imshow(CF2.weights) f = fig.add_subplot(222) f.imshow(CF1.get_input_matrix(topo.sim["LGNOn"].activity)) f = fig.add_subplot(224) f.imshow(CF1.get_input_matrix(topo.sim["LGNOff"].activity)) pylab.show() def reset_cc_lissom(): m = numpy.mean(topo.sim["V1Simple"].output_fns[2].t) topo.sim["V1Simple"].output_fns[2].t*=0 topo.sim["V1Simple"].output_fns[2].t+=m s = topo.sim["V1Simple"].output_fns[1].generator.scale topo.sim["V1Simple"].output_fns[1].generator.scale=0.0 a = topo.sim["V1Complex"].in_connections[0].strength topo.sim["V1Complex"].in_connections[0].strength=0 return (s,a) def measure_map_position_and_MR_correlations(): import contrib.surround_analysis import matplotlib.ticker as mticker import operator import matplotlib matplotlib.rc('xtick', labelsize=15) matplotlib.rc('ytick', labelsize=15) lhi = contrib.surround_analysis.compute_local_homogeneity_index(topo.sim["V1Complex"].sheet_views['OrientationPreference'].view()[0]*pi,5) pylab.figure() ax = pylab.subplot(111) b = zip(lhi.flatten(),topo.sim["V1Complex"].sheet_views['ComplexSelectivity'].view()[0].flatten()) b_sorted = sorted(b, key=operator.itemgetter(0)) c,d=zip(*b_sorted) pylab.plot(c,d,'ro') pylab.plot(c,contrib.jacommands.weighted_local_average(c,d,0.05),linewidth=5) ax.xaxis.set_major_locator(mticker.MaxNLocator(5)) 
pylab.yticks([0,0.25,0.5,0.75,1.0], ['0','0.5','1.0','1.5','2.0']) ax.set_xlabel('Local homogenity index', fontsize=15) ax.set_ylabel('Modulation ratio', fontsize=15) pylab.show() def weighted_local_average(x,y,s): z = [] for a in x: tmp = 0.0 num = 0.0 b = (numpy.abs(x-a) < s) *1.0 z.append(numpy.sum(numpy.multiply(y,b))/numpy.sum(b)) return z def weighted_local_std(x,y,s): z = [] for a in x: tmp = 0.0 num = 0.0 b = (numpy.abs(x-a) < s) *1.0 av = numpy.sum(numpy.multiply(y,b))/numpy.sum(b) z.append(numpy.sqrt(numpy.sum(numpy.power(numpy.multiply(y-av,b),2))/numpy.sum(b))) return z def LateralOrientationAnnisotropy(): pylab.figure() orr = topo.sim["V1Complex"].sheet_views["OrientationPreference"].view()[0] (s,y) = numpy.shape(orr) w = numpy.zeros((s,y)) for x in xrange((s/3)+1,2*(s/3)-1): for y in xrange((s/3)+1,2*(s/3)-1): if (orr[x,y] < 0.05) or (orr[x,y] > 0.95): b = topo.sim["V1ComplexInh"].projections()["LongEI"].cfs[x,y].weights.copy() #b[x-int(s/8):x+int(s/8),y-int(s/8):y+int(s/8)] = 0 c = b.copy()*0 c[x-int(s/12):x+int(s/12),y-int(s/12):y+int(s/12)] += b[x-int(s/12):x+int(s/12),y-int(s/12):y+int(s/12)] w = w + c pylab.hist(numpy.pi*numpy.array(orr.ravel()),weights=numpy.array(w.ravel())) pylab.savefig(normalize_path('hist.png'))
bsd-3-clause
-5,524,606,945,241,420,000
42.273567
615
0.610803
false
ThomasYeoLab/CBIG
stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/gen_cv_fold.py
1
4671
#!/usr/bin/env python
# Written by Minh Nguyen and CBIG under MIT license:
# https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
import argparse
import os.path as path
import numpy as np
import pandas as pd

import cbig.Nguyen2020.misc as misc


def split_by_median_date(data, subjects):
    """
    Split timepoints in two halves, use first half to predict second half
    Args:
        data (Pandas data frame): input data
        subjects: list of subjects
    Return:
        first_half (ndarray): boolean mask, rows used as input
        second_half (ndarray): boolean mask, rows to predict
    """
    first_half = np.zeros(data.shape[0], int)
    second_half = np.zeros(data.shape[0], int)
    for rid in subjects:
        subj_mask = (data.RID == rid) & data.has_data
        median_date = np.sort(data.EXAMDATE[subj_mask])[subj_mask.sum() // 2]
        first_half[subj_mask & (data.EXAMDATE < median_date)] = 1
        second_half[subj_mask & (data.EXAMDATE >= median_date)] = 1
    return first_half, second_half


def gen_fold(data, nb_folds, outdir):
    """ Generate *nb_folds* cross-validation folds from *data """
    subjects = np.unique(data.RID)
    has_2tp = np.array([np.sum(data.RID == rid) >= 2 for rid in subjects])

    potential_targets = np.random.permutation(subjects[has_2tp])
    folds = np.array_split(potential_targets, nb_folds)

    leftover = [subjects[~has_2tp]]

    for test_fold in range(nb_folds):
        val_fold = (test_fold + 1) % nb_folds
        train_folds = [
            i for i in range(nb_folds) if (i != test_fold and i != val_fold)
        ]

        train_subj = np.concatenate(
            [folds[i] for i in train_folds] + leftover, axis=0)
        val_subj = folds[val_fold]
        test_subj = folds[test_fold]

        train_timepoints = (
            np.in1d(data.RID, train_subj) & data.has_data).astype(int)
        val_in_timepoints, val_out_timepoints = split_by_median_date(
            data, val_subj)
        test_in_timepoints, test_out_timepoints = split_by_median_date(
            data, test_subj)

        mask_frame = gen_mask_frame(data, train_timepoints, val_in_timepoints,
                                    test_in_timepoints)
        mask_frame.to_csv(
            path.join(outdir, 'fold%d_mask.csv' % test_fold), index=False)

        val_frame = gen_ref_frame(data, val_out_timepoints)
        val_frame.to_csv(
            path.join(outdir, 'fold%d_val.csv' % test_fold), index=False)

        test_frame = gen_ref_frame(data, test_out_timepoints)
        test_frame.to_csv(
            path.join(outdir, 'fold%d_test.csv' % test_fold), index=False)


def gen_mask_frame(data, train, val, test):
    """
    Create a frame with 3 masks:
        train: timepoints used for training model
        val: timepoints used for validation
        test: timepoints used for testing model
    """
    col = ['RID', 'EXAMDATE']
    ret = pd.DataFrame(data[col], index=range(train.shape[0]))
    ret['train'] = train
    ret['val'] = val
    ret['test'] = test

    return ret


def gen_ref_frame(data, test_timepoint_mask):
    """ Create reference frame which is used to evalute models' prediction """
    columns = [
        'RID', 'CognitiveAssessmentDate', 'Diagnosis', 'ADAS13', 'ScanDate'
    ]
    ret = pd.DataFrame(
        np.nan, index=range(len(test_timepoint_mask)), columns=columns)
    ret[columns] = data[['RID', 'EXAMDATE', 'DXCHANGE', 'ADAS13', 'EXAMDATE']]
    ret['Ventricles'] = data['Ventricles'] / data['ICV']
    ret = ret[test_timepoint_mask == 1]

    # map diagnosis from numeric categories back to labels
    mapping = {
        1: 'CN',
        7: 'CN',
        9: 'CN',
        2: 'MCI',
        4: 'MCI',
        8: 'MCI',
        3: 'AD',
        5: 'AD',
        6: 'AD'
    }
    ret.replace({'Diagnosis': mapping}, inplace=True)
    ret.reset_index(drop=True, inplace=True)
    return ret


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--spreadsheet', required=True)
    parser.add_argument('--features', required=True)
    parser.add_argument('--folds', type=int, required=True)
    parser.add_argument('--outdir', required=True)
    args = parser.parse_args()

    np.random.seed(args.seed)
    columns = ['RID', 'DXCHANGE', 'EXAMDATE']
    features = misc.load_feature(args.features)
    frame = pd.read_csv(
        args.spreadsheet,
        usecols=columns + features,
        converters=misc.CONVERTERS)
    frame['has_data'] = ~frame[features].isnull().apply(np.all, axis=1)

    gen_fold(frame, args.folds, args.outdir)


if __name__ == '__main__':
    main()
mit
-2,911,630,953,878,063,600
31.664336
78
0.60972
false
tonnrueter/pymca_devel
PyMca/QtMcaAdvancedFitReport.py
1
37957
#/*########################################################################## # Copyright (C) 2004-2013 European Synchrotron Radiation Facility # # This file is part of the PyMca X-ray Fluorescence Toolkit developed at # the ESRF by the Software group. # # This toolkit is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # PyMca is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # PyMca; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # PyMca follows the dual licensing model of Riverbank's PyQt and cannot be # used as a free plugin for a non-free program. # # Please contact the ESRF industrial unit (industry@esrf.fr) if this license # is a problem for you. #############################################################################*/ import os import sys import time MATPLOTLIB = False from PyMca import PyMcaQt as qt QTVERSION = qt.qVersion() try: #this is installation dependent I guess from matplotlib import rcParams from matplotlib import __version__ as matplotlib_version #rcParams['numerix'] = "numeric" from matplotlib.font_manager import FontProperties from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure MATPLOTLIB = True except: from PyMca import QtBlissGraph from PyMca import ConfigDict from PyMca import PyMcaLogo from PyMca.ConcentrationsTool import ConcentrationsConversion class QtMcaAdvancedFitReport: def __init__(self, fitfile = None, outfile = None, outdir = None, sourcename = None, selection = None, fitresult = None,htmltext=None, concentrations=None, table = None, plotdict=None): self.concentrations = concentrations self.concentrationsConversion = ConcentrationsConversion() if table is None: table = 2 self.tableFlag = table if fitfile is not None: #generate output from fit result file self.fitfile = fitfile self.outfile = outfile self.outdir = outdir self.generateReportFromFitFile() else: #generate output from fitresult INCLUDING fit file self.fitfile = fitfile self.outfile = outfile self.outdir = outdir self.sourcename=sourcename self.selection =selection self.fitresult =fitresult if self.outfile is None: if selection is not None: self.outfile = selection if (self.outfile is None) or (self.outfile == 'Unknown Origin'): if sourcename is not None: self.outfile = os.path.basename(sourcename) self.outfile = self.outfile.replace(" ","_") self.outfile = self.outfile.replace("/","_over_") self.graph = None if htmltext is None: htmltext={} self.otherhtmltext=htmltext if plotdict is None: self.plotDict = {'logy':True, 'xmin':None, 'xmax':None, 'ymin':None, 'ymax':None} else: self.plotDict = plotdict def writeReport(self,text=None): if len(self.outfile) > 5: if self.outfile[-5:] != ".html": outfile = os.path.join(self.outdir, self.outfile+".html") else: outfile = os.path.join(self.outdir, self.outfile) else: outfile = os.path.join(self.outdir, self.outfile+".html") try: os.remove(outfile) except: pass concentrationsfile = outfile[:-5]+"_concentrations.txt" try: os.remove(concentrationsfile) except: pass if text is None: text = 
self.getText() f=open(outfile,"w") f.write(text) f.close() if len(self._concentrationsTextASCII) > 1: f=open(concentrationsfile, "w") f.write(self._concentrationsTextASCII) f.close() return outfile def generateReportFromFitFile(self): d=ConfigDict.ConfigDict() d.read(self.fitfile) sourcename = "Unknown Source" selection = "Unknown Selection" if 'info' in d: if 'key' in d['info']: selection=d['info']['key'] elif 'Key' in d['info']: selection=d['info']['Key'] for key in d['info'].keys(): if key.upper() == 'SOURCENAME': sourcename = d['info'][key] elif (key.upper() == 'SELECTION') or\ (key.upper() == 'LEGEND'): selection = d['info'][key] self.sourcename = sourcename self.selection = selection if self.outfile is None: if self.outdir is None: self.outdir = os.getcwd() self.outfile= os.path.basename(self.fitfile) else: if self.outdir is None: self.outdir = os.path.dirname(self.outfile) self.outfile= os.path.basename(self.outfile) if self.outdir == '':self.outdir = "." self.fitresult=d if 'concentrations' in d: self.concentrations = d['concentrations'] def getText(self): newlinks = [] for key in self.otherhtmltext.keys(): newlinks.append(["#%s" % (key),"%s" % key]) text =self.getHeader(newlinks) text+=self.getInfo() text+=self.getImage() text+=self.getParam() text+=self.getConcentrations() self._concentrationsTextASCII = self.getConcentrationsASCII() text+=self.getResult() for key in self.otherhtmltext.keys(): text+="\n" text+= "<H2><a NAME=""%s""></a><FONT color=#009999>" % key text+= "%s:" % key text+= "</FONT></H2>" text+= self.otherhtmltext[key] text+="<br>" text+=self.getFooter() return text def getHeader(self,addlink=None): link = [ ['http://pymca.sourceforge.net', 'PyMCA home'], ['http://www.esrf.fr', 'ESRF home'], ['http://www.esrf.fr/UsersAndScience/Experiments/TBS/BLISS', 'BLISS home']] if self.concentrations is not None: link.append(['#Concentrations', 'Concentrations']) if self.tableFlag:link.append(['#Fit_Peak_Results', 'Fit Peak Results']) if addlink is not None: for item in addlink: link.append(item) text ="" text+= "<HTML>" text+= "<HEAD>" text+= "<TITLE>PyMCA : Advanced Fit Results</TITLE>" text+= "</HEAD>" text+= "<BODY TEXT=#000000 BGCOLOR=#FFFFFF ALINK=#ff6600 LINK=#0000cc VLINK=#0000cc marginwidth=10 marginheight=10 topmargin=10 leftmargin=10>" text+= "<CENTER>" text+= "<TABLE WIDTH=100%% border=0 Height=70>" text+= " <TR>" text+= " <TD><Font Size=5 Color=#0000cc>" text+= " <b>PyMCA : Advanced Fit Results</b></Font>" text+= " </td>" text+= " <td rowspan=2 ALIGN=RIGHT VALIGN=bottom>" text+= " <a HREF=""http://www.esrf.fr/"">" logofile = self.outdir + "/" + "PyMcaLogo.png" if not os.path.exists(logofile): pixmap = qt.QPixmap(PyMcaLogo.PyMcaLogo) pixmap.save(logofile,"PNG") text+= " <img SRC=%s ALT=""ESRF home"" WIDTH=55 HEIGHT=68 BORDER=0></a>" % "PyMcaLogo.png" text+= " </td>" text+= " </tr>" text+= " <tr>" text+= " <td width=100%% VALIGN=bottom>" text+= " <TABLE BORDER=0 CELLPADDING=0 CELLSPACING=0 WIDTH=100%%>" text+= " <TR>" text+= " <TD WIDTH=100%% BGCOLOR=#ee22aa HEIGHT=17 ALIGN=LEFT VALIGN=middle>" text+= " <FONT color=#000000>&nbsp;" for name in link: text+= "|&nbsp;&nbsp;<A STYLE=""color: #FFFFFF"" HREF=""%s"">%s</a>&nbsp;&nbsp;"%(tuple(name)) text+= " </FONT>" text+= " </TD>" text+= " </TR>" text+= " </TABLE>" text+= " </td>" text+= " </tr>" text+= " <tr>" text+= " <td colspan=2 height=5><spacer type=block height=10 width=0>" text+= " </td>" text+= " </tr>" text+= "</table>" text+= "</center>" return text def getInfo(self): text ="" text+= "<nobr><H2><FONT 
color=#0000cc>" text+= "Computed File :&nbsp;" text+= "</FONT>" text+= "<FONT color=#000000>" if self.fitfile is not None: if os.path.basename(self.fitfile) == self.fitfile: text+= "<b><I>%s</I></b>" % (os.getcwd()+"/"+self.fitfile) else: text+= "<b><I>%s</I></b>" % (self.fitfile) else: text+= "<b><I>%s</I></b>" % (self.outdir+"/"+self.outfile+".fit") #and I have to generate it!!!!!!!!!!!!" d=ConfigDict.ConfigDict(self.fitresult) try: os.remove(self.outdir+"/"+self.outfile+".fit") except: pass if self.concentrations is not None: d['concentrations'] = self.concentrations d.write(self.outdir+"/"+self.outfile+".fit") text+= "</FONT>" text+= "</H2>" text+= "</nobr>" text+= "<LEFT>" text+= "<TABLE border=0>" text+= "<TR><TD><SPACER TYPE=BLOCK WIDTH=50></TD><TD>" text+= "<TABLE border=0 cellpadding=1 cellspacing=2>" text+= " <TR><TH ALIGN=LEFT>Source : &nbsp;</TH><TD ALIGN=LEFT>%s</TD></TR>" % (self.sourcename) text+= " <TR><TH ALIGN=LEFT>Selection : &nbsp;</TH><TD ALIGN=LEFT>%s</TD></TR>" % (self.selection) text+= " <TR><TH ALIGN=LEFT>Parameters : &nbsp;</TH><TD ALIGN=LEFT>" d=ConfigDict.ConfigDict(self.fitresult['result']['config']) try: os.remove(self.outdir+"/"+self.outfile+".txt") except: pass d.write(self.outdir+"/"+self.outfile+".txt") text+= "<a HREF=""%s"">%s</a>"% (self.outfile+".txt",self.outfile+".txt") text+="</TD></TR>" """ text+= " <TR><TH ALIGN=RIGHT>Source : </TH><TD ALIGN=LEFT>%s</TD>"%(self.sourcename) text+= " <TH ALIGN=RIGHT>Selection : </TH><TD ALIGN=LEFT>%s</TD></TR>"%(self.selection) keys= [ key for key in info.keys() if key not in ['paramfile', 'peakfile'] ] for idx in range(0, len(keys), 2): text+= " <TR><TH ALIGN=RIGHT>%s : </TH><TD ALIGN=LEFT>%s</TD>"%(keys[idx], info[keys[idx]]) if idx+1<len(keys): text+= " <TH ALIGN=RIGHT>%s : </TH><TD ALIGN=LEFT>%s</TD></TR>"%(keys[idx+1], info[keys[idx+1]]) else: text+= " <TD COLSPAN=2></TD></TR>" """ text+= "</TABLE>" text+= "</TD></TR></TABLE>" text+= "</LEFT>" return text def getParam(self): text="" zero = self.fitresult['result']['fittedpar'][self.fitresult['result']['parameters'].index('Zero')] gain = self.fitresult['result']['fittedpar'][self.fitresult['result']['parameters'].index('Gain')] noise= self.fitresult['result']['fittedpar'][self.fitresult['result']['parameters'].index('Noise')] fano = self.fitresult['result']['fittedpar'][self.fitresult['result']['parameters'].index('Fano')] sum = self.fitresult['result']['fittedpar'][self.fitresult['result']['parameters'].index('Sum')] stdzero = self.fitresult['result']['sigmapar'][self.fitresult['result']['parameters'].index('Zero')] stdgain = self.fitresult['result']['sigmapar'][self.fitresult['result']['parameters'].index('Gain')] stdnoise= self.fitresult['result']['sigmapar'][self.fitresult['result']['parameters'].index('Noise')] stdfano = self.fitresult['result']['sigmapar'][self.fitresult['result']['parameters'].index('Fano')] stdsum = self.fitresult['result']['sigmapar'][self.fitresult['result']['parameters'].index('Sum')] hypermetflag = self.fitresult['result']['config']['fit']['hypermetflag'] if not ('fitfunction' in self.fitresult['result']['config']['fit']): if hypermetflag: self.fitresult['result']['config']['fit']['fitfunction'] = 0 else: self.fitresult['result']['config']['fit']['fitfunction'] = 1 if self.fitresult['result']['config']['fit']['fitfunction'] or\ (hypermetflag != 1): #the peaks are not pure gaussians if self.fitresult['result']['config']['fit']['fitfunction']: #peaks are pseudo-voigt functions hypermetnames = ['Eta Factor'] else: hypermetnames = ['ST 
AreaR', 'ST SlopeR', 'LT AreaR', 'LT SlopeR', 'STEP HeightR'] hypermetvalues=[] hypermetstd =[] hypermetfinalnames = [] for name in hypermetnames: if name in self.fitresult['result']['parameters']: hypermetvalues.append(self.fitresult['result']['fittedpar'] \ [self.fitresult['result']['parameters'].index(name)]) hypermetstd.append(self.fitresult['result']['sigmapar'] \ [self.fitresult['result']['parameters'].index(name)]) hypermetfinalnames.append(name) # --- html table text+="<H2><FONT color=#009999>" text+="Fit Parameters :" text+="</FONT></H2>" text+="<CENTER>" text+="<TABLE border=0 cellpadding=0 cellspacing=2 width=80%>" text+="<TR>" text+=" <TD><TABLE border=1 cellpadding=1 cellspacing=0 width=100%>" text+=" <TR align=center>" text+=" <TH colspan=2>FIT parameters</TH>" text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Region of Fit</I></TD>" text+=" <TD>&nbsp;%d - %d</TD>" % (self.fitresult['result']['config']['fit']['xmin'],self.fitresult['result']['config']['fit']['xmax']) text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Number of iterations</I></TD>" #text+=" <TD>&nbsp;%d</TD>" % (fitpar['fit_numiter']) text+=" <TD>&nbsp;%d</TD>" % (self.fitresult['result']['niter']) text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Chi square</I></TD>" #text+=" <TD>&nbsp;%.4f</TD>" % (fitpar['fit_chi']) text+=" <TD>&nbsp;%.4f</TD>" % (self.fitresult['result']['chisq']) text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Last Chi square difference</I></TD>" #text+=" <TD>&nbsp;%.4f %%</TD>" % (fitpar['fit_lastchi']) text+=" <TD>&nbsp;%.4f %%</TD>" % (self.fitresult['result']['lastdeltachi']*100) text+=" </TR>" text+=" </TABLE>" text+=" </TD>" text+="</TR>" text+="<TR>" text+=" <TD><TABLE border=1 cellpadding=1 cellspacing=0 width=100%>" text+=" <TR align=center>" text+=" <TH colspan=2>Calibration parameters</TH>" text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Zero</I></TD>" text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (zero, stdzero) text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Gain</I></TD>" text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (gain, stdgain) text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Noise</I></TD>" text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (noise, stdnoise) text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Fano</I></TD>" text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (fano, stdfano) text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;Sum</I></TD>" text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (sum, stdsum) text+=" </TR>" text+=" </TABLE>" text+=" </TD>" text+="</TR>" # --- Peak shape parameters --- if hypermetflag != 1: text+="<TR>" text+=" <TD><TABLE border=1 cellpadding=1 cellspacing=0 width=100%>" text+=" <TR align=center>" text+=" <TH colspan=2>Peak shape parameters</TH>" text+=" </TR>" for i in range(len(hypermetfinalnames)): text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s</I></TD>" % hypermetnames[i] text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (hypermetvalues[i], hypermetstd[i]) text+=" </TR>" text+=" </TABLE>" text+=" </TD>" text+="</TR>" # --- Continuum parameters --- text+="<TR>" text+=" <TD><TABLE border=1 cellpadding=1 cellspacing=0 width=100%>" text+=" <TR align=center>" text+=" <TH colspan=2>Continuum parameters</TH>" text+=" </TR>" # Stripping if self.fitresult['result']['config']['fit']['stripflag']: constant = 1.0 iterations = 20000 stripwidth = 1 stripfilterwidth = 1 stripalgorithm = 0 snipwidth = 30 if 'stripalgorithm' in self.fitresult['result']['config']['fit']: 
stripalgorithm=self.fitresult['result']['config']['fit']['stripalgorithm'] if 'snipwidth' in self.fitresult['result']['config']['fit']: snipwidth=self.fitresult['result']['config']['fit']['snipwidth'] if 'stripconstant' in self.fitresult['result']['config']['fit']: constant=self.fitresult['result']['config']['fit']['stripconstant'] if 'stripiterations' in self.fitresult['result']['config']['fit']: iterations=self.fitresult['result']['config']['fit']['stripiterations'] if 'stripwidth' in self.fitresult['result']['config']['fit']: stripwidth=self.fitresult['result']['config']['fit']['stripwidth'] if 'stripfilterwidth' in self.fitresult['result']['config']['fit']: stripfilterwidth=self.fitresult['result']['config']['fit']['stripfilterwidth'] if stripalgorithm == 1: text+=" <TR align=left>" text+=" <TD><I>&nbsp;Type</I></TD>" text+=" <TD>&nbsp;%s</TD>" % "SNIP Background" text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % "SNIP width" text+=" <TD>&nbsp;%.5f</TD>" % snipwidth text+=" </TR>" else: text+=" <TR align=left>" text+=" <TD><I>&nbsp;Type</I></TD>" text+=" <TD>&nbsp;%s</TD>" % "Strip Background" text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % "Strip Constant" text+=" <TD>&nbsp;%.5f</TD>" % constant text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % "Strip Iterations" text+=" <TD>&nbsp;%d</TD>" % iterations text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % "Strip Width" text+=" <TD>&nbsp;%d</TD>" % stripwidth text+=" </TR>" text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % "Smoothing Filter Width" text+=" <TD>&nbsp;%d</TD>" % stripfilterwidth text+=" </TR>" stripanchorslist = [] stripanchorsflag = self.fitresult['result']['config']['fit'].get('stripanchorsflag', 0) if stripanchorsflag: stripanchorslist = self.fitresult['result']['config']['fit'].get('stripanchorslist', []) i = 0 for anchor in stripanchorslist: if anchor != 0: text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s%d<I></TD>" % ("Anchor",i) text+=" <TD>&nbsp;%d</TD>" % anchor text+=" </TR>" i += 1 # --- Background Function if self.fitresult['result']['config']['fit']['continuum']: text+=" <TR align=left>" text+=" <TD><I>&nbsp;Type</I></TD>" if 'continuum_name' in self.fitresult['result']['config']['fit']: name = self.fitresult['result']['config']['fit']['continuum_name'] text+=" <TD>&nbsp;%s</TD>" % name elif self.fitresult['result']['config']['fit']['continuum'] == 1: text+=" <TD>&nbsp;%s</TD>" % "Constant Polymomial" elif self.fitresult['result']['config']['fit']['continuum'] == 2: text+=" <TD>&nbsp;%s</TD>" % "1st Order Polymomial" elif self.fitresult['result']['config']['fit']['continuum'] == 3: text+=" <TD>&nbsp;%s</TD>" % "2nd Order Polymomial" else: #compatibility with previous versions text+=" <TD>&nbsp;%s</TD>" % "1st Order Polymomial" text+=" </TR>" isum = self.fitresult['result']['parameters'].index('Sum') a=0 if hypermetflag:a=5 nglobal = len(self.fitresult['result']['parameters']) - len(self.fitresult['result']['groups']) for i in range(isum+1,nglobal-a): text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % self.fitresult['result']['parameters'][i] value = self.fitresult['result']['fittedpar'][i] stdvalue = self.fitresult['result']['sigmapar'] [i] text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (value, stdvalue) text+=" </TR>" if 0: text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % 'Constant' value = self.fitresult['result']['fittedpar'][self.fitresult['result']['parameters'].index('Constant')] 
stdvalue = self.fitresult['result']['sigmapar'] [self.fitresult['result']['parameters'].index('Constant')] text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (value, stdvalue) text+=" </TR>" if self.fitresult['result']['config']['fit']['continuum'] > 1: text+=" <TR align=left>" text+=" <TD><I>&nbsp;%s<I></TD>" % 'Slope' value = self.fitresult['result']['fittedpar'][self.fitresult['result']['parameters'].index('Constant')+1] stdvalue = self.fitresult['result']['sigmapar'] [self.fitresult['result']['parameters'].index('Constant')+1] text+=" <TD>&nbsp;% .5E +/- % .5E</TD>" % (value, stdvalue) text+=" </TR>" text+="</TR>" text+=" </TABLE>" text+=" </TD>" text+="</TR>" if 0: #not yet implemented text+="<TR>" text+=" <TD align=center>" text+=" <I>FIT END STATUS : </I>%s<BR>"% "STATUS" text+=" <B>%s</B>" % "MESSAGE" text+=" </TD>" text+="</TR>" text+="</TABLE>" text+="</CENTER>" return text def getFooter(self): now = time.time() text ="" text+= "<center>" text+= "<table width=100%% border=0 cellspacing=0 cellpadding=0>" text+= " <tr><td colspan=2 height=10><spacer type=block height=10 width=0></td></tr>" text+= " <tr><td colspan=2 bgcolor=#cc0066 height=5><spacer type=block height=5 width=0></td></tr>" text+= " <tr><td colspan=2 height=5><spacer type=block height=5 width=0></td></tr>" text+= " <TR>" text+= " <TD><FONT size=1 >created: %s</font></TD>" % time.ctime(now) #text+= " <TD ALIGN=RIGHT><FONT size=1 >last modified: %s" % time.ctime(now) text+= " <TD ALIGN=RIGHT><FONT size=1 >last modified: %s by" % time.ctime(now) #text+= " <A STYLE=""color: #0000cc"" HREF=""mailto:papillon@esrf.fr"">papillon@esrf.fr</A></FONT></TD>" if sys.platform == 'win32': try: user = os.getenv('USERNAME') text+= " <A STYLE=""color: #0000cc"">%s</A></FONT></TD>" % user except: text +="</FONT></TD>" else: try: user = os.getenv("USER") text+= " <A STYLE=""color: #0000cc"">%s</A></FONT></TD>" % user except: text +="</FONT></TD>" text+= " </TR>" text+= "</TABLE>" text+= "</center>" text+= "</BODY>" text+= "</HTML>" return text def __getFitImage(self,imagefile=None): if imagefile is None:imagefile=self.outdir+"/"+self.outfile+".png" filelink = "%s" % imagefile text = "" text+= "<H2><FONT color=#009999>" text+= "Spectrum, Continuum and Fitted values :" text+= "</FONT></H2>" text+= "<CENTER>" text+= "<IMG SRC=%s ALT=""fit graph"" ALIGN=center>"%filelink text+= "</CENTER>" return text def getImage(self): dict=self.fitresult if MATPLOTLIB: try: fig = Figure(figsize=(6,3)) # in inches canvas = FigureCanvas(fig) ax = fig.add_axes([.15, .15, .8, .8]) ax.set_axisbelow(True) logplot = self.plotDict.get('logy', True) if logplot: axplot = ax.semilogy else: axplot = ax.plot axplot(dict['result']['energy'], dict['result']['ydata'], 'k', lw=1.5) axplot(dict['result']['energy'], dict['result']['continuum'], 'g', lw=1.5) legendlist = ['spectrum', 'continuum', 'fit'] axplot(dict['result']['energy'], dict['result']['yfit'], 'r', lw=1.5) fontproperties = FontProperties(size=8) if dict['result']['config']['fit']['sumflag']: axplot(dict['result']['energy'], dict['result']['pileup'] + dict['result']['continuum'], 'y', lw=1.5) legendlist.append('pileup') if matplotlib_version < '0.99.0': legend = ax.legend(legendlist,0, prop = fontproperties, labelsep=0.02) else: legend = ax.legend(legendlist,0, prop = fontproperties, labelspacing=0.02) except ValueError: fig = Figure(figsize=(6,3)) # in inches canvas = FigureCanvas(fig) ax = fig.add_axes([.15, .15, .8, .8]) ax.set_axisbelow(True) ax.plot(dict['result']['energy'], dict['result']['ydata'], 'k', lw=1.5) 
ax.plot(dict['result']['energy'], dict['result']['continuum'], 'g', lw=1.5) legendlist = ['spectrum', 'continuum', 'fit'] ax.plot(dict['result']['energy'], dict['result']['yfit'], 'r', lw=1.5) fontproperties = FontProperties(size=8) if dict['result']['config']['fit']['sumflag']: ax.plot(dict['result']['energy'], dict['result']['pileup'] + dict['result']['continuum'], 'y', lw=1.5) legendlist.append('pileup') if matplotlib_version < '0.99.0': legend = ax.legend(legendlist,0, prop = fontproperties, labelsep=0.02) else: legend = ax.legend(legendlist,0, prop = fontproperties, labelspacing=0.02) ax.set_xlabel('Energy') ax.set_ylabel('Counts') legend.draw_frame(False) outfile = self.outdir+"/"+self.outfile+".png" try: os.remove(outfile) except: pass canvas.print_figure(outfile) return self.__getFitImage(self.outfile+".png") if self.graph is None: self.widget = qt.QWidget() self.widget.l = qt.QVBoxLayout(self.widget) self.graph = QtBlissGraph.QtBlissGraph(self.widget) self.widget.l.addWidget(self.graph) widget = self.widget graph = self.graph graph.xlabel('Energy') graph.ylabel('Counts') graph.setCanvasBackground(qt.Qt.white) x = dict['result']['energy'] graph.newcurve('spectrum', x=x,y=dict['result']['ydata'],logfilter=1) graph.newcurve('continuum',x=x,y=dict['result']['continuum'],logfilter=1) graph.newcurve('fit',x=x,y=dict['result']['yfit'],logfilter=1) if dict['result']['config']['fit']['escapeflag']: #I DO NOT HAVE THE CONTRIBUTION pass #self.graph.newcurve('escape',x=x,y=dict['result']['escape'],logfilter=1) if dict['result']['config']['fit']['sumflag']: graph.newcurve('pileup', x=x, y=dict['result']['pileup']+dict['result']['continuum'], logfilter=1) graph.ToggleLogY() ymin=min(min(dict['result']['ydata']),min(dict['result']['yfit'])) ymax=max(max(dict['result']['ydata']),max(dict['result']['yfit'])) graph.setY1AxisLimits(ymin,ymax) graph.setY2AxisLimits(ymin,ymax) graph.show() widget.resize(450,300) #widget.show() qt.qApp.processEvents() outfile = self.outdir+"/"+self.outfile+".png" pixmap = qt.QPixmap.grabWidget(widget) try: os.remove(outfile) except: pass if pixmap.save(outfile,'PNG'): qt.qApp.processEvents() graph.close() del graph widget.close() del widget return self.__getFitImage(self.outfile+".png") else: print("cannot generate image") return "" def getConcentrations(self): return self.concentrationsConversion.getConcentrationsAsHtml(\ self.concentrations) def getConcentrationsASCII(self): return self.concentrationsConversion.getConcentrationsAsAscii(\ self.concentrations) def getResult(self): text = "" if self.tableFlag == 0: return text text+="\n" text+= "<H2><a NAME=""%s""></a><FONT color=#009999>" % 'Fit_Peak_Results' text+= "%s:" % 'Fit Peak Results' text+= "</FONT></H2>" text+="<br>" result = self.fitresult['result'] if self.tableFlag == 1: labels=['Element','Group','Fit&nbsp; Area','Sigma'] else: labels=['Element','Group','Fit&nbsp; Area','Sigma','Energy','Ratio','FWHM','Chi&nbsp; square'] lemmon = ("#%x%x%x" % (255,250,205)).upper() hcolor = ("#%x%x%x" % (230,240,249)).upper() text += "<CENTER>" text += ("<nobr>") text += '<table width="80%" border="0" cellspacing="1" cellpadding="1" >' text += ( "<tr><b>") for l in range(len(labels)): if l < 2: text += '<td align="left" bgcolor=%s><b>%s</b></td>' % (hcolor,labels[l]) elif (l > 3) or (self.tableFlag == 1): text += '<td align="right" bgcolor=%s><b>%s</b></td>' % (hcolor,labels[l]) else: text += '<td align="center" bgcolor=%s><b>%s</b></td>' % (hcolor,labels[l]) text+="</b></tr>\n" for group in result['groups']: 
text+=("<tr>") ele,group0 = group.split() text += '<td align="left"><b>%s</b></td>' % ele text += '<td align="left"><b>%s</b></td>' % group0 fitarea = "%.6e" % result[group]['fitarea'] sigmaarea = "%.2e" % result[group]['sigmaarea'] text += '<td align="right"><b>%s</b></td>' % fitarea text += '<td align="right"><b>%s</b></td>' % sigmaarea text += '<td align="right"><b>&nbsp;</b></td>' text += '<td align="right"><b>&nbsp;</b></td>' text += '<td align="right"><b>&nbsp;</b></td>' text += '<td align="right"><b>&nbsp;</b></td>' text += '</tr>\n' if type(result[group]['peaks']) != type([]): iterator = [result[group]['peaks']] else: iterator = 1 * result[group]['peaks'] if self.tableFlag == 1: iterator = [] for peak in iterator: text += '<tr><td>&nbsp;</td>' name = peak energy = ("%.3f" % (result[group][peak]['energy'])) ratio = ("%.5f" % (result[group][peak]['ratio'])) area = ("%.6e" % (result[group][peak]['fitarea'])) sigma = ("%.2e" % (result[group][peak]['sigmaarea'])) fwhm = ("%.3f" % (result[group][peak]['fwhm'])) chisq = ("%.2f" % (result[group][peak]['chisq'])) fields = [name,area,sigma,energy,ratio,fwhm,chisq] for field in fields: if field == name: text+=('<td align="left" bgcolor=%s>%s</td>' % (lemmon,field)) else: text+=('<td align="right" bgcolor=%s>%s</td>' % (lemmon,field)) text+="</tr>\n" if type(result[group]['escapepeaks']) != type([]): iterator = [result[group]['escapepeaks']] else: iterator = 1 * result[group]['escapepeaks'] if self.tableFlag == 1: iterator = [] for peak0 in iterator: name = peak0+"esc" peak = peak0+"esc" if result[group][name]['ratio'] > 0.0: text += '<tr><td></td>' energy = ("%.3f" % (result[group][peak]['energy'])) ratio = ("%.5f" % (result[group][peak]['ratio'])) area = ("%.6e" % (result[group][peak]['fitarea'])) sigma = ("%.2e" % (result[group][peak]['sigmaarea'])) fwhm = ("%.3f" % (result[group][peak]['fwhm'])) chisq = ("%.2f" % (result[group][peak]['chisq'])) fields = [name,area,sigma,energy,ratio,fwhm,chisq] for field in fields: if field == name: text+=('<td align="left" bgcolor=%s>%s</td>' % (lemmon,field)) else: text+=('<td align="right" bgcolor=%s>%s</td>' % (lemmon,field)) text+="</tr>\n" text+=("</table>") text+=("</nobr>") text+="</CENTER>" return text def generateoutput(fitfile,outfile=None): report = QtMcaAdvancedFitReport(fitfile, outfile) report.writeReport() if __name__ == "__main__": if len(sys.argv) <2 : print("Usage: %s Input_Fit_Result_File [optional_output_file]" %\ sys.argv[0]) sys.exit(1) app = qt.QApplication(sys.argv) fitfile=sys.argv[1] if len(sys.argv) > 2: outfile = sys.argv[2] else: outfile = None generateoutput(fitfile,outfile) app.quit()
gpl-2.0
2,809,575,190,680,235,000
45.802713
154
0.474142
false
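getImage() in the report class above renders the spectrum, continuum and fit through the object-oriented matplotlib API (Figure plus Agg canvas), so the PNG can be written without a GUI event loop. A self-contained sketch of that path, using synthetic placeholder arrays rather than real PyMca fit results, could look like:

import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

energy = np.linspace(1.0, 20.0, 500)
continuum = 50.0 * np.exp(-energy / 10.0)
spectrum = continuum + 400.0 * np.exp(-((energy - 6.4) ** 2) / 0.02) + 1.0
yfit = continuum + 395.0 * np.exp(-((energy - 6.4) ** 2) / 0.02) + 1.0

fig = Figure(figsize=(6, 3))                  # same 6x3 inch canvas as above
canvas = FigureCanvas(fig)
ax = fig.add_axes([.15, .15, .8, .8])
ax.semilogy(energy, spectrum, 'k', lw=1.5, label='spectrum')
ax.semilogy(energy, continuum, 'g', lw=1.5, label='continuum')
ax.semilogy(energy, yfit, 'r', lw=1.5, label='fit')
ax.set_xlabel('Energy')
ax.set_ylabel('Counts')
ax.legend(prop={'size': 8}, frameon=False)
canvas.print_figure('fit_example.png')        # output file name is illustrative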
kastnerkyle/minet
minet/datasets.py
1
28453
# -*- coding: utf 8 -*- from __future__ import division import tarfile import os from scipy.io import wavfile import numpy as np import tables import numbers import random import string import fnmatch import theano from lxml import etree try: import urllib.request as urllib # for backwards compatibility except ImportError: import urllib2 as urllib def get_dataset_dir(dataset_name, data_dir=None, folder=None, create_dir=True): if not data_dir: data_dir = os.getenv("MINET_DATA", os.path.join( os.path.expanduser("~"), "minet_data")) if folder is None: data_dir = os.path.join(data_dir, dataset_name) else: data_dir = os.path.join(data_dir, folder) if not os.path.exists(data_dir) and create_dir: os.makedirs(data_dir) return data_dir def download(url, server_fname, local_fname=None, progress_update_percentage=5): """ An internet download utility modified from http://stackoverflow.com/questions/22676/ how-do-i-download-a-file-over-http-using-python/22776#22776 """ u = urllib.urlopen(url) if local_fname is None: local_fname = server_fname full_path = local_fname meta = u.info() with open(full_path, 'wb') as f: try: file_size = int(meta.get("Content-Length")) except TypeError: print("WARNING: Cannot get file size, displaying bytes instead!") file_size = 100 print("Downloading: %s Bytes: %s" % (server_fname, file_size)) file_size_dl = 0 block_sz = int(1E7) p = 0 while True: buffer = u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer) if (file_size_dl * 100. / file_size) > p: status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size) print(status) p += progress_update_percentage def check_fetch_iamondb(): partial_path = get_dataset_dir("iamondb") if not os.path.exists(partial_path): os.makedirs(partial_path) strokes_path = os.path.join(partial_path, "lineStrokes") ascii_path = os.path.join(partial_path, "ascii") if not os.path.exists(strokes_path) or not os.path.exists(ascii_path): raise ValueError("You must download the data from IAMOnDB, and" "unpack in %s" % partial_path) return strokes_path, ascii_path def plot_scatter_iamondb_example(X, y=None): import matplotlib.pyplot as plt rgba_colors = np.zeros((len(X), 4)) normed = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) # for red the first column needs to be one rgba_colors[:, 0] = normed[:, 0] # for blue last color column needs to be one rgba_colors[:, 2] = np.abs(1 - normed[:, 0]) # the fourth column needs to be alphas rgba_colors[:, 3] = np.ones((len(X),)) * .4 + .4 * normed[:, 0] if len(X[0]) == 3: plt.scatter(X[:, 1], X[:, 2], color=rgba_colors) elif len(X[0]) == 2: plt.scatter(X[:, 0], X[:, 1], color=rgba_colors) if y is not None: plt.title(y) def plot_lines_iamondb_example(X, y=None): import matplotlib.pyplot as plt val_index = np.where(X[:, 0] != 1)[0] contiguous = np.where((val_index[1:] - val_index[:-1]) == 1)[0] + 1 non_contiguous = np.where((val_index[1:] - val_index[:-1]) != 1)[0] + 1 prev_nc = 0 for nc in val_index[non_contiguous]: ind = ((prev_nc <= contiguous) & (contiguous < nc))[:-1] prev_nc = nc plt.plot(X[val_index[ind], 1], X[val_index[ind], 2]) plt.plot(X[prev_nc:, 1], X[prev_nc:, 2]) if y is not None: plt.title(y) # A trick for monkeypatching an instancemethod that when method is a # c-extension? 
there must be a better way class _textEArray(tables.EArray): pass class _handwritingEArray(tables.EArray): pass def fetch_iamondb(): strokes_path, ascii_path = check_fetch_iamondb() stroke_matches = [] for root, dirnames, filenames in os.walk(strokes_path): for filename in fnmatch.filter(filenames, '*.xml'): stroke_matches.append(os.path.join(root, filename)) ascii_matches = [] for root, dirnames, filenames in os.walk(ascii_path): for filename in fnmatch.filter(filenames, '*.txt'): ascii_matches.append(os.path.join(root, filename)) partial_path = get_dataset_dir("iamondb") hdf5_path = os.path.join(partial_path, "iamondb.h5") if not os.path.exists(hdf5_path): # setup tables compression_filter = tables.Filters(complevel=5, complib='blosc') hdf5_file = tables.openFile(hdf5_path, mode='w') handwriting = hdf5_file.createEArray(hdf5_file.root, 'handwriting', tables.Int32Atom(), shape=(0, 3), filters=compression_filter, expectedrows=len(ascii_matches)) handwriting_poslen = hdf5_file.createEArray(hdf5_file.root, 'handwriting_poslen', tables.Int32Atom(), shape=(0, 2), filters=compression_filter, expectedrows=len( ascii_matches)) text = hdf5_file.createEArray(hdf5_file.root, 'text', tables.Int32Atom(), shape=(0, 1), filters=compression_filter, expectedrows=len(ascii_matches)) text_poslen = hdf5_file.createEArray(hdf5_file.root, 'text_poslen', tables.Int32Atom(), shape=(0, 2), filters=compression_filter, expectedrows=len(ascii_matches)) current_text_pos = 0 current_handwriting_pos = 0 for na, ascii_file in enumerate(ascii_matches): if na % 100 == 0: print("Reading ascii file %i of %i" % (na, len(ascii_matches))) with open(ascii_file) as fp: cleaned = [t.strip() for t in fp.readlines() if 'OCR' not in t and 'CSR' not in t and t != '\r\n' and t != '\n'] # Find correspnding XML file for ascii file file_id = ascii_file.split(os.sep)[-2] submatches = [sf for sf in stroke_matches if file_id in sf] # Sort by file number submatches = sorted(submatches, key=lambda x: int( x.split(os.sep)[-1].split( "-")[-1][:-4])) # Skip files where ascii length and number of XML don't match # TODO: Figure out why this is happening if len(cleaned) != len(submatches): continue for n, stroke_file in enumerate(submatches): with open(stroke_file) as fp: tree = etree.parse(fp) root = tree.getroot() # Get all the values from the XML # 0th index is stroke ID, will become up/down s = np.array([[i, int(Point.attrib['x']), int(Point.attrib['y'])] for StrokeSet in root for i, Stroke in enumerate(StrokeSet) for Point in Stroke]) # flip y axis s[:, 2] = -s[:, 2] # Get end of stroke points c = s[1:, 0] != s[:-1, 0] ci = np.where(c == True)[0] nci = np.where(c == False)[0] # set pen down s[0, 0] = 0 s[nci, 0] = 0 # set pen up s[ci, 0] = 1 s[-1, 0] = 1 lh = len(s) for i in range(lh): handwriting.append(s[i][None]) handwriting_poslen.append( np.array([current_handwriting_pos, lh])[None]) current_handwriting_pos += lh lt = len(cleaned[n]) for i in range(lt): text.append( np.array(ord(cleaned[n][i]))[None, None]) text_poslen.append( np.array([current_text_pos, lt])[None]) current_text_pos += lt hdf5_file.close() hdf5_file = tables.openFile(hdf5_path, mode='r') handwriting = hdf5_file.root.handwriting handwriting_poslen = hdf5_file.root.handwriting_poslen text = hdf5_file.root.text text_poslen = hdf5_file.root.text_poslen # Monkeypatch text # A dirty hack to only monkeypatch text text.__class__ = _textEArray def text_getter(self, key): if isinstance(key, numbers.Integral) or isinstance(key, np.integer): p, l = text_poslen[key] return 
"".join(map(chr, self.read(p, p+l, 1))) elif isinstance(key, slice): start, stop, step = self._processRange(key.start, key.stop, key.step) if key.stop is None: stop = len(text_poslen) if key.start is None: start = 0 if stop <= start: # replicate slice where stop <= start return [] if stop >= len(text_poslen): stop = len(text_poslen) elif key.stop < 0 and key.stop is not None: stop = len(text_poslen) + key.stop if key.start < 0 and key.start is not None: start = len(text_poslen) + key.start return ["".join(map(chr, self.read(text_poslen[k][0], sum(text_poslen[k]), 1))) for k in range(start, stop, step)] # Patch __getitem__ in custom subclass, applying to all instances of it _textEArray.__getitem__ = text_getter # Monkeypatch handwriting # A dirty hack to only monkeypatch handwriting handwriting.__class__ = _handwritingEArray def handwriting_getter(self, key): if isinstance(key, numbers.Integral) or isinstance(key, np.integer): p, l = handwriting_poslen[key] return self.read(p, p+l, 1).astype('float32') elif isinstance(key, slice): start, stop, step = self._processRange(key.start, key.stop, key.step) if key.stop is None: stop = len(text_poslen) if key.start is None: start = 0 if stop <= start: # replicate slice where stop <= start return [] if stop >= len(text_poslen): stop = len(text_poslen) elif key.stop < 0 and key.stop is not None: stop = len(text_poslen) + key.stop if key.start < 0 and key.start is not None: start = len(text_poslen) + key.start return [self.read(handwriting_poslen[k][0], sum(handwriting_poslen[k]), 1).astype('float32') for k in range(start, stop, step)] # Patch __getitem__ in custom subclass, applying to all instances of it _handwritingEArray.__getitem__ = handwriting_getter X = handwriting y = text return (X, y) """ def load_fruitspeech(): # Check if dataset is in the data directory. data_path = os.path.join(os.path.split(__file__)[0], "data") if not os.path.exists(data_path): os.makedirs(data_path) dataset = 'audio.tar.gz' data_file = os.path.join(data_path, dataset) if os.path.isfile(data_file): dataset = data_file if not os.path.isfile(data_file): try: import urllib urllib.urlretrieve('http://google.com') url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz' except AttributeError: import urllib.request as urllib url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz' print('Downloading data from %s' % url) urllib.urlretrieve(url, data_file) print('... 
loading data') if not os.path.exists(os.path.join(data_path, "audio")): tar = tarfile.open(data_file) os.chdir(data_path) tar.extractall() tar.close() h5_file_path = os.path.join(data_path, "saved_fruit.h5") if not os.path.exists(h5_file_path): data_path = os.path.join(data_path, "audio") audio_matches = [] for root, dirnames, filenames in os.walk(data_path): for filename in fnmatch.filter(filenames, '*.wav'): audio_matches.append(os.path.join(root, filename)) random.seed(1999) random.shuffle(audio_matches) # http://mail.scipy.org/pipermail/numpy-discussion/2011-March/055219.html h5_file = tables.openFile(h5_file_path, mode='w') data_x = h5_file.createVLArray(h5_file.root, 'data_x', tables.Float32Atom(shape=()), filters=tables.Filters(1)) data_x_shapes = h5_file.createVLArray(h5_file.root, 'data_x_shapes', tables.Int32Atom(shape=()), filters=tables.Filters(1)) data_y = h5_file.createVLArray(h5_file.root, 'data_y', tables.Int32Atom(shape=()), filters=tables.Filters(1)) for wav_path in audio_matches: # Convert chars to int classes word = wav_path.split(os.sep)[-1][:-6] chars = [ord(c) - 97 for c in word] data_y.append(np.array(chars, dtype='int32')) fs, d = wavfile.read(wav_path) # Preprocessing from A. Graves "Towards End-to-End Speech # Recognition" Pxx, _, _, _ = specgram(d, NFFT=256, noverlap=128) data_x_shapes.append(np.array(Pxx.T.shape, dtype='int32')) data_x.append(Pxx.T.astype('float32').flatten()) h5_file.close() h5_file = tables.openFile(h5_file_path, mode='r') data_x = h5_file.root.data_x data_x_shapes = h5_file.root.data_x_shapes data_y = h5_file.root.data_y # A dirty hack to only monkeypatch data_x data_x.__class__ = _cVLArray # override getter so that it gets reshaped to 2D when fetched old_getter = data_x.__getitem__ def getter(self, key): if isinstance(key, numbers.Integral) or isinstance(key, np.integer): return old_getter(key).reshape(data_x_shapes[key]).astype( theano.config.floatX) elif isinstance(key, slice): start, stop, step = self._processRange(key.start, key.stop, key.step) return [o.reshape(s) for o, s in zip( self.read(start, stop, step), data_x_shapes[slice( start, stop, step)])] # Patch __getitem__ in custom subclass, applying to all instances of it _cVLArray.__getitem__ = getter train_x = data_x[:80] train_y = data_y[:80] valid_x = data_x[80:90] valid_y = data_y[80:90] test_x = data_x[90:] test_y = data_y[90:] rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)] return rval def load_cmuarctic(): # Check if dataset is in the data directory. 
data_path = os.path.join(os.path.split(__file__)[0], "data") if not os.path.exists(data_path): os.makedirs(data_path) urls = ['http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_awb_arctic-0.95-release.tar.bz2', 'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_bdl_arctic-0.95-release.tar.bz2', 'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_clb_arctic-0.95-release.tar.bz2', 'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_jmk_arctic-0.95-release.tar.bz2', 'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_ksp_arctic-0.95-release.tar.bz2', 'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_rms_arctic-0.95-release.tar.bz2', 'http://www.speech.cs.cmu.edu/cmu_arctic/packed/cmu_us_slt_arctic-0.95-release.tar.bz2', ] data_files = [] for url in urls: dataset = url.split('/')[-1] data_file = os.path.join(data_path, dataset) data_files.append(data_file) if os.path.isfile(data_file): dataset = data_file if not os.path.isfile(data_file): try: import urllib urllib.urlretrieve('http://google.com') except AttributeError: import urllib.request as urllib print('Downloading data from %s' % url) urllib.urlretrieve(url, data_file) print('... loading data') folder_paths = [] for data_file in data_files: folder_name = data_file.split(os.sep)[-1].split("-")[0] folder_path = os.path.join(data_path, folder_name) folder_paths.append(folder_path) if not os.path.exists(folder_path): tar = tarfile.open(data_file) os.chdir(data_path) tar.extractall() tar.close() h5_file_path = os.path.join(data_path, "saved_cmu.h5") if not os.path.exists(h5_file_path): # http://mail.scipy.org/pipermail/numpy-discussion/2011-March/055219.html h5_file = tables.openFile(h5_file_path, mode='w') data_x = h5_file.createVLArray(h5_file.root, 'data_x', tables.Float32Atom(shape=()), filters=tables.Filters(1)) data_x_shapes = h5_file.createVLArray(h5_file.root, 'data_x_shapes', tables.Int32Atom(shape=()), filters=tables.Filters(1)) data_y = h5_file.createVLArray(h5_file.root, 'data_y', tables.Int32Atom(shape=()), filters=tables.Filters(1)) data_meta = h5_file.createVLArray(h5_file.root, 'data_meta', tables.StringAtom(200), filters=tables.Filters(1)) for folder_path in folder_paths: audio_matches = [] for root, dirnames, filenames in os.walk(folder_path): for filename in fnmatch.filter(filenames, '*.wav'): audio_matches.append(os.path.join(root, filename)) f = open(os.path.join(folder_path, "etc", "txt.done.data")) read_raw_text = f.readlines() f.close() # Remove all punctuations list_text = [t.strip().lower().translate( string.maketrans("", ""), string.punctuation).split(" ")[1:-1] for t in read_raw_text] # Get rid of numbers, even though it will probably hurt # recognition on certain examples cleaned_lookup = {lt[0]: " ".join(lt[1:]).translate( None, string.digits).strip() for lt in list_text} data_meta.append(folder_path.split(os.sep)[-1]) for wav_path in audio_matches: lookup_key = wav_path.split(os.sep)[-1][:-4] # Some files aren't consistent! if "_" in cleaned_lookup.keys()[0] and "_" not in lookup_key: # Needs an _ to match text format... sometimes! lookup_key = lookup_key[:6] + "_" + lookup_key[6:] elif "_" not in cleaned_lookup.keys()[0]: lookup_key = lookup_key.translate(None, "_") try: words = cleaned_lookup[lookup_key] # Convert chars to int classes chars = [ord(c) - 97 for c in words] # Make spaces last class chars = [c if c >= 0 else 26 for c in chars] data_y.append(np.array(chars, dtype='int32')) # Convert chars to int classes fs, d = wavfile.read(wav_path) # Preprocessing from A. 
Graves "Towards End-to-End Speech # Recognition" Pxx, _, _, _ = plt.specgram(d, NFFT=256, noverlap=128) data_x_shapes.append(np.array(Pxx.T.shape, dtype='int32')) data_x.append(Pxx.T.astype('float32').flatten()) except KeyError: # Necessary because some labels are missing in some folders print("Skipping %s due to missing key" % wav_path) h5_file.close() h5_file = tables.openFile(h5_file_path, mode='r') data_x = h5_file.root.data_x data_x_shapes = h5_file.root.data_x_shapes data_y = h5_file.root.data_y # A dirty hack to only monkeypatch data_x data_x.__class__ = _cVLArray # override getter so that it gets reshaped to 2D when fetched old_getter = data_x.__getitem__ def getter(self, key): if isinstance(key, numbers.Integral) or isinstance(key, np.integer): return old_getter(key).reshape(data_x_shapes[key]).astype( theano.config.floatX) elif isinstance(key, slice): start, stop, step = self._processRange(key.start, key.stop, key.step) return [o.reshape(s) for o, s in zip( self.read(start, stop, step), data_x_shapes[slice( start, stop, step)])] # Patch __getitem__ in custom subclass, applying to all instances of it _cVLArray.__getitem__ = getter train_x = data_x[:6000] train_y = data_y[:6000] valid_x = data_x[6000:7500] valid_y = data_y[6000:7500] test_x = data_x[7500:] test_y = data_y[7500:] rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)] return rval def load_librispeech(): # Check if dataset is in the data directory. data_path = os.path.join(os.path.split(__file__)[0], "data") if not os.path.exists(data_path): os.makedirs(data_path) dataset = 'dev-clean.tar.gz' data_file = os.path.join(data_path, dataset) if os.path.isfile(data_file): dataset = data_file if not os.path.isfile(data_file): try: import urllib urllib.urlretrieve('http://google.com') url = 'http://www.openslr.org/resources/12/dev-clean.tar.gz' except AttributeError: import urllib.request as urllib url = 'http://www.openslr.org/resources/12/dev-clean.tar.gz' print('Downloading data from %s' % url) urllib.urlretrieve(url, data_file) print('... 
loading data') if not os.path.exists(os.path.join(data_path, "LibriSpeech", "dev-clean")): tar = tarfile.open(data_file) os.chdir(data_path) tar.extractall() tar.close() h5_file_path = os.path.join(data_path, "saved_libri.h5") if not os.path.exists(h5_file_path): data_path = os.path.join(data_path, "LibriSpeech", "dev-clean") audio_matches = [] for root, dirnames, filenames in os.walk(data_path): for filename in fnmatch.filter(filenames, '*.flac'): audio_matches.append(os.path.join(root, filename)) text_matches = [] for root, dirnames, filenames in os.walk(data_path): for filename in fnmatch.filter(filenames, '*.txt'): text_matches.append(os.path.join(root, filename)) # http://mail.scipy.org/pipermail/numpy-discussion/2011-March/055219.html h5_file = tables.openFile(h5_file_path, mode='w') data_x = h5_file.createVLArray(h5_file.root, 'data_x', tables.Float32Atom(shape=()), filters=tables.Filters(1)) data_x_shapes = h5_file.createVLArray(h5_file.root, 'data_x_shapes', tables.Int32Atom(shape=()), filters=tables.Filters(1)) data_y = h5_file.createVLArray(h5_file.root, 'data_y', tables.Int32Atom(shape=()), filters=tables.Filters(1)) for full_t in text_matches: f = open(full_t, 'r') for line in f.readlines(): word_splits = line.strip().split(" ") file_tag = word_splits[0] words = word_splits[1:] # Convert chars to int classes chars = [ord(c) - 97 for c in (" ").join(words).lower()] # Make spaces last class chars = [c if c >= 0 else 26 for c in chars] data_y.append(np.array(chars, dtype='int32')) audio_path = [a for a in audio_matches if file_tag in a] if len(audio_path) != 1: raise ValueError("More than one match for" "tag %s!" % file_tag) if not os.path.exists(audio_path[0][:-5] + ".wav"): r = os.system("ffmpeg -i %s %s.wav" % (audio_path[0], audio_path[0][:-5])) if r: raise ValueError("A problem occured converting flac to" "wav, make sure ffmpeg is installed") wav_path = audio_path[0][:-5] + '.wav' fs, d = wavfile.read(wav_path) # Preprocessing from A. Graves "Towards End-to-End Speech # Recognition" Pxx, _, _, _ = plt.specgram(d, NFFT=256, noverlap=128) data_x_shapes.append(np.array(Pxx.T.shape, dtype='int32')) data_x.append(Pxx.T.astype('float32').flatten()) f.close() h5_file.close() h5_file_path = os.path.join(data_path, "saved_libri.h5") h5_file = tables.openFile(h5_file_path, mode='r') data_x = h5_file.root.data_x data_x_shapes = h5_file.root.data_x_shapes data_y = h5_file.root.data_y # A dirty hack to only monkeypatch data_x data_x.__class__ = _cVLArray # override getter so that it gets reshaped to 2D when fetched old_getter = data_x.__getitem__ def getter(self, key): if isinstance(key, numbers.Integral) or isinstance(key, np.integer): return old_getter(key).reshape(data_x_shapes[key]).astype( theano.config.floatX) elif isinstance(key, slice): start, stop, step = self._processRange(key.start, key.stop, key.step) return [o.reshape(s) for o, s in zip( self.read(start, stop, step), data_x_shapes[slice( start, stop, step)])] # Patch __getitem__ in custom subclass, applying to all instances of it _cVLArray.__getitem__ = getter train_x = data_x[:2000] train_y = data_y[:2000] valid_x = data_x[2000:2500] valid_y = data_y[2000:2500] test_x = data_x[2500:] test_y = data_y[2500:] rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)] return rval """
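fetch_iamondb() above turns each XML stroke set into an (N, 3) array whose first column starts out as a stroke id and is then rewritten into a pen-lift flag. A small stand-alone sketch of that encoding (the toy input is an assumption; only the logic mirrors the loader):

import numpy as np

def encode_pen_up(points):
    # points come in as [stroke_id, x, y]; return [pen_up, x, -y]
    s = np.asarray(points, dtype=np.int32).copy()
    s[:, 2] = -s[:, 2]                        # flip the y axis, as in the loader
    stroke_change = s[1:, 0] != s[:-1, 0]     # True where the stroke id changes
    s[:, 0] = 0                               # default: pen down
    s[np.where(stroke_change)[0], 0] = 1      # last point of each stroke: pen up
    s[-1, 0] = 1                              # final point is always a pen lift
    return s

toy = [[0, 10, 5], [0, 11, 6], [1, 30, 9], [1, 31, 8]]
print(encode_pen_up(toy))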
bsd-3-clause
-6,510,465,535,556,894,000
41.722222
100
0.518047
false
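The monkeypatched __getitem__ methods in minet/datasets.py above all rely on the same storage scheme: every variable-length sequence is flattened into one large array, and a companion (position, length) table records where each sequence lives. A plain-numpy sketch of that idea (numpy stands in for the PyTables EArrays so the example stays self-contained):

import numpy as np

flat = np.arange(12).reshape(12, 1)            # rows of all sequences, concatenated
poslen = np.array([[0, 5], [5, 3], [8, 4]])    # (start, length) for each sequence

def get_sequence(k):
    p, l = poslen[k]
    return flat[p:p + l]

print(get_sequence(1))                                       # rows 5..7
print([len(get_sequence(k)) for k in range(len(poslen))])    # [5, 3, 4]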
fegonda/icon_demo
code/model/unet/generate_data.py
1
45068
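The generate_data.py source that follows augments its training patches with an elastic deformation (deform_images / deform_images_list): one random displacement field, smoothed with a Gaussian, is applied to the image and to its label through the same coordinates so the pair stays aligned. A hedged, self-contained sketch of that idea; scale=10 and sigma=5 mirror the values used in the file, while the function name and toy data here are illustrative:

import numpy as np
import scipy.ndimage

def elastic_deform_pair(image, label, scale=10.0, sigma=5.0, rng=np.random):
    rows, cols = np.meshgrid(np.arange(image.shape[0]),
                             np.arange(image.shape[1]), indexing='ij')
    # random displacement per pixel, smoothed so neighbouring pixels move together
    dx = scipy.ndimage.gaussian_filter(rng.normal(size=image.shape, scale=scale), sigma)
    dy = scipy.ndimage.gaussian_filter(rng.normal(size=image.shape, scale=scale), sigma)
    coords = np.vstack([(rows + dx).flatten(), (cols + dy).flatten()])

    def warp(a):
        # same coordinates for image and label keeps them registered
        return scipy.ndimage.map_coordinates(a, coords, mode='reflect').reshape(a.shape)

    return warp(image), warp(label)

img = np.random.rand(64, 64)
lbl = np.uint8(img > 0.5)
wimg, wlbl = elastic_deform_pair(img, lbl)
print(wimg.shape, wlbl.shape)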
import os import sys import skimage.transform import skimage.exposure import time import glob import numpy as np import mahotas import random import matplotlib import matplotlib.pyplot as plt import scipy import scipy.ndimage import json from scipy.ndimage.filters import maximum_filter base_path = os.path.dirname(__file__) sys.path.insert(1,os.path.join(base_path, '../../common')) sys.path.insert(2,os.path.join(base_path, '../../database')) from utility import Utility from settings import Paths from project import Project from paths import Paths from db import DB # the idea is to grow the labels to cover the whole membrane # image and label should be [0,1] def adjust_imprecise_boundaries(image, label, number_iterations=5): label = label.copy() label_orig = label.copy() for i in xrange(number_iterations): # grow labels by one pixel label = maximum_filter(label, 2) # only keep pixels that are on dark membrane non_valid_label = np.logical_and(label==1, image>0.7) label[non_valid_label] = 0 # make sure original labels are preserved label = np.logical_or(label==1, label_orig==1) return label def deform_images(image1, image2, image3=None): # assumes image is uint8 def apply_deformation(image, coordinates): # ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid. deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect') deformed = np.reshape(deformed, image.shape) return deformed if np.max(image1) < 1.1: image1 = np.uint8(image1*255) image2 = np.uint8(image2*255) if not image3 is None: image3 = np.uint8(image3*255) displacement_x = np.random.normal(size=image1.shape, scale=10) displacement_y = np.random.normal(size=image1.shape, scale=10) # smooth over image coords_x, coords_y = np.meshgrid(np.arange(0,image1.shape[0]), np.arange(0,image1.shape[1]), indexing='ij') displacement_x = coords_x.flatten() + scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten() displacement_y = coords_y.flatten() + scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten() coordinates = np.vstack([displacement_x, displacement_y]) deformed1 = apply_deformation(image1, coordinates) / 255.0 deformed2 = apply_deformation(image2, coordinates) / 255.0 if not image3 is None: deformed3 = apply_deformation(image3, coordinates) return (deformed1, deformed2, deformed3) return (deformed1, deformed2) def deform_images_list(images): # assumes image is uint8 def apply_deformation(image, coordinates): # ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid. 
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect') deformed = np.reshape(deformed, image.shape) return deformed displacement_x = np.random.normal(size=images.shape[:2], scale=10) displacement_y = np.random.normal(size=images.shape[:2], scale=10) # smooth over image coords_x, coords_y = np.meshgrid(np.arange(0,images.shape[0]), np.arange(0,images.shape[1]), indexing='ij') displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten() displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten() coordinates = np.vstack([displacement_x, displacement_y]) deformed = images.copy() for i in xrange(images.shape[2]): deformed[:,:,i] = apply_deformation(np.uint8(images[:,:,i]), coordinates) return deformed def normalizeImage(img, saturation_level=0.05, doClahe=False): #was 0.005 if not doClahe: sortedValues = np.sort( img.ravel()) minVal = np.float32(sortedValues[np.int(len(sortedValues) * (saturation_level / 2))]) maxVal = np.float32(sortedValues[np.int(len(sortedValues) * (1 - saturation_level / 2))]) normImg = np.float32(img - minVal) * (255 / (maxVal-minVal)) normImg[normImg<0] = 0 normImg[normImg>255] = 255 output = (np.float32(normImg) / 255.0) return output else: output = skimage.exposure.equalize_adapthist(img) return output def generate_experiment_data_supervised(purpose='train', nsamples=1000, patchSize=29, balanceRate=0.5, rng=np.random): start_time = time.time() if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'): pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/' else: pathPrefix = '/n/pfister_lab/vkaynig/' img_search_string_membraneImages = pathPrefix + 'labels/membranes_nonDilate/' + purpose + '/*.tif' img_search_string_backgroundMaskImages = pathPrefix + 'labels/background_nonDilate/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_label = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_backgroundMask = sorted( glob.glob( img_search_string_backgroundMaskImages ) ) whole_set_patches = np.zeros((nsamples, patchSize*patchSize), dtype=np.float) whole_set_labels = np.zeros(nsamples, dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' 
counter = 0 img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) for img_index in xrange(np.shape(img_files_gray)[0]): img = mahotas.imread(img_files_gray[img_index]) img = normalizeImage(img) grayImages[:,:,img_index] = img label_img = mahotas.imread(img_files_label[img_index]) labelImages[:,:,img_index] = label_img mask_img = mahotas.imread(img_files_backgroundMask[img_index]) maskImages[:,:,img_index] = mask_img for img_index in xrange(np.shape(img_files_gray)[0]): img = grayImages[:,:,img_index] label_img = labelImages[:,:,img_index] mask_img = maskImages[:,:,img_index] #get rid of invalid image borders border_patch = np.int(np.ceil(patchSize/2.0)) border = np.int(np.ceil(np.sqrt(2*(border_patch**2)))) label_img[:border,:] = 0 #top label_img[-border:,:] = 0 #bottom label_img[:,:border] = 0 #left label_img[:,-border:] = 0 #right mask_img[:border,:] = 0 mask_img[-border:,:] = 0 mask_img[:,:border] = 0 mask_img[:,-border:] = 0 membrane_indices = np.nonzero(label_img) non_membrane_indices = np.nonzero(mask_img) positiveSample = True for i in xrange(nsamples_perImage): if counter >= nsamples: break if positiveSample: randmem = random.choice(xrange(len(membrane_indices[0]))) (row,col) = (membrane_indices[0][randmem], membrane_indices[1][randmem]) label = 1.0 positiveSample = False else: randmem = random.choice(xrange(len(non_membrane_indices[0]))) (row,col) = (non_membrane_indices[0][randmem], non_membrane_indices[1][randmem]) label = 0.0 positiveSample = True imgPatch = img[row-border+1:row+border, col-border+1:col+border] imgPatch = skimage.transform.rotate(imgPatch, random.choice(xrange(360))) imgPatch = imgPatch[border-border_patch:border+border_patch-1,border-border_patch:border+border_patch-1] if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) imgPatch = np.rot90(imgPatch, random.randint(0,3)) whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = label counter += 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() #remove the sorting in image order shuffleIndex = rng.permutation(np.shape(labels)[0]) for i in xrange(np.shape(labels)[0]): whole_data[i,:] = data[shuffleIndex[i],:] whole_set_labels[i] = labels[shuffleIndex[i]] data_set = (whole_data, whole_set_labels) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ' + '%.2fm' % (total_time / 60.) 
rval = data_set return rval def generate_image_data(img, patchSize=29, rows=1): img = normalizeImage(img) # pad image borders border = np.int(np.ceil(patchSize/2.0)) img_padded = np.pad(img, border, mode='reflect') whole_set_patches = np.zeros((len(rows)*img.shape[1], patchSize**2)) counter = 0 for row in rows: for col in xrange(img.shape[1]): imgPatch = img_padded[row+1:row+2*border, col+1:col+2*border] whole_set_patches[counter,:] = imgPatch.flatten() counter += 1 #normalize data whole_set_patches = np.float32(whole_set_patches) whole_set_patches = whole_set_patches - 0.5 return whole_set_patches def stupid_map_wrapper(parameters): f = parameters[0] args = parameters[1:] return f(*args) def gen_annotated_image(path, dim): image = np.zeros( (dim[0], dim[1]) ) # assumes image[:,:] = -1 annotations = [] # load the annotations with open( path ) as labels_f: annotations = json.load( labels_f ) n_labels = len(annotations) if n_labels == 0: return for i_label in range(n_labels): i_coord = 0 coordinates = annotations[ i_label ] for i in range(0, len(coordinates), 2): x = min(coordinates[i], dim[1]-1) y = min(coordinates[i+1], dim[0]-1) #x = coordinates[i] #y = coordinates[i+1] image[x][y] = i_label #print 'row:', x, 'col:', y, 'val:', i_label return image, annotations def get_data_files(project, purpose): files_gray = [] files_annotations = [] if purpose == 'train': purpose_id = 0 path = Paths.TrainGrayscale elif purpose == 'validation': purpose_id = 1 path = Paths.ValidGrayscale images = DB.getImages( project.id, purpose=purpose_id, new=False, annotated=True ) # build the list of images to sample from while discarding those # without annnotations. for image in images: d_path = '%s/%s.tif'%(path, image.id) m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id) if os.path.exists( d_path ) and os.path.exists( m_path ): files_gray.append( d_path ) files_annotations.append( m_path ) return files_gray, files_annotations def gen_validation_data(project, nsamples=1000, patchSize=29, outPatchSize=1): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image files_gray, files_annotations = get_data_files( project, purpose='validation') # return nothing if images or annotations not found if len( files_gray ) == 0 or len( files_annotations ) == 0: return None print files_gray print files_annotations whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) return None def gen_data(project, purpose, nsamples=1000, patchSize=29, outPatchSize=1): if project == None: return n_labels = len( project.labels ) start_time = time.time() files_gray, files_annotations = get_data_files( project, purpose=purpose) # return nothing if images or annotations not found if len( files_gray ) == 0 or len( files_annotations ) == 0: return None whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? 
nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) )) counter = 0 # pad image borders border = np.int(np.ceil(patchSize/2.0)) pad = patchSize n_samples_remaining = nsamples n_images = len(files_gray) n_samples_per_label = [ int(nsamples/n_labels) for label in project.labels] for i_image in range( n_images ): if counter >= nsamples: break img = mahotas.imread(files_gray[ i_image ]) ann, annotations = gen_annotated_image( files_annotations[ i_image ], img.shape ) img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric') img = normalizeImage(img, doClahe=True) ann = np.pad(ann, ((pad, pad), (pad, pad)), 'symmetric') #ann = adjust_imprecise_boundaries(img, ann, 0) # sample equally from each label for i_label in range( n_labels ): # sample evenly across images n_samples = n_samples_per_label[ i_label ] if n_samples > (n_images - i_image): n_samples = int(n_samples/(n_images - i_image)) coordinates = annotations[ i_label ] n_coordinates = len(coordinates)/2 n_samples = min( n_coordinates, n_samples ) if n_samples == 0: continue ''' print '=====================>i_label:', i_label print 'n_coordinates:', n_coordinates print 'n_samples:', n_samples ''' # stay within bounds of available labels coordinates = np.array( coordinates ).reshape( ( n_coordinates ,2) ) indices = np.random.choice(xrange( n_coordinates ), n_samples, replace=False) for i_coord in indices: #(row, col) = (coordinates[i_coord], coordinates[i_coord+1]) (row, col) = (coordinates[i_coord][0], coordinates[i_coord][1]) ''' print '---------------------------------------' print 'sample#:', counter print 'i_coord:', i_coord print 'origin row:', row, 'col:', col print 'extract:' print 'row:', row, 'col:', col, 'val:', ann[row, col] ''' row += pad col += pad r1 = row-border r2 = row+border c1 = col-border c2 = col+border imgPatch = img[r1:r2,c1:c2] annPatch = ann[r1:r2,c1:c2] n_unique = len(np.where( annPatch > -1 )) ''' print '**n_unqiue:', n_unique print '**annPatch:', np.unique(annPatch) print '**annPatch:', np.where( annPatch > -1 ) ''' if n_unique != n_labels: continue ''' print 'imgpatch shape:', imgPatch.shape print 'annpatch shape:', annPatch.shape print 'patch minmax:', np.min(imgPatch), np.max(imgPatch) print 'label minmax:', np.min(annPatch), np.max(annPatch) print 'extracted label unique:', np.unique(annPatch) ''' if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) annPatch = np.fliplr(annPatch) rotateInt = random.randint(0,3) imgPatch = np.rot90(imgPatch, rotateInt) annPatch = np.rot90(annPatch, rotateInt) #print 'rotated label unique:', np.unique(annPatch) #print 'labels:',annPatch offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) annPatch = annPatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] ''' print 'constraint label unique:', np.unique(annPatch) print 'labels:',annPatch print 'final:' print annPatch.flatten() print np.unique(annPatch) ''' whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = np.int32(annPatch.flatten()) ''' print 'patch' print whole_set_patches[counter,:] print 'patch minmax:', np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] ) print 'labels' print whole_set_labels[counter] print 'labels unique:', np.unique( whole_set_labels[counter] ) a = whole_set_labels[counter] + 1 print 'labels count:', np.bincount( a ) print 'ann[row,col]:', ann[row,col] print 'label:', i_label ''' if len(np.unique( whole_set_labels[counter] )) <= 1: print 
np.unique(annPatch) print np.unique(np.int32(annPatch.flatten())) print np.unique( whole_set_labels[counter] ) print '==> problem <==' exit(1) counter += 1 n_samples_per_label[ i_label ] -= n_samples #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(labels)[0]) for i in xrange(np.shape(labels)[0]): whole_data[i,:] = data[shuffleIndex[i],:] whole_set_labels[i,:] = labels[shuffleIndex[i],:] data_set = (whole_data, whole_set_labels) end_time = time.time() total_time = (end_time - start_time) print print 'data sampling took:', total_time / 60. return data_set def gen_data_old(project, purpose, nsamples=1000, patchSize=29, outPatchSize=1): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image print 'gen_data - purpose:', purpose if project == None: return n_labels = len( project.labels ) start_time = time.time() files_gray, files_annotations = get_data_files( project, purpose=purpose) # return nothing if images or annotations not found if len( files_gray ) == 0 or len( files_annotations ) == 0: return None print files_gray print files_annotations whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' counter = 0 # pad image borders border = np.int(np.ceil(patchSize/2.0)) pad = patchSize n_samples_remaining = nsamples n_images = len(files_gray) n_samples_per_label = [ int(nsamples/n_labels) for label in project.labels] print 'n_samples_per_label:', n_samples_per_label print 'nsamples:',nsamples for i_image in range( n_images ): if counter >= nsamples: break img = mahotas.imread(files_gray[ i_image ]) ann, annotations = gen_annotated_image( files_annotations[ i_image ], img.shape ) img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric') img = normalizeImage(img, doClahe=True) # get the label indices #indices = np.nonzero( ann ) ann = np.pad(ann, ((pad, pad), (pad, pad)), 'symmetric') # set pixel values to label #ann = ann - 1 #ann[ ann < 0 ] = 0 print ann.shape print img.shape #ann = adjust_imprecise_boundaries(img, ann, 0) print 'min-max' print np.min( ann ), np.max( ann ) print np.min( img ), np.max( img ) print '--------' print 'counter:', counter print 'i_image:',i_image print 'image',files_gray[i_image] # sample equally from each label for i_label in range( n_labels ): # sample evenly across images n_samples = n_samples_per_label[ i_label ] if n_samples > (n_images - i_image): n_samples = int(n_samples/(n_images - i_image)) coordinates = annotations[ i_label ] n_coordinates = len(coordinates)/2 n_samples = min( n_coordinates, n_samples ) print 'n_samples:', n_samples if n_samples == 0: continue print '=====================>i_label:', i_label print 'n_coordinates:', n_coordinates print 'n_samples:', n_samples # stay within bounds of available labels coordinates = np.array( coordinates ).reshape( ( n_coordinates ,2) ) indices = np.random.choice(xrange( n_coordinates ), n_samples, replace=False) print 'indices:', indices for i_coord in indices: #(row, col) = (coordinates[i_coord], coordinates[i_coord+1]) (row, col) = (coordinates[i_coord][0], coordinates[i_coord][1]) print 
'---------------------------------------' print 'sample#:', counter print 'i_coord:', i_coord print 'origin row:', row, 'col:', col print 'extract:' print 'row:', row, 'col:', col, 'val:', ann[row, col] row += pad col += pad print 'padding:' print 'row:', row, 'col:', col, 'val:', ann[row, col] #imgPatch = img[row:row+patchSize, col:col+patchSize] #annPatch = ann[row:row+patchSize, col:col+patchSize] r1 = row-border r2 = row+border c1 = col-border c2 = col+border imgPatch = img[r1:r2,c1:c2] annPatch = ann[r1:r2,c1:c2] print 'imgpatch shape:', imgPatch.shape print 'annpatch shape:', annPatch.shape print 'patch minmax:', np.min(imgPatch), np.max(imgPatch) print 'label minmax:', np.min(annPatch), np.max(annPatch) print 'extracted label unique:', np.unique(annPatch) if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) annPatch = np.fliplr(annPatch) rotateInt = random.randint(0,3) imgPatch = np.rot90(imgPatch, rotateInt) annPatch = np.rot90(annPatch, rotateInt) print 'rotated label unique:', np.unique(annPatch) ''' print 'rotated:' print 'img minmax:', np.min(imgPatch), np.max(imgPatch) print np.unique(annPatch) print annPatch.flatten() imgPatch, annPatch = deform_images( imgPatch, annPatch ) annPatch = np.round( annPatch ) max_imgPatch = np.double(np.max(imgPatch)) max_annPatch = np.double(np.max(annPatch)) print 'deformed:' print 'img minmax:', np.min(imgPatch), np.max(imgPatch) print annPatch.flatten() print np.unique(annPatch) print 'max_imgPatch:',max_imgPatch print 'max_annPatch:', max_annPatch if max_imgPatch > 0.0: imgPatch = imgPatch / max_imgPatch if max_annPatch > 0.0: annPatch = annPatch / max_annPatch print 'scaled:' print 'img minmax:', np.min(imgPatch), np.max(imgPatch) print annPatch.flatten() print np.unique(annPatch) ''' print 'labels:',annPatch offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) annPatch = annPatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] print 'constraint label unique:', np.unique(annPatch) print 'labels:',annPatch print 'final:' print annPatch.flatten() print np.unique(annPatch) whole_set_patches[counter,:] = imgPatch.flatten() #whole_set_labels[counter] = np.int32(annPatch.flatten()) whole_set_labels[counter] = np.int32(annPatch.flatten()) print 'patch' print whole_set_patches[counter,:] print 'patch minmax:', np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] ) print 'labels' print whole_set_labels[counter] print 'labels unique:', np.unique( whole_set_labels[counter] ) a = whole_set_labels[counter] + 1 print 'labels count:', np.bincount( a ) print 'ann[row,col]:', ann[row,col] print 'label:', i_label if len(np.unique( whole_set_labels[counter] )) <= 1: print '==> problem <==' exit(1) counter += 1 #n_img_samples -= n_label_samples #n_samples_remaining -= n_label_samples n_samples_per_label[ i_label ] -= n_samples print counter print '-----' print 'counter:', counter print 'n_samples_per_label:', n_samples_per_label #normalize data print 'img minmax:', np.min(whole_set_patches), np.max(whole_set_patches) whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 print 'data maxmin:', np.max(whole_data), np.min(whole_data) print 'labels maxmin:', np.max(whole_set_labels), np.min(whole_set_labels) data = whole_data.copy() labels = whole_set_labels.copy() ''' #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(labels)[0]) for i in xrange(np.shape(labels)[0]): whole_data[i,:] = data[shuffleIndex[i],:] 
whole_set_labels[i,:] = labels[shuffleIndex[i],:] ''' data_set = (whole_data, whole_set_labels) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ', total_time / 60. print 'finished sampling data' return data_set # changed the patch sampling to use upper left corner instead of middle pixel # for patch labels it doesn't matter and it makes sampling even and odd patches easier def generate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image start_time = time.time() pathPrefix = '/media/vkaynig/Data2/Cmor_paper_data/not_normalized/' # pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Thalamus-LGN/Data/25-175_train/' #pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cerebellum-P7/Dense/' # pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cortex-ECS/' if not os.path.exists(pathPrefix): pathPrefix = '/n/pfister_lab/vkaynig/' # if purpose=='train': # if np.random.random()>0.5: # pathPrefix = pathPrefix + 'dense40_train/' # else: # pathPrefix = pathPrefix + 'dense49_train/' # else: # pathPrefix = pathPrefix + 'dense40_train/' print "#################################" print purpose print pathPrefix img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif' img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' #<felix-addition> pathPrefix = '/n/home00/fgonda/icon/data/reference/' #img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif' img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif' img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' #</felix-addition> img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_labels = sorted( glob.glob( img_search_string_labelImages ) ) print len(img_files_gray) print len(img_files_membrane) print len(img_files_labels) whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) # read the data # in random order read_order = np.random.permutation(np.shape(img_files_gray)[0]) read_order = read_order[:nsamples] for counter, img_index in enumerate(read_order): #print img_files_gray[img_index] img = mahotas.imread(img_files_gray[img_index]) # normalizes [0,1] img = normalizeImage(img, doClahe=True) grayImages[:,:,counter] = img membrane_img = mahotas.imread(img_files_membrane[img_index])/255. 
membraneImages[:,:,counter] = membrane_img maskImages[:,:,counter] = 1.0 if purpose == 'validate': label_img = mahotas.imread(img_files_labels[img_index]) label_img = np.double(label_img) if label_img.ndim == 3: label_img = label_img[:,:,0] + 256*label_img[:,:,1] + 256**2 * label_img[:,:,2] labelImages[:,:,counter] = label_img print counter = 0 for img_index in xrange(nsamples):#np.shape(img_files_gray)[0]): #print img_files_gray[read_order[img_index]] img = grayImages[:,:,img_index] label_img = labelImages[:,:,img_index] membrane_img = membraneImages[:,:,img_index] mask_img = maskImages[:,:,img_index] if purpose=='train': membrane_img = adjust_imprecise_boundaries(img, membrane_img, 0) #get rid of invalid image borders mask_img[:,-(patchSize-1):] = 0 mask_img[-(patchSize-1):,:] = 0 valid_indices = np.nonzero(mask_img) for i in xrange(nsamples_perImage): if counter >= nsamples: break randmem = random.choice(xrange(len(valid_indices[0]))) (row,col) = (valid_indices[0][randmem], valid_indices[1][randmem]) imgPatch = img[row:row+patchSize, col:col+patchSize] membranePatch = membrane_img[row:row+patchSize, col:col+patchSize] labelPatch = label_img[row:row+patchSize, col:col+patchSize] print 'sample#:', counter print 'original:' print 'img minmax:', np.min(imgPatch), np.max(imgPatch) print membranePatch.flatten() print np.unique(membranePatch) if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) membranePatch = np.fliplr(membranePatch) if purpose == 'validate': labelPatch = np.fliplr(labelPatch) rotateInt = random.randint(0,3) imgPatch = np.rot90(imgPatch, rotateInt) membranePatch = np.rot90(membranePatch, rotateInt) if purpose=='validate': labelPatch = np.rot90(labelPatch, rotateInt) if purpose=='validate': labelPatch = relabel(labelPatch) imgPatch, membranePatch = deform_images(imgPatch, membranePatch) # get rid of interpolation artifacts membranePatch = np.round(membranePatch) membranePatch, _ = mahotas.label(1-membranePatch) else: imgPatch, membranePatch = deform_images(imgPatch, membranePatch) # get rid of interpolation artifacts membranePatch = np.round(membranePatch) print 'deformed:' print 'img minmax:', np.min(imgPatch), np.max(imgPatch) print membranePatch.flatten() print np.unique(membranePatch) imgPatch = imgPatch / np.double(np.max(imgPatch)) membranePatch = membranePatch / np.double(np.max(membranePatch)) print 'scaled:' print 'img minmax:', np.min(imgPatch), np.max(imgPatch) print membranePatch.flatten() print np.unique(membranePatch) # crop labelPatch to potentially smaller output size offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = labelPatch.flatten() whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0) print 'modified:' print 'row:', row, 'col:', col print 'patch' print whole_set_patches[counter,:] print 'img minmax:', np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] ) print 'labels' print whole_set_membranes[counter] print np.unique( whole_set_membranes[counter] ) #print np.unique(whole_set_patches[counter,:]) counter += 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 print 'data minmax:', np.max(whole_data), np.min(whole_data) 
print 'labels minmax:', np.max(whole_set_membranes), np.min(whole_set_membranes) data = whole_data.copy() labels = whole_set_labels.copy() membranes = whole_set_membranes.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(membranes)[0]) for i in xrange(np.shape(membranes)[0]): whole_data[i,:] = data[shuffleIndex[i],:] whole_set_labels[i,:] = labels[shuffleIndex[i],:] whole_set_membranes[i,:] = membranes[shuffleIndex[i],:] if purpose == 'validate': data_set = (whole_data, whole_set_membranes, whole_set_labels) else: data_set = (whole_data, whole_set_membranes) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ', total_time / 60. print 'finished sampling data' return data_set def generate_experiment_data_patch_prediction_layers(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, nr_layers=3): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image start_time = time.time() if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'): pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/' else: pathPrefix = '/n/pfister_lab/vkaynig/' img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif' img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_labels = sorted( glob.glob( img_search_string_labelImages ) ) whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' counter = 0 img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) # read the data # in random order #read_order = np.random.permutation(np.shape(img_files_gray)[0]) for img_index in range(np.shape(img_files_gray)[0]): #print img_files_gray[img_index] img = mahotas.imread(img_files_gray[img_index]) # normalizes [0,1] img = normalizeImage(img) grayImages[:,:,img_index] = img membrane_img = mahotas.imread(img_files_membrane[img_index])/255. 
membraneImages[:,:,img_index] = membrane_img maskImages[:,:,img_index] = 1.0 if purpose == 'validate': label_img = mahotas.imread(img_files_labels[img_index]) label_img = np.double(label_img) labelImages[:,:,img_index] = label_img for img_index in xrange(np.shape(img_files_gray)[0]): img_cs = int(np.floor(nr_layers/2)) img_valid_range_indices = np.clip(range(img_index-img_cs,img_index+img_cs+1),0,np.shape(img_files_gray)[0]-1) img = grayImages[:,:,img_valid_range_indices] label_img = labelImages[:,:,img_index] membrane_img = membraneImages[:,:,img_index] mask_img = maskImages[:,:,img_index] if purpose=='train': # adjust according to middle image membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0) #get rid of invalid image borders mask_img[:,-patchSize:] = 0 mask_img[-patchSize:,:] = 0 valid_indices = np.nonzero(mask_img) for i in xrange(nsamples_perImage): if counter >= nsamples: break randmem = random.choice(xrange(len(valid_indices[0]))) (row,col) = (valid_indices[0][randmem], valid_indices[1][randmem]) imgPatch = img[row:row+patchSize, col:col+patchSize,:] membranePatch = membrane_img[row:row+patchSize, col:col+patchSize] labelPatch = label_img[row:row+patchSize, col:col+patchSize] if random.random() < 0.5: for flip_i in xrange(nr_layers): imgPatch[:,:,flip_i] = np.fliplr(imgPatch[:,:,flip_i]) membranePatch = np.fliplr(membranePatch) if purpose == 'validate': labelPatch = np.fliplr(labelPatch) rotateInt = random.randint(0,3) for rot_i in xrange(nr_layers): imgPatch[:,:,rot_i] = np.rot90(imgPatch[:,:,rot_i], rotateInt) membranePatch = np.rot90(membranePatch, rotateInt) if purpose=='validate': labelPatch = np.rot90(labelPatch, rotateInt) if purpose=='validate': labelPatch = relabel(labelPatch) deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch*255,(patchSize,patchSize,1)), np.uint8(np.reshape(labelPatch,(patchSize,patchSize,1)))])) imgPatch, membranePatch, labelPatch = np.split(deformed_images,[imgPatch.shape[2],imgPatch.shape[2]+1], axis=2) else: deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch,(patchSize,patchSize,1))*255])) imgPatch, membranePatch = np.split(deformed_images,[imgPatch.shape[2]], axis=2) imgPatch = imgPatch / np.double(np.max(imgPatch)) membranePatch = membranePatch / np.double(np.max(membranePatch)) # crop labelPatch to potentially smaller output size offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] #whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float) for patch_i in xrange(nr_layers): whole_set_patches[counter,patch_i,:] = imgPatch[:,:,patch_i].flatten() whole_set_labels[counter] = labelPatch.flatten() whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0) counter += 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() membranes = whole_set_membranes.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(membranes)[0]) for i in xrange(np.shape(membranes)[0]): whole_data[i,:,:] = data[shuffleIndex[i],:,:] whole_set_labels[i,:] = labels[shuffleIndex[i],:] whole_set_membranes[i,:] = membranes[shuffleIndex[i],:] if purpose == 
'validate': data_set = (whole_data, whole_set_membranes, whole_set_labels) else: data_set = (whole_data, whole_set_membranes) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ', total_time / 60. print 'finished sampling data' return data_set if __name__=="__main__": import uuid test = generate_experiment_data_patch_prediction(purpose='validate', nsamples=20, patchSize=1024, outPatchSize=1024) # dir_path = './training_patches/' # for i in xrange(30): # unique_filename = str(uuid.uuid4()) # img = np.reshape(test[1][i],(388,388)) # img_gray = np.reshape(test[0][i],(572,572)) # mahotas.imsave(dir_path+unique_filename+'.tif', np.uint8(img*255)) # mahotas.imsave(dir_path+unique_filename+'_gray.tif', np.uint8((img_gray+0.5)*255)) #data_val = generate_experiment_data_supervised(purpose='validate', nsamples=10000, patchSize=65, balanceRate=0.5) #data = generate_experiment_data_patch_prediction(purpose='validate', nsamples=2, patchSize=315, outPatchSize=215) # plt.imshow(np.reshape(data[0][0],(315,315))); plt.figure() # plt.imshow(np.reshape(data[1][0],(215,215))); plt.figure() # plt.imshow(np.reshape(data[2][0],(215,215))); plt.show() # image = mahotas.imread('ac3_input_0141.tif') # image = normalizeImage(image) # label = mahotas.imread('ac3_labels_0141.tif') / 255. # test = adjust_imprecise_boundaries(image, label, 10) # plt.imshow(label+image); plt.show() # plt.imshow(test+image); plt.show()
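# --- Hedged sketch, not part of the original script -----------------------------
# A minimal, self-contained illustration of the patch-sampling idea used above:
# choose a random upper-left corner from a validity mask, crop a patchSize input
# window, and crop the matching outPatchSize label window centred inside it with
# offset = ceil((patchSize - outPatchSize) / 2).  The name sample_patch and the
# toy arrays are hypothetical; only the masking and offset logic mirror the code.
import random
import numpy as np

def sample_patch(img, membrane, patch_size, out_patch_size):
    """Return one (input, label) patch pair using upper-left-corner sampling."""
    mask = np.ones_like(img)
    mask[:, -(patch_size - 1):] = 0          # forbid corners whose patch would
    mask[-(patch_size - 1):, :] = 0          # run past the image border
    rows, cols = np.nonzero(mask)
    k = random.randrange(len(rows))
    r, c = rows[k], cols[k]
    img_patch = img[r:r + patch_size, c:c + patch_size]
    off = int(np.ceil((patch_size - out_patch_size) / 2.0))
    label_patch = membrane[r + off:r + off + out_patch_size,
                           c + off:c + off + out_patch_size]
    return img_patch, label_patch

if __name__ == '__main__':
    toy_img = np.random.rand(64, 64)
    toy_membrane = (np.random.rand(64, 64) > 0.8).astype(np.int32)
    x, y = sample_patch(toy_img, toy_membrane, patch_size=29, out_patch_size=11)
    assert x.shape == (29, 29) and y.shape == (11, 11)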
mit
1,202,716,281,150,593,500
37.355745
192
0.584073
false
omki2005/influxdb-python
influxdb/_dataframe_client.py
1
18144
# -*- coding: utf-8 -*- """DataFrame client for InfluxDB.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import math from collections import defaultdict import pandas as pd import pandas.core.common as pdcom import numpy as np from .client import InfluxDBClient from .line_protocol import _escape_tag def _pandas_time_unit(time_precision): unit = time_precision if time_precision == 'm': unit = 'ms' elif time_precision == 'u': unit = 'us' elif time_precision == 'n': unit = 'ns' assert unit in ('s', 'ms', 'us', 'ns') return unit def _escape_pandas_series(s): return s.apply(lambda v: _escape_tag(v)) class DataFrameClient(InfluxDBClient): """DataFrameClient instantiates InfluxDBClient to connect to the backend. The ``DataFrameClient`` object holds information necessary to connect to InfluxDB. Requests can be made to InfluxDB directly through the client. The client reads and writes from pandas DataFrames. """ EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00') def write_points(self, dataframe, measurement, tags=None, tag_columns=None, field_columns=None, time_precision=None, database=None, retention_policy=None, batch_size=None, protocol='line', numeric_precision=None): """Write to multiple time series names. :param dataframe: data points in a DataFrame :param measurement: name of measurement :param tags: dictionary of tags, with string key-values :param time_precision: [Optional, default None] Either 's', 'ms', 'u' or 'n'. :param batch_size: [Optional] Value to write the points in batches instead of all at one time. Useful for when doing data dumps from one database to another or when doing a massive write operation :type batch_size: int :param protocol: Protocol for writing data. Either 'line' or 'json'. :param numeric_precision: Precision for floating point values. Either None, 'full' or some int, where int is the desired decimal precision. 'full' preserves full precision for int and float datatypes. Defaults to None, which preserves 14-15 significant figures for float and all significant figures for int datatypes. 
""" if tag_columns is None: tag_columns = [] if field_columns is None: field_columns = [] if batch_size: number_batches = int(math.ceil(len(dataframe) / float(batch_size))) for batch in range(number_batches): start_index = batch * batch_size end_index = (batch + 1) * batch_size if protocol == 'line': points = self._convert_dataframe_to_lines( dataframe.iloc[start_index:end_index].copy(), measurement=measurement, global_tags=tags, time_precision=time_precision, tag_columns=tag_columns, field_columns=field_columns, numeric_precision=numeric_precision) else: points = self._convert_dataframe_to_json( dataframe.iloc[start_index:end_index].copy(), measurement=measurement, tags=tags, time_precision=time_precision, tag_columns=tag_columns, field_columns=field_columns) super(DataFrameClient, self).write_points( points, time_precision, database, retention_policy, protocol=protocol) return True if protocol == 'line': points = self._convert_dataframe_to_lines( dataframe, measurement=measurement, global_tags=tags, tag_columns=tag_columns, field_columns=field_columns, time_precision=time_precision, numeric_precision=numeric_precision) else: points = self._convert_dataframe_to_json( dataframe, measurement=measurement, tags=tags, time_precision=time_precision, tag_columns=tag_columns, field_columns=field_columns) super(DataFrameClient, self).write_points( points, time_precision, database, retention_policy, protocol=protocol) return True def query(self, query, params=None, epoch=None, expected_response_code=200, database=None, raise_errors=True, chunked=False, chunk_size=0, dropna=True): """ Quering data into a DataFrame. :param query: the actual query string :param params: additional parameters for the request, defaults to {} :param epoch: response timestamps to be in epoch format either 'h', 'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is RFC3339 UTC format with nanosecond precision :param expected_response_code: the expected status code of response, defaults to 200 :param database: database to query, defaults to None :param raise_errors: Whether or not to raise exceptions when InfluxDB returns errors, defaults to True :param chunked: Enable to use chunked responses from InfluxDB. With ``chunked`` enabled, one ResultSet is returned per chunk containing all results within that chunk :param chunk_size: Size of each chunk to tell InfluxDB to use. 
:param dropna: drop columns where all values are missing :returns: the queried data :rtype: :class:`~.ResultSet` """ query_args = dict(params=params, epoch=epoch, expected_response_code=expected_response_code, raise_errors=raise_errors, chunked=chunked, database=database, chunk_size=chunk_size) results = super(DataFrameClient, self).query(query, **query_args) if query.strip().upper().startswith("SELECT"): if len(results) > 0: return self._to_dataframe(results, dropna) else: return {} else: return results def _to_dataframe(self, rs, dropna=True): result = defaultdict(list) if isinstance(rs, list): return map(self._to_dataframe, rs) for key, data in rs.items(): name, tags = key if tags is None: key = name else: key = (name, tuple(sorted(tags.items()))) df = pd.DataFrame(data) df.time = pd.to_datetime(df.time) df.set_index('time', inplace=True) df.index = df.index.tz_localize('UTC') df.index.name = None result[key].append(df) for key, data in result.items(): df = pd.concat(data).sort_index() if dropna: df.dropna(how='all', axis=1, inplace=True) result[key] = df return result @staticmethod def _convert_dataframe_to_json(dataframe, measurement, tags=None, tag_columns=None, field_columns=None, time_precision=None): if not isinstance(dataframe, pd.DataFrame): raise TypeError('Must be DataFrame, but type was: {0}.' .format(type(dataframe))) if not (isinstance(dataframe.index, pd.PeriodIndex) or isinstance(dataframe.index, pd.DatetimeIndex)): raise TypeError('Must be DataFrame with DatetimeIndex or ' 'PeriodIndex.') # Make sure tags and tag columns are correctly typed tag_columns = tag_columns if tag_columns is not None else [] field_columns = field_columns if field_columns is not None else [] tags = tags if tags is not None else {} # Assume field columns are all columns not included in tag columns if not field_columns: field_columns = list( set(dataframe.columns).difference(set(tag_columns))) dataframe.index = pd.to_datetime(dataframe.index) if dataframe.index.tzinfo is None: dataframe.index = dataframe.index.tz_localize('UTC') # Convert column to strings dataframe.columns = dataframe.columns.astype('str') # Convert dtype for json serialization dataframe = dataframe.astype('object') precision_factor = { "n": 1, "u": 1e3, "ms": 1e6, "s": 1e9, "m": 1e9 * 60, "h": 1e9 * 3600, }.get(time_precision, 1) points = [ {'measurement': measurement, 'tags': dict(list(tag.items()) + list(tags.items())), 'fields': rec, 'time': np.int64(ts.value / precision_factor)} for ts, tag, rec in zip(dataframe.index, dataframe[tag_columns].to_dict('record'), dataframe[field_columns].to_dict('record')) ] return points def _convert_dataframe_to_lines(self, dataframe, measurement, field_columns=None, tag_columns=None, global_tags=None, time_precision=None, numeric_precision=None): dataframe = dataframe.dropna(how='all').copy() if len(dataframe) == 0: return [] if not isinstance(dataframe, pd.DataFrame): raise TypeError('Must be DataFrame, but type was: {0}.' 
.format(type(dataframe))) if not (isinstance(dataframe.index, pd.PeriodIndex) or isinstance(dataframe.index, pd.DatetimeIndex)): raise TypeError('Must be DataFrame with DatetimeIndex or ' 'PeriodIndex.') dataframe = dataframe.rename( columns={item: _escape_tag(item) for item in dataframe.columns}) # Create a Series of columns for easier indexing column_series = pd.Series(dataframe.columns) if field_columns is None: field_columns = [] if tag_columns is None: tag_columns = [] if global_tags is None: global_tags = {} # Make sure field_columns and tag_columns are lists field_columns = list(field_columns) if list(field_columns) else [] tag_columns = list(tag_columns) if list(tag_columns) else [] # If field columns but no tag columns, assume rest of columns are tags if field_columns and (not tag_columns): tag_columns = list(column_series[~column_series.isin( field_columns)]) # If no field columns, assume non-tag columns are fields if not field_columns: field_columns = list(column_series[~column_series.isin( tag_columns)]) precision_factor = { "n": 1, "u": 1e3, "ms": 1e6, "s": 1e9, "m": 1e9 * 60, "h": 1e9 * 3600, }.get(time_precision, 1) # Make array of timestamp ints if isinstance(dataframe.index, pd.PeriodIndex): time = ((dataframe.index.to_timestamp().values.astype(np.int64) / precision_factor).astype(np.int64).astype(str)) else: time = ((pd.to_datetime(dataframe.index).values.astype(np.int64) / precision_factor).astype(np.int64).astype(str)) # If tag columns exist, make an array of formatted tag keys and values if tag_columns: # Make global_tags as tag_columns if global_tags: for tag in global_tags: dataframe[tag] = global_tags[tag] tag_columns.append(tag) tag_df = dataframe[tag_columns] for tag_col_i in tag_columns: if pdcom.is_categorical_dtype(tag_df[tag_col_i]): if '' not in tag_df[tag_col_i].cat.categories: tag_df[tag_col_i] = (tag_df[tag_col_i].cat. add_categories(''). fillna(''). astype(str)) else: tag_df[tag_col_i] = (tag_df[tag_col_i]. fillna(''). 
astype(str)) tag_df = tag_df.fillna('') # replace NA with empty string tag_df = tag_df.sort_index(axis=1) tag_df = self._stringify_dataframe( tag_df, numeric_precision, datatype='tag') # join preprendded tags, leaving None values out tags = tag_df.apply( lambda s: [',' + s.name + '=' + v if v else '' for v in s]) tags = tags.sum(axis=1) del tag_df elif global_tags: tag_string = ''.join( [",{}={}".format(k, _escape_tag(v)) if v else '' for k, v in sorted(global_tags.items())] ) tags = pd.Series(tag_string, index=dataframe.index) else: tags = '' # Make an array of formatted field keys and values field_df = dataframe[field_columns] # Keep the positions where Null values are found mask_null = field_df.isnull().values field_df = self._stringify_dataframe(field_df, numeric_precision, datatype='field') field_df = (field_df.columns.values + '=').tolist() + field_df field_df[field_df.columns[1:]] = ',' + field_df[ field_df.columns[1:]] field_df = field_df.where(~mask_null, '') # drop Null entries fields = field_df.sum(axis=1) del field_df # Generate line protocol string measurement = _escape_tag(measurement) points = (measurement + tags + ' ' + fields + ' ' + time).tolist() return points @staticmethod def _stringify_dataframe(dframe, numeric_precision, datatype='field'): # Prevent modification of input dataframe dframe = dframe.copy() # Find int and string columns for field-type data int_columns = dframe.select_dtypes(include=['integer']).columns string_columns = dframe.select_dtypes(include=['object']).columns # Convert dframe to string if numeric_precision is None: # If no precision specified, convert directly to string (fast) dframe = dframe.astype(str) elif numeric_precision == 'full': # If full precision, use repr to get full float precision float_columns = (dframe.select_dtypes( include=['floating']).columns) nonfloat_columns = dframe.columns[~dframe.columns.isin( float_columns)] dframe[float_columns] = dframe[float_columns].applymap(repr) dframe[nonfloat_columns] = (dframe[nonfloat_columns].astype(str)) elif isinstance(numeric_precision, int): # If precision is specified, round to appropriate precision float_columns = (dframe.select_dtypes( include=['floating']).columns) nonfloat_columns = dframe.columns[~dframe.columns.isin( float_columns)] dframe[float_columns] = (dframe[float_columns].round( numeric_precision)) # If desired precision is > 10 decimal places, need to use repr if numeric_precision > 10: dframe[float_columns] = (dframe[float_columns].applymap(repr)) dframe[nonfloat_columns] = (dframe[nonfloat_columns] .astype(str)) else: dframe = dframe.astype(str) else: raise ValueError('Invalid numeric precision.') if datatype == 'field': # If dealing with fields, format ints and strings correctly dframe[int_columns] += 'i' dframe[string_columns] = '"' + dframe[string_columns] + '"' elif datatype == 'tag': dframe = dframe.apply(_escape_pandas_series) dframe.columns = dframe.columns.astype(str) return dframe def _datetime_to_epoch(self, datetime, time_precision='s'): seconds = (datetime - self.EPOCH).total_seconds() if time_precision == 'h': return seconds / 3600 elif time_precision == 'm': return seconds / 60 elif time_precision == 's': return seconds elif time_precision == 'ms': return seconds * 1e3 elif time_precision == 'u': return seconds * 1e6 elif time_precision == 'n': return seconds * 1e9
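# --- Hedged usage sketch, not part of the original module -----------------------
# Shows how the DataFrameClient defined above might be driven end to end.  The
# connection parameters, the 'example_db' database and the 'cpu_load' measurement
# are made-up placeholders; only methods defined in this module (write_points,
# query) and the inherited InfluxDBClient constructor are used.
import numpy as np
import pandas as pd

def _dataframe_client_demo():
    client = DataFrameClient(host='localhost', port=8086, database='example_db')
    index = pd.date_range('2020-01-01', periods=3, freq='1min', tz='UTC')
    df = pd.DataFrame({'value': np.random.rand(3), 'host': ['a', 'b', 'a']},
                      index=index)
    # 'value' becomes a field, 'host' a tag, written in line protocol
    client.write_points(df, 'cpu_load',
                        tag_columns=['host'], field_columns=['value'],
                        protocol='line')
    # SELECT queries come back as a dict of DataFrames keyed by measurement
    return client.query('SELECT * FROM cpu_load')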
mit
-857,455,492,528,429,400
38.103448
79
0.529927
false
NicWayand/xray
xarray/test/test_dask.py
1
11746
import numpy as np import pandas as pd import xarray as xr from xarray import Variable, DataArray, Dataset import xarray.ufuncs as xu from xarray.core.pycompat import suppress from . import TestCase, requires_dask with suppress(ImportError): import dask import dask.array as da def _copy_at_variable_level(arg): """We need to copy the argument at the level of xarray.Variable objects, so that viewing its values does not trigger lazy loading. """ if isinstance(arg, Variable): return arg.copy(deep=False) elif isinstance(arg, DataArray): ds = arg.to_dataset(name='__copied__') return _copy_at_variable_level(ds)['__copied__'] elif isinstance(arg, Dataset): ds = arg.copy() for k in list(ds): ds._variables[k] = ds._variables[k].copy(deep=False) return ds else: assert False class DaskTestCase(TestCase): def assertLazyAnd(self, expected, actual, test): expected_copy = _copy_at_variable_level(expected) actual_copy = _copy_at_variable_level(actual) with dask.set_options(get=dask.get): test(actual_copy, expected_copy) var = getattr(actual, 'variable', actual) self.assertIsInstance(var.data, da.Array) @requires_dask class TestVariable(DaskTestCase): def assertLazyAnd(self, expected, actual, test): expected_copy = expected.copy(deep=False) actual_copy = actual.copy(deep=False) with dask.set_options(get=dask.get): test(actual_copy, expected_copy) var = getattr(actual, 'variable', actual) self.assertIsInstance(var.data, da.Array) def assertLazyAndIdentical(self, expected, actual): self.assertLazyAnd(expected, actual, self.assertVariableIdentical) def assertLazyAndAllClose(self, expected, actual): self.assertLazyAnd(expected, actual, self.assertVariableAllClose) def setUp(self): self.values = np.random.RandomState(0).randn(4, 6) self.data = da.from_array(self.values, chunks=(2, 2)) self.eager_var = Variable(('x', 'y'), self.values) self.lazy_var = Variable(('x', 'y'), self.data) def test_basics(self): v = self.lazy_var self.assertIs(self.data, v.data) self.assertEqual(self.data.chunks, v.chunks) self.assertArrayEqual(self.values, v) def test_copy(self): self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy()) self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True)) def test_chunk(self): for chunks, expected in [(None, ((2, 2), (2, 2, 2))), (3, ((3, 1), (3, 3))), ({'x': 3, 'y': 3}, ((3, 1), (3, 3))), ({'x': 3}, ((3, 1), (2, 2, 2))), ({'x': (3, 1)}, ((3, 1), (2, 2, 2)))]: rechunked = self.lazy_var.chunk(chunks) self.assertEqual(rechunked.chunks, expected) self.assertLazyAndIdentical(self.eager_var, rechunked) def test_indexing(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u[0], v[0]) self.assertLazyAndIdentical(u[:1], v[:1]) self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]]) with self.assertRaisesRegexp(TypeError, 'stored in a dask array'): v[:1] = 0 def test_squeeze(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze()) def test_equals(self): v = self.lazy_var self.assertTrue(v.equals(v)) self.assertIsInstance(v.data, da.Array) self.assertTrue(v.identical(v)) self.assertIsInstance(v.data, da.Array) def test_transpose(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.T, v.T) def test_shift(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2)) self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2)) self.assertEqual(v.data.chunks, v.shift(x=1).data.chunks) def test_roll(self): u = self.eager_var v = self.lazy_var 
self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2)) self.assertEqual(v.data.chunks, v.roll(x=1).data.chunks) def test_unary_op(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(-u, -v) self.assertLazyAndIdentical(abs(u), abs(v)) self.assertLazyAndIdentical(u.round(), v.round()) def test_binary_op(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(2 * u, 2 * v) self.assertLazyAndIdentical(u + u, v + v) self.assertLazyAndIdentical(u[0] + u, v[0] + v) def test_reduce(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(u.mean(), v.mean()) self.assertLazyAndAllClose(u.std(), v.std()) self.assertLazyAndAllClose(u.argmax(dim='x'), v.argmax(dim='x')) self.assertLazyAndAllClose((u > 1).any(), (v > 1).any()) self.assertLazyAndAllClose((u < 1).all('x'), (v < 1).all('x')) with self.assertRaisesRegexp(NotImplementedError, 'dask'): v.prod() with self.assertRaisesRegexp(NotImplementedError, 'dask'): v.median() def test_missing_values(self): values = np.array([0, 1, np.nan, 3]) data = da.from_array(values, chunks=(2,)) eager_var = Variable('x', values) lazy_var = Variable('x', data) self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var)) self.assertLazyAndIdentical(Variable('x', range(4)), lazy_var.fillna(2)) self.assertLazyAndIdentical(eager_var.count(), lazy_var.count()) def test_concat(self): u = self.eager_var v = self.lazy_var self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], 'x')) self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], 'x')) self.assertLazyAndIdentical( u[:3], Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]])) def test_missing_methods(self): v = self.lazy_var try: v.argsort() except NotImplementedError as err: self.assertIn('dask', str(err)) try: v[0].item() except NotImplementedError as err: self.assertIn('dask', str(err)) def test_univariate_ufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.sin(u), xu.sin(v)) def test_bivariate_ufunc(self): u = self.eager_var v = self.lazy_var self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0)) self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v)) @requires_dask class TestDataArrayAndDataset(DaskTestCase): def assertLazyAndIdentical(self, expected, actual): self.assertLazyAnd(expected, actual, self.assertDataArrayIdentical) def assertLazyAndAllClose(self, expected, actual): self.assertLazyAnd(expected, actual, self.assertDataArrayAllClose) def setUp(self): self.values = np.random.randn(4, 6) self.data = da.from_array(self.values, chunks=(2, 2)) self.eager_array = DataArray(self.values, dims=('x', 'y'), name='foo') self.lazy_array = DataArray(self.data, dims=('x', 'y'), name='foo') def test_rechunk(self): chunked = self.eager_array.chunk({'x': 2}).chunk({'y': 2}) self.assertEqual(chunked.chunks, ((2,) * 2, (2,) * 3)) def test_new_chunk(self): chunked = self.eager_array.chunk() self.assertTrue(chunked.data.name.startswith('xarray-<this-array>')) def test_lazy_dataset(self): lazy_ds = Dataset({'foo': (('x', 'y'), self.data)}) self.assertIsInstance(lazy_ds.foo.variable.data, da.Array) def test_lazy_array(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(u, v) self.assertLazyAndAllClose(-u, -v) self.assertLazyAndAllClose(u.T, v.T) self.assertLazyAndAllClose(u.mean(), v.mean()) self.assertLazyAndAllClose(1 + u, 1 + v) actual = xr.concat([v[:2], v[2:]], 'x') self.assertLazyAndAllClose(u, actual) def test_groupby(self): u = self.eager_array v = self.lazy_array expected = 
u.groupby('x').mean() actual = v.groupby('x').mean() self.assertLazyAndAllClose(expected, actual) def test_groupby_first(self): u = self.eager_array v = self.lazy_array for coords in [u.coords, v.coords]: coords['ab'] = ('x', ['a', 'a', 'b', 'b']) with self.assertRaisesRegexp(NotImplementedError, 'dask'): v.groupby('ab').first() expected = u.groupby('ab').first() actual = v.groupby('ab').first(skipna=False) self.assertLazyAndAllClose(expected, actual) def test_reindex(self): u = self.eager_array v = self.lazy_array for kwargs in [{'x': [2, 3, 4]}, {'x': [1, 100, 2, 101, 3]}, {'x': [2.5, 3, 3.5], 'y': [2, 2.5, 3]}]: expected = u.reindex(**kwargs) actual = v.reindex(**kwargs) self.assertLazyAndAllClose(expected, actual) def test_to_dataset_roundtrip(self): u = self.eager_array v = self.lazy_array expected = u.assign_coords(x=u['x']) self.assertLazyAndIdentical(expected, v.to_dataset('x').to_array('x')) def test_merge(self): def duplicate_and_merge(array): return xr.merge([array, array.rename('bar')]).to_array() expected = duplicate_and_merge(self.eager_array) actual = duplicate_and_merge(self.lazy_array) self.assertLazyAndIdentical(expected, actual) def test_ufuncs(self): u = self.eager_array v = self.lazy_array self.assertLazyAndAllClose(np.sin(u), xu.sin(v)) def test_where_dispatching(self): a = np.arange(10) b = a > 3 x = da.from_array(a, 5) y = da.from_array(b, 5) expected = DataArray(a).where(b) self.assertLazyAndIdentical(expected, DataArray(a).where(y)) self.assertLazyAndIdentical(expected, DataArray(x).where(b)) self.assertLazyAndIdentical(expected, DataArray(x).where(y)) def test_simultaneous_compute(self): ds = Dataset({'foo': ('x', range(5)), 'bar': ('x', range(5))}).chunk() count = [0] def counting_get(*args, **kwargs): count[0] += 1 return dask.get(*args, **kwargs) with dask.set_options(get=counting_get): ds.load() self.assertEqual(count[0], 1) def test_stack(self): data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4)) arr = DataArray(data, dims=('w', 'x', 'y')) stacked = arr.stack(z=('x', 'y')) z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=['x', 'y']) expected = DataArray(data.reshape(2, -1), {'w': [0, 1], 'z': z}, dims=['w', 'z']) assert stacked.data.chunks == expected.data.chunks self.assertLazyAndIdentical(expected, stacked) def test_dot(self): eager = self.eager_array.dot(self.eager_array[0]) lazy = self.lazy_array.dot(self.lazy_array[0]) self.assertLazyAndAllClose(eager, lazy)
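# --- Hedged example, not part of the original test module -----------------------
# A standalone illustration of the behaviour these tests exercise: wrapping a
# dask array in a DataArray keeps reductions lazy, and only touching .values
# triggers the computation.  Shapes and chunk sizes are arbitrary.
import numpy as np
import dask.array as da
from xarray import DataArray

def _lazy_mean_demo():
    values = np.random.randn(4, 6)
    lazy = DataArray(da.from_array(values, chunks=(2, 2)),
                     dims=('x', 'y'), name='foo')
    m = lazy.mean()                      # still dask-backed; nothing computed yet
    assert isinstance(m.data, da.Array)
    result = float(m.values)             # accessing .values forces the computation
    assert abs(result - values.mean()) < 1e-10
    return result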
apache-2.0
-8,016,033,652,901,601,000
35.365325
86
0.584454
false
prheenan/Research
Perkins/Projects/Outreach/2017_JILA_photo_contest/main_photo_contest.py
1
2221
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../../")
from IgorUtil.PythonAdapter import PxpLoader
from GeneralUtil.python import PlotUtilities,CheckpointUtilities,GenUtilities
from Research.Perkins.AnalysisUtil.Images import ImageUtil
from Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util
import matplotlib.gridspec as gridspec
from scipy.stats import norm
from matplotlib_scalebar.scalebar import ScaleBar

def run():
    """
    Plot every cached .ibw AFM image in the photo-contest directory with a
    height colorbar and a physical scale bar, saving one PDF per image.

    Returns:
        Nothing; one "<index>.pdf" figure is written per input image.
    """
    base = FEC_Util.default_data_root()
    input_directory = base + \
        "4Patrick/CuratedData/Outreach/2017-JILA-photo-contest/"
    files = GenUtilities.getAllFiles(input_directory,ext=".ibw")
    # read all the ibw files in and cache them
    images = []
    func = PxpLoader.read_ibw_as_image
    for i,f in enumerate(files):
        cache_file = "./{:d}.pkl".format(i)
        tmp = CheckpointUtilities.getCheckpoint(cache_file,func,False,f)
        images.append(tmp)
    crop = [ None,None,None,None,None,None,None,None ]  # per-image crops (currently unused)
    for i,image in enumerate(images):
        fig = PlotUtilities.figure()
        # stretch the gray scale between the 75th and 99th height percentiles
        vmin,vmax = np.percentile(image.height_nm_rel(),[75,99])
        range_plot_nanometers = 1000 * image.range_microns()
        vmin_dict = dict(vmin=vmin,vmax=vmax)
        ImageUtil.PlotImage(image,cmap=plt.cm.gray,
                            range_plot=range_plot_nanometers,**vmin_dict)
        PlotUtilities.FormatImageAxis()
        PlotUtilities.colorbar("Height (nm)")
        # the scale bar needs the physical size of a single pixel
        pixel_size_meters = image.pixel_size_meters
        pixel_size_nanometers = pixel_size_meters * 1e9
        print(pixel_size_nanometers,range_plot_nanometers)
        scalebar = ScaleBar(pixel_size_nanometers,'nm',box_alpha=0.7)
        plt.gca().add_artist(scalebar)
        PlotUtilities.savefig(fig,"{:d}.pdf".format(i))

if __name__ == "__main__":
    run()
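# --- Hedged example, not part of the original script ----------------------------
# Minimal sketch of the scale-bar pattern used in run(): display an image with a
# known physical pixel size and attach a ScaleBar so the physical scale is
# visible.  The 4 nm pixel size and the random "height" image are made up.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib_scalebar.scalebar import ScaleBar

def _scalebar_demo(pixel_size_nm=4.0):
    fake_height_nm = np.random.rand(256, 256)   # stand-in for an AFM height map
    fig, ax = plt.subplots()
    im = ax.imshow(fake_height_nm, cmap=plt.cm.gray)
    fig.colorbar(im, ax=ax, label="Height (nm)")
    # ScaleBar only needs the physical size of one pixel and its unit
    ax.add_artist(ScaleBar(pixel_size_nm, 'nm', box_alpha=0.7))
    ax.set_axis_off()
    return fig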
gpl-3.0
-6,349,060,512,923,287,000
36.016667
77
0.662765
false
haidawyl/MLinAction
Logistic回归/logRegres.py
1
21132
#!/usr/bin/python # -*- coding:utf-8 -*- import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import random def loadDataSet(): ''' 读取文件加载数据集 :return: ''' dataMat = [] # 数据矩阵 labelMat = [] # 类别标签向量 fr = open('testSet.txt') # 打开文件 for line in fr.readlines(): # 遍历所有行数据 lineArr = line.strip().split() # 截取掉每行的回车字符, 再使用空格字符 ' ' 将行数据分割成一个元素列表 dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])]) # 数据矩阵加入1.0以及文件的第0列和第1列, 生成3维数据[x0, x1, x2] labelMat.append(int(lineArr[2])) # 类别标签矩阵加入第2列 return dataMat, labelMat # 返回数据矩阵和类别标签矩阵 def sigmoid(inX): ''' sigmoid函数, sigma(z) = 1 / (1 + np.exp(-z)) :param inX: :return: ''' # 如果inX是一个向量或数组, 则np.exp(-inX)是针对其中的每一个元素进行运算的, 得到的结果仍然是一个向量或数组 return np.longfloat(1.0 / (1 + np.exp(-inX))) def gradAscent(dataMatIn, classLabels): ''' 批量梯度上升算法 :param dataMatIn: 数据集 :param classLabels: 类别标签 :return: 回归系数 ''' dataMatrix = np.mat(dataMatIn) # 将数组转换为NumPy矩阵 labelMat = np.mat(classLabels).transpose() # 将一维数组转换为NumPy行向量, 然后再对行向量进行转置变成列向量 m, n = np.shape(dataMatrix) # 取得数据矩阵的行数和列数 alpha = 0.001 # 向目标移动的步长 maxCycles = 500 # 最大迭代次数 weights = np.ones((n, 1)) # 回归系数, 创建以1填充的n行x1列的NumPy数组 # 迭代maxCycles后得到回归系数 for k in range(maxCycles): # dataMatrix: m x n # weights: n x 1 # resultMatrix = dataMatrix * weights: m x 1 # dataMatrix[0][0] * weights[0] + dataMatrix[0][1] * weights[1] + dataMatrix[0][2] * weights[2] + ... + dataMatrix[0][n] * weights[n] = resultMatrix[0][0] # dataMatrix[1][0] * weights[0] + dataMatrix[1][1] * weights[1] + dataMatrix[1][2] * weights[2] + ... + dataMatrix[1][n] * weights[n] = resultMatrix[1][0] # dataMatrix[2][0] * weights[0] + dataMatrix[2][1] * weights[1] + dataMatrix[2][2] * weights[2] + ... + dataMatrix[2][n] * weights[n] = resultMatrix[2][0] # ... # dataMatrix[m][0] * weights[0] + dataMatrix[m][1] * weights[1] + dataMatrix[m][2] * weights[2] + ... + dataMatrix[m][n] * weights[n] = resultMatrix[m][0] # 对列向量运行sigmoid函数, 返回的结果还是列向量 # h: m x 1 h = sigmoid(dataMatrix * weights) # 得到预测类别的值 # 向量相减等于向量中对应的元素相减, 得到的结果还是向量 # labelMat: m x 1 # error: m x 1 # 梯度反方向=实际值-预测值 error = (labelMat - h) # 计算真实类别与预测类别的差值 # 梯度方向=预测值-实际值 # error = (h - labelMat) # 计算真实类别与预测类别的差值 # dataMatrix; m x n # dataMatrix.transpose(): n x m # error: m x 1 # dataMatrix.transpose() * error: n x 1 本次计算得到的梯度 # 数字乘以向量等于该数字分别乘以向量中的每一个元素 # alpha * dataMatrix.transpose() * error: n x 1 # weights: n x 1 # 向量相加等于向量中对应的元素相加, 得到的结果还是向量 # 梯度上升算法(梯度反方向) weights = weights + alpha * dataMatrix.transpose() * error # 按照真实类别与预测类别的差值方向调整回归系数 # 梯度下降算法(梯度方向) # weights = weights - alpha * dataMatrix.transpose() * error # 按照真实类别与预测类别的差值方向调整回归系数 return weights # 返回回归系数 def plotBestFit(weights): ''' :param weights: 回归系数 :return: ''' dataMat, labelMat = loadDataSet() # 读取数据集和类别标签 dataArr = np.array(dataMat) # 将数据集转换成NumPy数组 trainingClassEst = np.mat(dataArr) * np.mat(weights) fig = plt.figure() # 画图 ax = fig.add_subplot(111) ax.scatter(dataArr[np.array(labelMat) == 1.0][:, 1], dataArr[np.array(labelMat) == 1.0][:, 2], s= 30, c='r', marker='s') # 画标签为1的散点图 ax.scatter(dataArr[np.array(labelMat) == 0.0][:, 1], dataArr[np.array(labelMat) == 0.0][:, 2], s= 30, c='g') # 画标签为0的散点图 # 创建区间为[-0.3, 0.3), 步长为0.1的等差numpy.ndarray数组 x1 = np.arange(-3.0, 3.0, 0.1) # 我们要画的横轴数据 # 此处令sigmoid函数为0. 0是两个分类(即类别l和类别0)的分界处. 
因此我们设定0 = w0x0 + w1x1 + w2x2, # 在生成数据时, 我们已将x0设置为1.0, 则有x2 = (-weights[0] - weights[1] * x1) / weights[2] x2 = (-weights[0] - weights[1] * x1) / weights[2] # 根据公式计算得出纵轴数据 ax.plot(x1, x2) # 画出分割线 plt.xlabel('X1') plt.ylabel('X2') plt.show() plotROC(np.mat(trainingClassEst).T, labelMat, u'训练集ROC曲线') def stocGradAscent0(dataMatrix, classLabels, numIter=10): ''' 随机梯度上升算法 :param dataMatrix: 数据集 :param classLabels: 类别标签 :return: 回归系数 ''' m, n = np.shape(dataMatrix) # 取得数据集的行数和列数 alpha = 0.01 # 向目标移动的步长 weights = np.ones(n) # 回归系数, 创建以1填充的长度为n的NumPy数组 # 记录回归系数每次改变后的值的数组 weightsArr = [] for i in range(m * numIter): # 遍历numIter次数据集 # dataMatrix[i]; 长度为n的NumPy数组 # weights; 长度为n的NumPy数组 # dataMatrix[i] * weights: 进行点乘, 得到长度为n的NumPy数组 # np.sum(dataMatrix[i] * weights): 对NumPy数组所有元素加和得到一个数字 # 调用sigmoid函数, 参数是一个数字, 得到的h也是一个数字 h = sigmoid(np.sum(dataMatrix[i%m] * weights)) # 得到预测类别的值 # classLabels: 长度为m的列表 # classLabels[i]: 列表的一个元素, 因此是一个数字 # 梯度方向=预测值-实际值 # 梯度反方向=实际值-预测值 error = classLabels[i%m] - h # 计算真实类别与预测类别的差值 # dataMatrix[i]; 长度为n的NumPy数组 # 一个数字乘以一个NumPy数组等于该数字分别乘以数组中的每一个元素 # error * dataMatrix[i%m]: 本次计算得到的梯度 # weights: 长度为n的NumPy数组 # NumPy数组加上NumPy数组等于数组中对应的元素相加, 得到的结果还是NumPy数组 # 梯度上升算法 weights = weights + alpha * error * dataMatrix[i%m] # 按照真实类别与预测类别的差值方向调整回归系数 weightsArr.append(weights.copy()) # 画出回归系数的变化情况 mpl.rcParams['font.sans-serif'] = [u'SimHei'] # 指定显示字体 mpl.rcParams['axes.unicode_minus'] = False # 解决保存图像中负号'-'显示为方块的问题 plt.figure(1, facecolor='white', figsize=(6, 5)) # 创建一个新图形, 背景色设置为白色 plt.subplot(311) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 indexArr = range(len(weightsArr)) plt.plot(indexArr, np.mat(weightsArr)[:, 0], alpha=0.5) plt.ylabel('X0') plt.subplot(312) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 plt.plot(indexArr, np.mat(weightsArr)[:, 1], alpha=0.5) plt.ylabel('X1') plt.subplot(313) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 plt.plot(indexArr, np.mat(weightsArr)[:, 2], alpha=0.5) plt.ylabel('X2') plt.show() return weights # 返回回归系数 def stocGradAscent1(dataMatrix, classLabels, numIter=150): ''' 改进后的随机梯度上升算法 :param dataMatrix: 数据集 :param classLabels: 类别标签 :param numIter: 迭代次数 :return: 回归系数 ''' m, n = np.shape(dataMatrix) # 取得数据集的行数和列数 weights = np.ones(n) # 回归系数, 创建以1填充的长度为n的NumPy数组 # 记录回归系数每次改变后的值的数组 weightsArr = [] for j in range(numIter): # 遍历numIter次 dataIndex = range(m) # 生成[0, m)的整数列表, 即为数据集索引值的列表 for i in range(m): # 遍历整个数据集 # 虽然alpha在每次迭代时都会减小, 但是永远也不会减小到0, 这是因为存在一个常数项. # 这样做的原因是为了保证在多次迭代之后新数据仍然具有一定的影响. 如果要处理的问题 # 是动态变化的, 那么可以适当加大该常数项, 来确保新的值获得更大的回归系数. # 另外值得注意的一点是, 在降低alpha的函数中, alpha每次减少1/(j+i), 其中j是迭代次数, # i是样本点的下标. 这样当j<<max(i)时, alpha就不是严格下降的. alpha = 4 / (1.0 + j + i) + 0.01 # 每次动态减小alpha值 # 通过随机选取样本来更新回归系数. 这种方法将减少周期性的波动. 
randIndex = int(random.uniform(0, len(dataIndex))) # 随机生成一个[0, m)范围内的实数, 并转换为整数, 即得到随机选取样本的索引值 # dataMatrix[randIndex]; 长度为n的NumPy数组 # weights; 长度为n的NumPy数组 # dataMatrix[randIndex] * weights: 进行点乘, 得到长度为n的NumPy数组 # np.sum(dataMatrix[randIndex] * weights): 对NumPy数组所有元素加和得到一个数字 # 调用sigmoid函数, 参数是一个数字, 得到的h也是一个数字 h = sigmoid(np.sum(dataMatrix[randIndex] * weights)) # 得到预测类别的值 # classLabels: 长度为m的列表 # classLabels[randIndex]: 列表的一个元素, 因此是一个数字 error = classLabels[randIndex] - h # 计算真实类别与预测类别的差值 # dataMatrix[randIndex]; 长度为n的NumPy数组 # 一个数字乘以一个NumPy数组等于该数字分别乘以数组中的每一个元素 # error * dataMatrix[i%m]: 本次计算得到的梯度 # weights: 长度为n的NumPy数组 # NumPy数组加上NumPy数组等于数组中对应的元素相加, 得到的结果还是NumPy数组 weights = weights + alpha * error * dataMatrix[randIndex] # 按照真实类别与预测类别的差值方向调整回归系数 del(dataIndex[randIndex]) # 删除dataIndex列表中已经使用过的元素, 下次计算梯度时不再使用该样本, 保证所有的样本数据都能够参与计算梯度 weightsArr.append(weights.copy()) # 画出回归系数的变化情况 mpl.rcParams['font.sans-serif'] = [u'SimHei'] # 指定显示字体 mpl.rcParams['axes.unicode_minus'] = False # 解决保存图像中负号'-'显示为方块的问题 plt.figure(1, facecolor='white', figsize=(6, 5)) # 创建一个新图形, 背景色设置为白色 plt.subplot(311) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 indexArr = range(len(weightsArr)) plt.plot(indexArr, np.mat(weightsArr)[:, 0], alpha=0.5) plt.ylabel('X0') plt.subplot(312) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 plt.plot(indexArr, np.mat(weightsArr)[:, 1], alpha=0.5) plt.ylabel('X1') plt.subplot(313) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 plt.plot(indexArr, np.mat(weightsArr)[:, 2], alpha=0.5) plt.ylabel('X2') plt.show() return weights # 返回回归系数 def testGradAscent(): ''' 测试批量梯度上升算法 :return: ''' dataMat, labelMat = loadDataSet() weights = gradAscent(dataMat, labelMat) plotBestFit(weights.getA()) print weights def testStocGradAscent0(): ''' 测试随机梯度上升算法 :return: ''' dataArr, labelMat = loadDataSet() weights = stocGradAscent0(np.array(dataArr), labelMat, numIter=200) plotBestFit(np.mat(weights).T.getA()) print weights def testStocGradAscent1(): ''' 测试随机梯度上升算法 :return: ''' dataArr, labelMat = loadDataSet() weights = stocGradAscent1(np.array(dataArr), labelMat, numIter=30) plotBestFit(np.mat(weights).T.getA()) print weights def classifyVector(inX, weights): ''' 执行分类操作 :param inX: 特征向量 :param weights: 回归系数 :return: ''' # 特征向量点乘回归系数, 再对乘积进行加和运算, 最后使用得到的和值调用Sigmoid算法得到预测值 prob = sigmoid(np.sum(inX * weights)) if prob > 0.5: # 预测值大于0.5, 归入类别1 return 1.0 else: # 否则归入类别0 return 0.0 def colicTest(): ''' :return: ''' frTrain = open('horseColicTraining.txt') # 打开训练集文件 frTest = open('horseColicTest.txt') # 打开测试集文件 trainingSet = [] # 训练集 trainingLabels = [] # 训练集类别标签 for line in frTrain.readlines(): currLine = line.strip().split('\t') # 截取掉每行的回车字符, 再使用tab字符 '\t' 将行数据分割成一个元素列表 lineArr = [] for i in range(21): # 前21列是特征数据 lineArr.append(float(currLine[i])) trainingSet.append(lineArr) trainingLabels.append(float(currLine[21])) # 最后一列是类别标签 trainWeights = stocGradAscent1(np.array(trainingSet),trainingLabels, 500) # 使用训练集计算回归系数向量 errorCount = 0 # 预测错误数 numTestVec = 0.0 # 测试样本数量 # 导入测试集 for line in frTest.readlines(): numTestVec += 1.0 # 测试样本数量+1 currLine = line.strip().split('\t') # 截取掉每行的回车字符, 再使用tab字符 '\t' 将行数据分割成一个元素列表 lineArr = [] for i in range(21): # 前21列是特征数据 lineArr.append(float(currLine[i])) # 预测分类, 与实际分类不符, 错误总数+1 if int(classifyVector(np.array(lineArr), trainWeights)) != int(currLine[21]): errorCount += 1 # 计算错误率 errorRate = 
(float(errorCount) / numTestVec) print 'the error rate of this test is : %f' % errorRate return errorRate def multiTest(): ''' 执行10次训练和测试, 计算平均错误率 :return: ''' numTests = 10 errorSum = 0.0 for k in range(numTests): errorSum += colicTest() print 'after %d iterations the average error rate is: %f' % (numTests, errorSum/float(numTests)) def drawSigmoid(): ''' :return: ''' mpl.rcParams['font.sans-serif'] = [u'SimHei'] # 指定显示字体 mpl.rcParams['axes.unicode_minus'] = False # 解决保存图像中负号'-'显示为方块的问题 plt.figure(1, facecolor='white', figsize=(6, 5)) # 创建一个新图形, 背景色设置为白色 plt.subplot(211) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 x1 = np.arange(-5.0, 6.0, 0.1) y1 = sigmoid(x1) plt.plot(x1, y1) plt.subplot(212) # subplot(numRows, numCols, plotNum) 将整个绘图区域等分为numRows行* numCols列个子区域,然后按照从左到右,从上到下的顺序对每个子区域进行编号,左上的子区域的编号为1 x2 = np.arange(-60, 60, 1) y2 = sigmoid(x2) plt.plot(x2, y2) plt.show() def plotROC(predStrengths, classLabels, title): ''' <0.0, 0.0>: 将所有样例判为反例, 则TP=FP=0 <1.0, 1.0>: 将所有样例判为正例, 则FN=TN=0 x轴表示假阳率(FP/(FP+TN)), 在<0.0, 0.0>点假阳率等于0, 在<1.0, 1.0>点假阳率等于1 y轴表示真阳率(TP/(TP+FN)), 在<0.0, 0.0>点真阳率等于0, 在<1.0, 1.0>点真阳率等于1 :param predStrengths: 行向量, 表示分类结果的预测强度, 如果值为负数则值越小被判为反例的预测强度越高, 反之值为正数则值越大被判为正例的预测强度越高 :param classLabels: 类别标签 :return: ''' cur = (1.0, 1.0) # 绘制光标的位置, 起始点为右上角<1.0, 1.0>的位置 ySum = 0.0 # 计算AUC(Area Under the Curve, ROC曲线下面的面积)的值 numPosClas = sum(np.array(classLabels) == 1.0) # 真实正例的数目 yStep = 1 / float(numPosClas) # y轴的步长 # len(classLabels) - numPosClas: 真实反例的数目 xStep = 1 / float(len(classLabels) - numPosClas) # x轴的步长 # 分类器的预测强度从小到大排序的索引列表 # 得到的是样本预测强度从小到大(从负数到正数)的样例的索引值列表 # 第一个索引指向的样例被判为反例的强度最高 # 最后一个索引指向的样例被判为正例的强度最高 sortedIndicies = predStrengths.argsort() # print predStrengths[0, sortedIndicies] mpl.rcParams['font.sans-serif'] = [u'SimHei'] # 指定显示字体 mpl.rcParams['axes.unicode_minus'] = False # 解决保存图像中负号'-'显示为方块的问题 fig = plt.figure() fig.clf() ax = plt.subplot(111) # 遍历分类器的预测强度从小到大排序的索引列表 # 先从排名最低的样例开始, 所有排名更低的样例都被判为反例, 而所有排名更高的样例都被判为正例. # 第一个值对应点为<1.0, 1.0>, 而最后一个值对应点为<0.0, 0.0>. # 然后, 将其移到排名次低的样例中去, 如果该样例属于正例, 那么对真阳率进行修改; # 如果该样例属于反例, 那么对假阳率进行修改. # 初始时预测强度最小, 那么所有的样本都被判为正例, 即对应图中右上角的位置. # 向后迭代的过程中, 预测强度依次增大, 则排名低(列表前面)的样本被判为反例, 排名高(列表后面)的样本被判为正例. # 如果当前样本为真实正例, 在将其预测为反例时, 则为伪反例FN, 根据真阳率=TP/(TP+FN), 因此真阳率下降, 沿y轴下移 # 如果当前样本为真实反例, 在将其预测为反例时, 则为真反例TN, 根据假阳率=FP/(FP+TN), 因此假阳率下降, 沿x轴左移 for index in sortedIndicies.tolist()[0]: if classLabels[index] == 1.0: # 标签为1.0的类即正例, 则要沿着y轴的方向下降一个步长, 也就是要不断降低真阳率 delX = 0 delY = yStep else: # 标签为其它(0或者-1)的类即反例, 则要沿着x轴的方向倒退一个步长, 也就是要不断降低假阳率 delX = xStep delY = 0 # 为了计算AUC, 需要对多个小矩形的面积进行累加. 这些小矩形的宽度是xStep, 因此可以先对所有矩形 # 的高度进行累加, 最后再乘以xStep得到其总面积. 所有高度的和(ySum)随着x轴的每次移动而渐次增加 ySum += cur[1] # 一旦决定了是在x轴还是y轴方向上进行移动, 就可以在当前点和新点之间画出一条线段 ax.plot([cur[0], cur[0]-delX], [cur[1], cur[1]-delY], c='b') # 更新当前点cur cur = (cur[0]-delX, cur[1]-delY) # 画出左下角到右上角之间的虚线 ax.plot([0, 1], [0, 1], 'b--') plt.xlabel(u'假阳率(False Positive Rate)') plt.ylabel(u'真阳率(True Positive Rate)') plt.title(title) ax.axis([0, 1, 0, 1]) plt.show() print 'the Area Under the Curve is: ', ySum*xStep if __name__=='__main__': # testGradAscent() # testStocGradAscent0() testStocGradAscent1() # multiTest() # drawSigmoid()
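# --- Hedged example, not part of the original module ----------------------------
# A compact restatement of the batch update implemented in gradAscent() above:
#     w <- w + alpha * X^T (y - sigmoid(X w))
# The toy 1-D data below is made up purely to show the shapes involved.
import numpy as np

def toy_grad_ascent(X, y, alpha=0.001, max_cycles=500):
    """X: (m, n) design matrix, y: (m,) labels in {0, 1}; returns (n,) weights."""
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    w = np.ones(X.shape[1])
    for _ in range(max_cycles):
        h = 1.0 / (1.0 + np.exp(-X.dot(w)))   # sigmoid of the current scores
        w = w + alpha * X.T.dot(y - h)        # step along the log-likelihood gradient
    return w

if __name__ == '__main__':
    # two well-separated 1-D clusters, with a bias column prepended
    pts = np.concatenate([np.random.randn(20) - 2.0, np.random.randn(20) + 2.0])
    X = np.column_stack([np.ones_like(pts), pts])
    y = np.concatenate([np.zeros(20), np.ones(20)])
    print(toy_grad_ascent(X, y))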
apache-2.0
-3,469,160,589,560,217,000
37.068878
162
0.631484
false
Castronova/EMIT
tests/transformations/test_spatial.py
1
16244
__author__ = 'tonycastronova' import unittest #import odm2.api import utilities from shapely.wkt import loads from ODMconnection import dbconnection # from ODMconnection import SessionFactory from api_old.ODM2.Core.model import * from sqlalchemy import func import matplotlib.pyplot as plt from scipy.spatial import Voronoi, voronoi_plot_2d import numpy as np ################################################################## ############## ! NOTE ! ############## # # # You must run insert_test_geometries before running these tests # # /tests/data/sample gis/insert_test_geometies.py # ################################################################## class testSpatial(unittest.TestCase): def setUp(self): # build database connection string engine = 'postgresql' address = 'localhost' db = 'odm2CamelCase' user = 'tonycastronova' pwd = 'water' dbconn = dbconnection() connection = dbconn.createConnection(engine,address,db,user,pwd) # self._session_factory = SessionFactory(connection_string, False) self._session = connection.getSession() def test_get_intersecting(self): """ 1.) determine potential source ts based on variable/unit/location(bbox) (omitted) 2.) Query all sampling features that belong to this set of series ids 3.) for each target feature, determine the points that intersect it """ # this will already be known on the client (querying from the db for testing purposes only) targets = [ 'POLYGON ((-111.961138093451495 41.896360920478401,-111.752777525493116 41.893660783528617,-111.756504446950089 41.606066677767103,-111.964865014908469 41.608766814716887,-111.961138093451495 41.896360920478401))', 'POLYGON ((-111.752777525493116 41.893660783528617,-111.499522658320416 41.892196863718198,-111.501743961064676 41.507916435626953,-111.754998828237376 41.509380355437372,-111.752777525493116 41.893660783528617))', 'POLYGON ((-111.340098770929785 41.625102365813746,-111.132370933565824 41.620863022194158,-111.135345259823112 41.475121035587051,-111.343073097187073 41.479360379206639,-111.340098770929785 41.625102365813746))' ] res = None # isolate only the samplingfeature ids that I am interested in (i.e. a set of points) sourceids = ['points_nad83_0','points_nad83_1','points_nad83_2','points_nad83_3','points_nad83_4'] sources = {} for target in targets: try: #ST_Equals(geometry, geometry) #return self._session.query(Samplingfeature).filter(func.ST_AsText(Samplingfeature.FeatureGeometry) == func.ST_AsText(wkt_geometry)).first() res = self._session.query(Samplingfeature).filter(Samplingfeature.SamplingFeatureCode.in_(sourceids)).filter(func.ST_Intersects(Samplingfeature.FeatureGeometry,target)).all() sources[target] = res except Exception, e: print e return None print 'done' pass """ # SELECT points.* FROM points_table points INNER JOIN polygon_table polys ON ST_Within(points.geometry,polys.geometry) WHERE polys.id = 1 - """ def test_build_theissen(self): print 'BUILD THEISSEN POLYGONS' # isolate only the samplingfeature ids that I am interested in (i.e. 
a set of points) sourceids = ['points_nad83_0','points_nad83_1','points_nad83_2','points_nad83_3','points_nad83_4'] sources = {} try: res = self._session.query(Samplingfeature, Samplingfeature.FeatureGeometry.ST_AsText()).filter(Samplingfeature.SamplingFeatureCode.in_(sourceids)).all() except Exception, e: return None # build coordinates coords = [] for r in res: point = loads(r[1]) coords.append((point.x, point.y)) points = np.array(coords) #np.array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], # [2, 0], [2, 1], [2, 2]]) vor = Voronoi(points) regions, vertices = self.voronoi_finite_polygons_2d(vor) # colorize for region in regions: polygon = vertices[region] plt.fill(*zip(*polygon), alpha=0.4) plt.plot(points[:,0], points[:,1], 'ko') plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1) plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1) plt.show() def test_voronoi(self): # make up data points np.random.seed(1234) points = np.random.rand(15, 2) # compute Voronoi tesselation vor = Voronoi(points) # plot regions, vertices = self.voronoi_finite_polygons_2d(vor) print "--" print regions print "--" print vertices # colorize for region in regions: polygon = vertices[region] plt.fill(*zip(*polygon), alpha=0.4) plt.plot(points[:,0], points[:,1], 'ko') plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1) plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1) plt.show() def voronoi_finite_polygons_2d(self, vor, radius=None): if vor.points.shape[1] != 2: raise ValueError("Requires 2D input") new_regions = [] new_vertices = vor.vertices.tolist() center = vor.points.mean(axis=0) if radius is None: radius = vor.points.ptp().max() # Construct a map containing all ridges for a given point all_ridges = {} for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices): all_ridges.setdefault(p1, []).append((p2, v1, v2)) all_ridges.setdefault(p2, []).append((p1, v1, v2)) # Reconstruct infinite regions for p1, region in enumerate(vor.point_region): vertices = vor.regions[region] if all(v >= 0 for v in vertices): # finite region new_regions.append(vertices) continue # reconstruct a non-finite region ridges = all_ridges[p1] new_region = [v for v in vertices if v >= 0] for p2, v1, v2 in ridges: if v2 < 0: v1, v2 = v2, v1 if v1 >= 0: # finite ridge: already in the region continue # Compute the missing endpoint of an infinite ridge t = vor.points[p2] - vor.points[p1] # tangent t /= np.linalg.norm(t) n = np.array([-t[1], t[0]]) # normal midpoint = vor.points[[p1, p2]].mean(axis=0) direction = np.sign(np.dot(midpoint - center, n)) * n far_point = vor.vertices[v2] + direction * radius new_region.append(len(new_vertices)) new_vertices.append(far_point.tolist()) # sort region counterclockwise vs = np.asarray([new_vertices[v] for v in new_region]) c = vs.mean(axis=0) angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0]) new_region = np.array(new_region)[np.argsort(angles)] # finish new_regions.append(new_region.tolist()) return new_regions, np.asarray(new_vertices) def test_nearest(self): # this will already be known on the client (querying from the db for testing purposes only) targets = [ 'POLYGON ((-111.961138093451495 41.896360920478401,-111.752777525493116 41.893660783528617,-111.756504446950089 41.606066677767103,-111.964865014908469 41.608766814716887,-111.961138093451495 41.896360920478401))', 'POLYGON ((-111.752777525493116 41.893660783528617,-111.499522658320416 41.892196863718198,-111.501743961064676 41.507916435626953,-111.754998828237376 41.509380355437372,-111.752777525493116 
41.893660783528617))', 'POLYGON ((-111.340098770929785 41.625102365813746,-111.132370933565824 41.620863022194158,-111.135345259823112 41.475121035587051,-111.343073097187073 41.479360379206639,-111.340098770929785 41.625102365813746))' ] # isolate only the samplingfeature ids that I am interested in (i.e. a set of points) sourceids = ['points_nad83_0','points_nad83_1','points_nad83_2','points_nad83_3','points_nad83_4'] nearest = [] for target in targets: try: res = self._session.query(Samplingfeature, Samplingfeature.FeatureGeometry.ST_AsText()).\ filter(Samplingfeature.SamplingFeatureCode.in_(sourceids)). \ order_by(Samplingfeature.FeatureGeometry.distance_centroid(target)).limit(1).all() nearest.append(res[0][1]) except Exception, e: print e return None #for n in nearest: print n # plotting for i in range(0,len(targets)): # plot the polygon polygon = [(x,y) for x,y in loads(targets[i]).boundary.coords] p = plt.fill(*zip(*polygon), alpha=0.4) # set the point color face_color = p[0].get_facecolor() ptc = list(face_color) ptc[3] = 1.0 pt_color = tuple(ptc) # plot the point n = loads(nearest[i]) plt.plot(n.x,n.y,marker='o',color=pt_color) # plot ignored points res = self._session.query(Samplingfeature.FeatureGeometry.ST_AsText()).\ filter(Samplingfeature.SamplingFeatureCode.in_(sourceids)).all() for geom in res: if geom[0] not in nearest: # plot the point n = loads(geom[0]) plt.plot(n.x,n.y,marker='o',color=(0.0,0.0,0.0,0.2)) plt.show() def func(self, x, y): return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2 def test_spline(self): from scipy.interpolate import SmoothBivariateSpline # this will already be known on the client (querying from the db for testing purposes only) targets = [ 'POLYGON ((-111.961138093451495 41.896360920478401,-111.752777525493116 41.893660783528617,-111.756504446950089 41.606066677767103,-111.964865014908469 41.608766814716887,-111.961138093451495 41.896360920478401))', 'POLYGON ((-111.752777525493116 41.893660783528617,-111.499522658320416 41.892196863718198,-111.501743961064676 41.507916435626953,-111.754998828237376 41.509380355437372,-111.752777525493116 41.893660783528617))', 'POLYGON ((-111.340098770929785 41.625102365813746,-111.132370933565824 41.620863022194158,-111.135345259823112 41.475121035587051,-111.343073097187073 41.479360379206639,-111.340098770929785 41.625102365813746))' ] # isolate only the samplingfeature ids that I am interested in (i.e. 
a set of points) sourceids = ['points_nad83_0','points_nad83_1','points_nad83_2','points_nad83_3','points_nad83_4'] # plot all points res = self._session.query(Samplingfeature.FeatureGeometry.ST_AsText()).\ filter(Samplingfeature.SamplingFeatureCode.in_(sourceids)).all() x = [] y = [] w = [] i = 0 for geom in res: # plot the point n = loads(geom[0]) x.append(n.x) y.append(n.y) w.append(i) #plt.plot(n.x,n.y,marker='o',color=(0.0,0.0,0.0,0.2)) i += 1 # calculate spline #s = SmoothBivariateSpline(x,y,w) #plt.imshow(s) from scipy.interpolate import griddata from numpy import linspace,exp from numpy.random import randn from scipy.interpolate import UnivariateSpline grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] points = np.random.rand(10, 2) values = self.func(points[:,0], points[:,1]) grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest') grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear') grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic') plt.subplot(221) plt.imshow(self.func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower') plt.plot(points[:,0], points[:,1], 'ko', ms=1) plt.title('Original') plt.subplot(222) plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower') plt.title('Nearest') plt.subplot(223) plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower') plt.title('Linear') plt.subplot(224) plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower') plt.title('Cubic') plt.gcf().set_size_inches(6, 6) plt.show() def test_2d_iterp(self): import numpy as np from scipy.interpolate import Rbf import matplotlib.pyplot as plt from matplotlib import cm # 2-d tests - setup scattered data x = np.random.rand(3)*4.0-2.0 y = np.random.rand(3)*4.0-2.0 z = x*np.exp(-x**2-y**2) ti = np.linspace(-2.0, 2.0, 100) XI, YI = np.meshgrid(ti, ti) # use RBF rbf = Rbf(x, y, z, epsilon=2) ZI = rbf(XI, YI) # plot the result n = plt.Normalize(-2., 2.) plt.subplot(1, 1, 1) plt.pcolor(XI, YI, ZI, cmap=cm.jet) plt.scatter(x, y, 100, z, cmap=cm.jet) plt.title('RBF interpolation - multiquadrics') plt.xlim(-2, 2) plt.ylim(-2, 2) plt.colorbar() plt.show() def test_2d_iterp_pts(self): import numpy as np from scipy.interpolate import Rbf import matplotlib.pyplot as plt from matplotlib import cm sourceids = ['points_nad83_0','points_nad83_1','points_nad83_2','points_nad83_3','points_nad83_4'] # plot ignored points res = self._session.query(Samplingfeature.FeatureGeometry.ST_AsText()).\ filter(Samplingfeature.SamplingFeatureCode.in_(sourceids)).all() x = [] y = [] z = [] i = 0 for geom in res: # plot the point n = loads(geom[0]) x.append(n.x) y.append(n.y) z.append(i) i += 1 minx = min(x)-.1 maxx = max(x)+.1 miny = min(y)-.1 maxy = max(y)+.1 xspace = np.linspace(minx,maxx) yspace = np.linspace(miny,maxy) xi,yi = np.meshgrid(xspace,yspace) rbf = Rbf(x, y, z, epsilon=2) zi = rbf(xi, yi) n = plt.Normalize(minx,maxy) plt.subplot(1, 1, 1) plt.pcolor(xi, yi, zi, cmap=cm.jet) plt.scatter(x, y, 100, z, cmap=cm.jet) plt.title('RBF interpolation - multiquadrics') plt.xlim(minx,maxx) plt.ylim(miny,maxy) plt.colorbar() # 2-d tests - setup scattered data # x = np.random.rand(3)*4.0-2.0 # y = np.random.rand(3)*4.0-2.0 # z = x*np.exp(-x**2-y**2) # ti = np.linspace(-2.0, 2.0, 100) # XI, YI = np.meshgrid(ti, ti) # # # use RBF # rbf = Rbf(x, y, z, epsilon=2) # ZI = rbf(XI, YI) # # # plot the result # n = plt.Normalize(-2., 2.) 
# plt.subplot(1, 1, 1) # plt.pcolor(XI, YI, ZI, cmap=cm.jet) # plt.scatter(x, y, 100, z, cmap=cm.jet) # plt.title('RBF interpolation - multiquadrics') # plt.xlim(-2, 2) # plt.ylim(-2, 2) # plt.colorbar() plt.show() # backup of database pts #pts = [(u'POINT(-111.884043556863 41.8462732663056)',), # (u'POINT(-111.647763078984 41.8412763981934)',), # (u'POINT(-111.64633540238 41.6599614695491)',), # (u'POINT(-111.889754263277 41.6556784397386)',), # (u'POINT(-111.179485153036 41.7641818616045)',)]
gpl-2.0
-4,724,675,461,572,569,000
34.85872
234
0.570549
false
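A minimal, database-free sketch of the nearest-feature selection performed by the ORDER BY ... distance_centroid ... LIMIT 1 query in the record above, using shapely only; the WKT strings and geometries below are made-up stand-ins for the sampling features, not values from the ODM2 database.

from shapely.wkt import loads

# Hypothetical WKT stand-ins for the target polygons and candidate sampling points.
polygons = [loads('POLYGON ((0 0, 4 0, 4 4, 0 4, 0 0))'),
            loads('POLYGON ((10 10, 14 10, 14 14, 10 14, 10 10))')]
points = [loads('POINT (5 5)'), loads('POINT (1 1)'), loads('POINT (12 13)')]

for poly in polygons:
    # pick the candidate point closest to the polygon centroid,
    # mirroring the ORDER BY distance-to-centroid LIMIT 1 query above
    nearest = min(points, key=lambda p: p.distance(poly.centroid))
    print(poly.centroid.wkt, '->', nearest.wkt)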
scienceopen/histfeas
histfeas/io.py
2
4385
try: import simplekml except ImportError: pass import logging from matplotlib.pyplot import figure,clf # from pymap3d import aer2geodetic from pymap3d.vincenty import vdist def planviewkml(cam,xKM,zKM,makeplot,figh,odir): """ https://developers.google.com/kml/documentation/cameras """ decimaterayfactor = 16 az = 106.444916022 #TODO arbitrary should be from real camera pointing srange = 500e3 #[m] clr=['r','b','g','m'] kclr = ['ff5c5ccd','ffff0000'] ax = figure(figh).gca(); clf() kml1d = simplekml.Kml() #%% setup camera (I preferred LookAt) # camview = skml.Camera(latitude=67.2, # longitude=-147.2, # altitude=100e3, #meters, int # heading = 180., tilt = 10., roll = 0., # altitudemode = skml.AltitudeMode.relativetoground) #%% setup LookAt lkat = simplekml.LookAt(latitude=65.111, longitude=-147.465, altitude=0, heading=180, range=4e3, tilt=45) lla = [] for C in cam: if C.usecam: #az is the temporary scalar defined above FIXME el = C.angle_deg[::decimaterayfactor] #double colon Np = el.size lla.append((C.lon,C.lat)) # get LLA of pixel rays at 100km altitude latre,lonre,altre = aer2geodetic(az,el,srange,C.lat,C.lon,C.alt_m) # get ECEF of center ray at 90km (bottom of model space) #centazray = az #TODO #centelray = cam[ci].angle_deg[Np//2] #xrc,yrc,zrc = aer2ecef(centazray,centelray,zbottom,lat0,lon0,alt0) #%% camera ground location kml1d = campoint(kml1d,(C.lat,C.lon),C.name,lkat) #%% camera rays if 'kmlrays' in makeplot: for cri in range(Np): linestr = kml1d.newlinestring(name='') linestr.coords = [(C.lon, C.lat, C.alt_m), (lonre[cri], latre[cri], altre[cri])] linestr.altitudemode = simplekml.AltitudeMode.relativetoground linestr.style.linestyle.color = kclr[C.name] ax.plot(lonre,latre,'x',color=clr[C.name],markersize=6) ax.plot(C.lon,C.lat,'o',color=clr[C.name],markersize=12,label='cam{}'.format(C.name)) ax.set_ylabel('WGS84 latitude [deg.]') ax.set_xlabel('WGS84 longitude [deg.]') ax.set_title('pixel locations at 100km altitude') ax.legend() #%% setup line on ground connecting sites kml1d = KMLline(kml1d) #%% write KML try: kmlfn = odir/'cam.kml' logging.info('saving {}'.format(kmlfn)) kml1d.save(str(kmlfn)) except Exception as e: logging.error('Error writing KML {} {}'.format(kmlfn,e)) def campoint(kml,latlon,sitename='',lkat=None): """ camera location points latlon: len=2 or 3 vector of WGS84 lat,lon (optional altitude,meters) """ bpnt = kml.newpoint(name=sitename, #description = 'camera {} location'.format(C.name), coords = [(latlon[1],latlon[0])], altitudemode = simplekml.AltitudeMode.clamptoground) bpnt.style.iconstyle.icon.href='http://maps.google.com/mapfiles/kml/shapes/arrow.png' # 'http://maps.google.com/mapfiles/kml/paddle/pink-blank.png' bpnt.style.iconstyle.scale = 2.0 bpnt.style.labelstyle.size= 2.5 #bpnt.camera = camview if lkat is not None: bpnt.lookat = lkat return kml def KMLline(kml,lla): """ https://developers.google.com/kml/faq#linestyle https://simplekml.readthedocs.org/en/latest/geometries.html#simplekml.LineString lla: list of len=2 or 3 tuples of WGS84 lat,lon (optional altitude,meters) """ assert len(lla[0]) in (2,3),'lla must be 2 or 3 length vector lat,lon,(alt)' dist_km = vdist(lla[0][0],lla[0][1], lla[1][0],lla[1][1])[0] / 1e3 ls = kml.newlinestring(name='{:.1f} km'.format(dist_km), coords=(lla[0][::-1], lla[1][::-1])) ls.style.linestyle.width = 5 ls.style.linestyle.color = simplekml.Color.yellow ls.style.labelstyle.scale= 2.5 ls.style.labelstyle.color= simplekml.Color.white ls.style.linestyle.gxlabelvisibility=1 # NOT !! labelstyle !! return kml
gpl-3.0
-2,419,800,184,413,960,700
35.541667
93
0.588597
false
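A minimal standalone sketch of the simplekml pattern used by campoint and KMLline in the record above (a styled point, a ground line, and a save call); it assumes simplekml is installed, and the coordinates and output file name are arbitrary examples.

import simplekml

kml = simplekml.Kml()

pnt = kml.newpoint(name='site A', coords=[(-147.45, 65.11)])  # (lon, lat) order
pnt.style.iconstyle.scale = 2.0

line = kml.newlinestring(name='baseline',
                         coords=[(-147.45, 65.11), (-147.20, 67.20)])
line.style.linestyle.width = 5
line.style.linestyle.color = simplekml.Color.yellow

kml.save('example.kml')  # writes the KML document to disk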
danche354/Sequence-Labeling
chunk_all/senna-pos-64-32-rmsprop5.py
1
6160
from keras.models import Model from keras.layers import Input, Masking, Dense, LSTM from keras.layers import Dropout, TimeDistributed, Bidirectional, merge from keras.layers.embeddings import Embedding from keras.utils import np_utils from keras.optimizers import RMSprop import numpy as np import pandas as pd import sys import math import os from datetime import datetime # add path sys.path.append('../') sys.path.append('../tools') from tools import conf from tools import load_data from tools import prepare from tools import plot np.random.seed(0) # train hyperparameters step_length = conf.chunk_step_length pos_length = conf.chunk_pos_length emb_vocab = conf.senna_vocab emb_length = conf.senna_length output_length = conf.chunk_ALL_length split_rate = conf.chunk_split_rate batch_size = conf.batch_size nb_epoch = 90 #conf.nb_epoch model_name = os.path.basename(__file__)[:-3] folder_path = 'model/%s'%model_name if not os.path.isdir(folder_path): os.makedirs(folder_path) # the data, shuffled and split between train and test sets train_data, dev_data = load_data.load_chunk(dataset='train.txt', split_rate=split_rate, chunk_type="ALL") train_samples = len(train_data) dev_samples = len(dev_data) print('train shape:', train_samples) print('dev shape:', dev_samples) print() word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None) word_embedding = word_embedding.values word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))]) embed_index_input = Input(shape=(step_length,)) embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input) pos_input = Input(shape=(step_length, pos_length)) senna_pos_merge = merge([embedding, pos_input], mode='concat') input_mask = Masking(mask_value=0)(senna_pos_merge) dp_1 = Dropout(0.5)(input_mask) hidden_1 = Bidirectional(LSTM(64, return_sequences=True))(dp_1) hidden_2 = Bidirectional(LSTM(32, return_sequences=True))(hidden_1) dp_2 = Dropout(0.5)(hidden_2) output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2) model = Model(input=[embed_index_input,pos_input], output=output) rmsprop = RMSprop(lr=0.0005) model.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy']) print(model.summary()) number_of_train_batches = int(math.ceil(float(train_samples)/batch_size)) number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size)) print('start train %s ...\n'%model_name) best_accuracy = 0 best_epoch = 0 all_train_loss = [] all_dev_loss = [] all_dev_accuracy = [] log = open('%s/model_log.txt'%folder_path, 'w') start_time = datetime.now() print('train start at %s\n'%str(start_time)) log.write('train start at %s\n\n'%str(start_time)) for epoch in range(nb_epoch): start = datetime.now() print('-'*60) print('epoch %d start at %s'%(epoch, str(start))) log.write('-'*60+'\n') log.write('epoch %d start at %s\n'%(epoch, str(start))) train_loss = 0 dev_loss = 0 np.random.shuffle(train_data) for i in range(number_of_train_batches): train_batch = train_data[i*batch_size: (i+1)*batch_size] embed_index, hash_index, pos, label, length, sentence = prepare.prepare_chunk(batch=train_batch, chunk_type="ALL") pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)]) y = np.array([np_utils.to_categorical(each, output_length) for each in label]) train_metrics = 
model.train_on_batch([embed_index, pos], y) train_loss += train_metrics[0] all_train_loss.append(train_loss) correct_predict = 0 all_predict = 0 for j in range(number_of_dev_batches): dev_batch = dev_data[j*batch_size: (j+1)*batch_size] embed_index, hash_index, pos, label, length, sentence = prepare.prepare_chunk(batch=dev_batch, chunk_type="ALL") pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)]) y = np.array([np_utils.to_categorical(each, output_length) for each in label]) # for loss dev_metrics = model.test_on_batch([embed_index, pos], y) dev_loss += dev_metrics[0] # for accuracy prob = model.predict_on_batch([embed_index, pos]) for i, l in enumerate(length): predict_label = np_utils.categorical_probas_to_classes(prob[i]) correct_predict += np.sum(predict_label[:l]==label[i][:l]) all_predict += np.sum(length) epcoh_accuracy = float(correct_predict)/all_predict all_dev_accuracy.append(epcoh_accuracy) all_dev_loss.append(dev_loss) if epcoh_accuracy>=best_accuracy: best_accuracy = epcoh_accuracy best_epoch = epoch end = datetime.now() model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True) print('epoch %d end at %s'%(epoch, str(end))) print('epoch %d train loss: %f'%(epoch, train_loss)) print('epoch %d dev loss: %f'%(epoch, dev_loss)) print('epoch %d dev accuracy: %f'%(epoch, epcoh_accuracy)) print('best epoch now: %d\n'%best_epoch) log.write('epoch %d end at %s\n'%(epoch, str(end))) log.write('epoch %d train loss: %f\n'%(epoch, train_loss)) log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss)) log.write('epoch %d dev accuracy: %f\n'%(epoch, epcoh_accuracy)) log.write('best epoch now: %d\n\n'%best_epoch) end_time = datetime.now() print('train end at %s\n'%str(end_time)) log.write('train end at %s\n\n'%str(end_time)) timedelta = end_time - start_time print('train cost time: %s\n'%str(timedelta)) print('best epoch last: %d\n'%best_epoch) log.write('train cost time: %s\n\n'%str(timedelta)) log.write('best epoch last: %d\n\n'%best_epoch) plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name) plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
mit
-2,480,071,498,095,432,700
32.478261
157
0.68263
false
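A compact sketch of the same architecture (word embedding concatenated with one-hot POS features, two stacked bidirectional LSTMs, per-timestep softmax), written against the current tf.keras API rather than the Keras 1 merge/input=/output= API used in the record above; the sequence length, vocabulary, POS and label sizes are placeholder assumptions, and the pretrained senna embeddings and masking are omitted for brevity.

import tensorflow as tf
from tensorflow.keras import layers, Model

step_length, emb_vocab, emb_length = 80, 30000, 50   # assumed sizes
pos_length, output_length = 45, 23                   # assumed sizes

word_in = layers.Input(shape=(step_length,))
pos_in = layers.Input(shape=(step_length, pos_length))

emb = layers.Embedding(emb_vocab + 2, emb_length)(word_in)
x = layers.Concatenate()([emb, pos_in])              # word + POS features per timestep
x = layers.Dropout(0.5)(x)
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)
x = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(x)
x = layers.Dropout(0.5)(x)
out = layers.TimeDistributed(layers.Dense(output_length, activation='softmax'))(x)

model = Model(inputs=[word_in, pos_in], outputs=out)
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0005),
              metrics=['accuracy'])
model.summary()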
DTMilodowski/EOlab
src/potentialAGB_Ghana_app.py
1
5924
import numpy as np import os import sys import matplotlib as mpl import matplotlib.cm as cm import matplotlib.pyplot as plt import prepare_EOlab_layers as EO sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/PotentialBiomass/src') import geospatial_utility_tools as geo sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/EOdata/EO_data_processing/src/') import auxilliary_functions as aux # Get perceptionally uniform colourmaps sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/EOdata/EO_data_processing/src/plot_EO_data/colormap/') import colormaps as cmaps plt.register_cmap(name='viridis', cmap=cmaps.viridis) plt.register_cmap(name='inferno', cmap=cmaps.inferno) plt.register_cmap(name='plasma', cmap=cmaps.plasma) plt.register_cmap(name='magma', cmap=cmaps.magma) plt.set_cmap(cmaps.viridis) import shapefile sf = shapefile.Reader("/home/dmilodow/DataStore_DTM/EOlaboratory/Areas/NaturalEarth/10m_cultural/ne_10m_admin_0_countries") vertices = [] for shape_rec in sf.shapeRecords(): if shape_rec.record[3] == 'Ghana': pts = shape_rec.shape.points prt = list(shape_rec.shape.parts) + [len(pts)] for i in range(len(prt) - 1): vertices.append([]) for j in range(prt[i], prt[i+1]): vertices[i].append((pts[j][0], pts[j][1])) plt.figure(1, facecolor='White',figsize=[2, 1]) plt.show() DATADIR = '/home/dmilodow/DataStore_DTM/FOREST2020/PotentialBiomassV2/output/' SAVEDIR = '/home/dmilodow/DataStore_DTM/EOlaboratory/EOlab/GhanaPotentialAGB/' NetCDF_file = 'WAFR_v003_AGBpot_mean_WC2_SOILGRIDS_GridSearch.nc' ds,geoTrans = EO.load_NetCDF(DATADIR+NetCDF_file,lat_var = 'lat', lon_var = 'lon') lat = np.asarray(ds.variables['lat']) lon = np.asarray(ds.variables['lon']) vars = ['AGB_mean','AGBpot_mean','training', 'areas'] # create clipping mask S=4.5 N=11.5 E= 1.5 W=-3.5 lat_mask = np.all((lat<=N,lat>=S),axis=0) lon_mask = np.all((lon<=E,lon>=W),axis=0) n_lat = lat_mask.sum() n_lon = lon_mask.sum() lat_clip = lat[lat_mask] lon_clip = lon[lon_mask] array_mask = np.ix_(lat_mask,lon_mask) latgrid = np.zeros((n_lat,n_lon)) longrid = np.zeros((n_lat,n_lon)) for ilat in range(0,n_lat): longrid[ilat,:] = lon_clip.copy() for ilon in range(0,n_lon): latgrid[:,ilon] = lat_clip.copy() # now loop through all polygons, and find the pixels which fall within them country_mask = np.zeros((n_lat,n_lon)) for vv in range(0,len(vertices)): temp1,temp2,inside = aux.points_in_poly(longrid.ravel(),latgrid.ravel(),vertices[vv]) country_mask[inside.reshape(n_lat,n_lon)] = 1 """ for ilat in range(0,lat_clip.size): print ilat, lat_clip.size for ilon in range(0,lon_clip.size): shape = 0 while shape<len(vertices): if aux.point_in_poly(lon_clip[ilon],lat_clip[ilat],vertices[shape]): country_mask[ilat,ilon]=1 shape = len(vertices) shape+=1 """ # apply clip dataset = {} for vv in range(0,len(vars)): dataset[vars[vv]]= np.asarray(ds.variables[vars[vv]])[array_mask] dataset[vars[vv]][country_mask==0]=-9999 # create new geoTrans object to account for clipping geoTrans[0] = np.min(lon_clip) if geoTrans[5]>0: geoTrans[3]=np.min(lat_clip) else: geoTrans[3]=np.max(lat_clip) # sequestration potential is defined by pixels with positive potential biomass that # are not already forests dataset['seqpot_mean'] = dataset['AGBpot_mean']-dataset['AGB_mean'] #dataset['seqpot_mean'][dataset['training']==1] = 0. dataset['seqpot_mean'][dataset['seqpot_mean']<0] = 0. dataset['seqpot_mean'][dataset['AGB_mean']==-9999] = -9999. dataset['training'][dataset['training']<1] = -9999. 
vars = ['AGB_mean','AGBpot_mean','seqpot_mean','training'] cmaps = ['viridis','viridis','plasma','viridis'] ulims = [400.,400.,200.,1.] llims = [0.,0.,0.,0.] axis_labels = ['AGB$_{obs}$ / Mg(C) ha$^{-1}$', 'AGB$_{potential}$ / Mg(C) ha$^{-1}$', 'Sequestration potential / Mg(C) ha$^{-1}$', 'Training sample'] for vv in range(0,len(vars)): print( vars[vv]) file_prefix = SAVEDIR + 'ghana_' + vars[vv] # delete existing dataset if present if 'ghana_'+vars[vv]+'_data.tif' in os.listdir(SAVEDIR): os.system("rm %s" % (SAVEDIR+'ghana_'+vars[vv]+'_data.tif')) if 'ghana_'+vars[vv]+'_display.tif' in os.listdir(SAVEDIR): os.system("rm %s" % (SAVEDIR+'ghana_'+vars[vv]+'_display.tif')) EO.write_array_to_display_layer_GeoTiff(dataset[vars[vv]], geoTrans, file_prefix, cmaps[vv], ulims[vv], llims[vv]) if vars[vv]!='training': EO.plot_legend(cmaps[vv],ulims[vv],llims[vv],axis_labels[vv], file_prefix,extend='max') """ rows, cols = dataset[vars[0]].shape latitude = np.arange(geoTrans_rs[3],rows*geoTrans_rs[5]+geoTrans_rs[3],geoTrans_rs[5])[:rows] longitude = np.arange(geoTrans_rs[0],cols*geoTrans_rs[1]+geoTrans_rs[0],geoTrans_rs[1])[:cols] areas = geo.calculate_cell_area_array(latitude,longitude, area_scalar = 1./10.**4,cell_centred=False) """ # loop through the variables, multiplying by cell areas to give values in Mg for vv in range(0,len(vars)): print(vars[vv]) if 'ghana_'+vars[vv]+'_total_data.tif' in os.listdir(SAVEDIR): os.system("rm %s" % (SAVEDIR+'ghana_'+vars[vv]+'_total_data.tif')) file_prefix = SAVEDIR + 'ghana_' + vars[vv] + '_total' out_array = dataset[vars[vv]] * dataset['areas'] out_array[dataset[vars[vv]]==-9999]=-9999 EO.write_array_to_data_layer_GeoTiff(out_array, geoTrans, file_prefix) out_array=None # Also want to write cell areas to file. However, as this will be compared against other layers, need to carry across # nodata values areas_out = dataset['areas'] areas_out[np.asarray(dataset[vars[0]])==-9999] = -9999 if 'ghana_cell_areas_data.tif' in os.listdir(SAVEDIR): os.system("rm %s" % (SAVEDIR+'ghana_cell_areas_data.tif')) area_file_prefix = SAVEDIR + 'ghana_cell_areas' EO.write_array_to_data_layer_GeoTiff(areas_out, geoTrans, area_file_prefix)
gpl-3.0
-7,608,073,235,829,537,000
35.567901
150
0.682309
false
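A small self-contained sketch of the latitude/longitude box clip used in the record above (boolean masks plus np.ix_ to cut a 2-D field down to the Ghana bounding box); the grid and data here are synthetic placeholders rather than the WAFR NetCDF layers.

import numpy as np

lat = np.linspace(3.0, 12.0, 19)            # degrees north, synthetic grid
lon = np.linspace(-4.0, 2.0, 13)            # degrees east, synthetic grid
field = np.random.rand(lat.size, lon.size)  # stand-in for an AGB layer

S, N, W, E = 4.5, 11.5, -3.5, 1.5           # same bounding box as above
lat_mask = (lat >= S) & (lat <= N)
lon_mask = (lon >= W) & (lon <= E)

clipped = field[np.ix_(lat_mask, lon_mask)]  # rows = kept latitudes, cols = kept longitudes
print(clipped.shape, lat[lat_mask].min(), lon[lon_mask].max())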
jhmatthews/cobra
source/alpha_ox.py
1
1900
import matplotlib.pyplot as plt
import pylab
import numpy as np
import classes as cls
import disk as d
import sys, os
import read_output as rd
import bal
from constants import *
from math import fabs

spec = rd.read_spec_file("run110e")
spec_array = bal.flambda_to_fnu (spec.spec[4], spec.freq, spec.wavelength)
BI = bal.BALnicity( C/(1549.0*ANGSTROM), spec.freq, spec_array)
print BI

ls_file = sys.argv[1]
rd.setpars()

filenames = np.loadtxt(ls_file, dtype='string')

bheight = []
bhseven =[]
bhnine =[]

for i in range(len(filenames)):
    spec = rd.read_spec_file (filenames[i])
    spec_array = bal.flambda_to_fnu (spec.spec[4], spec.freq, spec.wavelength)
    BI = bal.BALnicity( C/(1549.0*ANGSTROM), spec.freq, spec_array)

    if fabs(BI) > 3000.0:
        if 'bh7' in filenames[i]:
            bhseven.append(spec)
        if 'bh8' in filenames[i]:
            bheight.append(spec)
        if 'bh9' in filenames[i]:
            bhnine.append(spec)

    #pylab.plot(spec.wavelength, spec.spec[4])
    #pylab.xlim(1400,1700)
    #pylab.title(filenames[i])

print len( bhseven) / 24.0
print len( bheight) / 27.0
print len( bhnine) / 27.0

for i in range(len(bhseven)):
    pylab.plot(bhseven[i].wavelength, bhseven[i].spec[4])

pylab.xlim(1400,1700)
pylab.gca().set_yscale("log")
pylab.xlabel("Flux erg s$^{-1}$ cm$^{-3}$ sr$^{-1}$ $\AA^{-1}$")
pylab.ylabel("$\lambda, (/AA$)")
pylab.savefig('bh7.png')
pylab.clf()

for i in range(len(bheight)):
    pylab.plot(bheight[i].wavelength, bheight[i].spec[4])

pylab.xlim(1400,1700)
pylab.gca().set_yscale("log")
pylab.xlabel("Flux erg s$^{-1}$ cm$^{-3}$ sr$^{-1}$ $\AA^{-1}$")
pylab.ylabel("$\lambda, (/AA$)")
pylab.savefig('bh8.png')
pylab.clf()

for i in range(len(bhnine)):
    pylab.plot(bhnine[i].wavelength, bhnine[i].spec[4])

pylab.xlim(1400,1700)
pylab.gca().set_yscale("log")
pylab.xlabel("Flux erg s$^{-1}$ cm$^{-3}$ sr$^{-1}$ $\AA^{-1}$")
pylab.ylabel("$\lambda, (/AA$)")
pylab.savefig('bh9.png')
gpl-2.0
8,641,475,226,005,434,000
21.093023
75
0.664737
false
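A minimal sketch of the standard per-wavelength to per-frequency flux conversion, f_nu = f_lambda * lambda**2 / c, which is presumably what the bal.flambda_to_fnu helper in the record above performs; the toy spectrum below is a synthetic placeholder, not output from read_output.

import numpy as np

C = 2.998e10      # speed of light [cm/s]
ANGSTROM = 1e-8   # [cm]

wavelength = np.linspace(1400.0, 1700.0, 300) * ANGSTROM  # [cm]
flambda = np.ones_like(wavelength)                        # flat toy spectrum [erg/s/cm^2/cm]
fnu = flambda * wavelength**2 / C                         # [erg/s/cm^2/Hz]

print(fnu.min(), fnu.max())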
gfyoung/pandas
pandas/core/computation/ops.py
1
16488
""" Operator classes for eval. """ from __future__ import annotations from datetime import datetime from distutils.version import LooseVersion from functools import partial import operator from typing import Callable, Iterable, Optional, Union import numpy as np from pandas._libs.tslibs import Timestamp from pandas.core.dtypes.common import is_list_like, is_scalar import pandas.core.common as com from pandas.core.computation.common import ensure_decoded, result_type_many from pandas.core.computation.scope import DEFAULT_GLOBALS from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded REDUCTIONS = ("sum", "prod") _unary_math_ops = ( "sin", "cos", "exp", "log", "expm1", "log1p", "sqrt", "sinh", "cosh", "tanh", "arcsin", "arccos", "arctan", "arccosh", "arcsinh", "arctanh", "abs", "log10", "floor", "ceil", ) _binary_math_ops = ("arctan2",) MATHOPS = _unary_math_ops + _binary_math_ops LOCAL_TAG = "__pd_eval_local_" class UndefinedVariableError(NameError): """ NameError subclass for local variables. """ def __init__(self, name: str, is_local: Optional[bool] = None): base_msg = f"{repr(name)} is not defined" if is_local: msg = f"local variable {base_msg}" else: msg = f"name {base_msg}" super().__init__(msg) class Term: def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls # pandas\core\computation\ops.py:72: error: Argument 2 for "super" not # an instance of argument 1 [misc] supr_new = super(Term, klass).__new__ # type: ignore[misc] return supr_new(klass) is_local: bool def __init__(self, name, env, side=None, encoding=None): # name is a str for Term, but may be something else for subclasses self._name = name self.env = env self.side = side tname = str(name) self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS self._value = self._resolve_name() self.encoding = encoding @property def local_name(self) -> str: return self.name.replace(LOCAL_TAG, "") def __repr__(self) -> str: return pprint_thing(self.name) def __call__(self, *args, **kwargs): return self.value def evaluate(self, *args, **kwargs): return self def _resolve_name(self): res = self.env.resolve(self.local_name, is_local=self.is_local) self.update(res) if hasattr(res, "ndim") and res.ndim > 2: raise NotImplementedError( "N-dimensional objects, where N > 2, are not supported with eval" ) return res def update(self, value): """ search order for local (i.e., @variable) variables: scope, key_variable [('locals', 'local_name'), ('globals', 'local_name'), ('locals', 'key'), ('globals', 'key')] """ key = self.name # if it's a variable name (otherwise a constant) if isinstance(key, str): self.env.swapkey(self.local_name, key, new_value=value) self.value = value @property def is_scalar(self) -> bool: return is_scalar(self._value) @property def type(self): try: # potentially very slow for large, mixed dtype frames return self._value.values.dtype except AttributeError: try: # ndarray return self._value.dtype except AttributeError: # scalar return type(self._value) return_type = type @property def raw(self) -> str: return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})" @property def is_datetime(self) -> bool: try: t = self.type.type except AttributeError: t = self.type return issubclass(t, (datetime, np.datetime64)) @property def value(self): return self._value @value.setter def value(self, new_value): self._value = new_value @property def name(self): return self._name @property def ndim(self) -> int: return self._value.ndim class 
Constant(Term): def __init__(self, value, env, side=None, encoding=None): super().__init__(value, env, side=side, encoding=encoding) def _resolve_name(self): return self._name @property def name(self): return self.value def __repr__(self) -> str: # in python 2 str() of float # can truncate shorter than repr() return repr(self.name) _bool_op_map = {"not": "~", "and": "&", "or": "|"} class Op: """ Hold an operator of arbitrary arity. """ op: str def __init__(self, op: str, operands: Iterable[Union[Term, Op]], encoding=None): self.op = _bool_op_map.get(op, op) self.operands = operands self.encoding = encoding def __iter__(self): return iter(self.operands) def __repr__(self) -> str: """ Print a generic n-ary operator and its operands using infix notation. """ # recurse over the operands parened = (f"({pprint_thing(opr)})" for opr in self.operands) return pprint_thing(f" {self.op} ".join(parened)) @property def return_type(self): # clobber types to bool if the op is a boolean operator if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS): return np.bool_ return result_type_many(*(term.type for term in com.flatten(self))) @property def has_invalid_return_type(self) -> bool: types = self.operand_types obj_dtype_set = frozenset([np.dtype("object")]) return self.return_type == object and types - obj_dtype_set @property def operand_types(self): return frozenset(term.type for term in com.flatten(self)) @property def is_scalar(self) -> bool: return all(operand.is_scalar for operand in self.operands) @property def is_datetime(self) -> bool: try: t = self.return_type.type except AttributeError: t = self.return_type return issubclass(t, (datetime, np.datetime64)) def _in(x, y): """ Compute the vectorized membership of ``x in y`` if possible, otherwise use Python. """ try: return x.isin(y) except AttributeError: if is_list_like(x): try: return y.isin(x) except AttributeError: pass return x in y def _not_in(x, y): """ Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python. """ try: return ~x.isin(y) except AttributeError: if is_list_like(x): try: return ~y.isin(x) except AttributeError: pass return x not in y CMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in") _cmp_ops_funcs = ( operator.gt, operator.lt, operator.ge, operator.le, operator.eq, operator.ne, _in, _not_in, ) _cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs)) BOOL_OPS_SYMS = ("&", "|", "and", "or") _bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_) _bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs)) ARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%") _arith_ops_funcs = ( operator.add, operator.sub, operator.mul, operator.truediv, operator.pow, operator.floordiv, operator.mod, ) _arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs)) SPECIAL_CASE_ARITH_OPS_SYMS = ("**", "//", "%") _special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod) _special_case_arith_ops_dict = dict( zip(SPECIAL_CASE_ARITH_OPS_SYMS, _special_case_arith_ops_funcs) ) _binary_ops_dict = {} for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict): _binary_ops_dict.update(d) def _cast_inplace(terms, acceptable_dtypes, dtype): """ Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. dtype : str or numpy.dtype The dtype to cast to. 
""" dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value) def is_term(obj) -> bool: return isinstance(obj, Term) class BinOp(Op): """ Hold a binary operator and its operands. Parameters ---------- op : str lhs : Term or Op rhs : Term or Op """ def __init__(self, op: str, lhs, rhs): super().__init__(op, (lhs, rhs)) self.lhs = lhs self.rhs = rhs self._disallow_scalar_only_bool_ops() self.convert_values() try: self.func = _binary_ops_dict[op] except KeyError as err: # has to be made a list for python3 keys = list(_binary_ops_dict.keys()) raise ValueError( f"Invalid binary operator {repr(op)}, valid operators are {keys}" ) from err def __call__(self, env): """ Recursively evaluate an expression in Python space. Parameters ---------- env : Scope Returns ------- object The result of an evaluated expression. """ # recurse over the left/right nodes left = self.lhs(env) right = self.rhs(env) return self.func(left, right) def evaluate(self, env, engine: str, parser, term_type, eval_in_python): """ Evaluate a binary operation *before* being passed to the engine. Parameters ---------- env : Scope engine : str parser : str term_type : type eval_in_python : list Returns ------- term_type The "pre-evaluated" expression as an instance of ``term_type`` """ if engine == "python": res = self(env) else: # recurse over the left/right nodes left = self.lhs.evaluate( env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python, ) right = self.rhs.evaluate( env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python, ) # base cases if self.op in eval_in_python: res = self.func(left.value, right.value) else: from pandas.core.computation.eval import eval res = eval(self, local_dict=env, engine=engine, parser=parser) name = env.add_tmp(res) return term_type(name, env=env) def convert_values(self): """ Convert datetimes to a comparable value in an expression. """ def stringify(value): encoder: Callable if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) lhs, rhs = self.lhs, self.rhs if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: v = rhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert("UTC") self.rhs.update(v) if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: v = lhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert("UTC") self.lhs.update(v) def _disallow_scalar_only_bool_ops(self): rhs = self.rhs lhs = self.lhs # GH#24883 unwrap dtype if necessary to ensure we have a type object rhs_rt = rhs.return_type rhs_rt = getattr(rhs_rt, "type", rhs_rt) lhs_rt = lhs.return_type lhs_rt = getattr(lhs_rt, "type", lhs_rt) if ( (lhs.is_scalar or rhs.is_scalar) and self.op in _bool_ops_dict and ( not ( issubclass(rhs_rt, (bool, np.bool_)) and issubclass(lhs_rt, (bool, np.bool_)) ) ) ): raise NotImplementedError("cannot evaluate scalar only bool ops") def isnumeric(dtype) -> bool: return issubclass(np.dtype(dtype).type, np.number) class Div(BinOp): """ Div operator to special case casting. Parameters ---------- lhs, rhs : Term or Op The Terms or Ops in the ``/`` expression. 
""" def __init__(self, lhs, rhs): super().__init__("/", lhs, rhs) if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type): raise TypeError( f"unsupported operand type(s) for {self.op}: " f"'{lhs.return_type}' and '{rhs.return_type}'" ) # do not upcast float32s to float64 un-necessarily acceptable_dtypes = [np.float32, np.float_] _cast_inplace(com.flatten(self), acceptable_dtypes, np.float_) UNARY_OPS_SYMS = ("+", "-", "~", "not") _unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert) _unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs)) class UnaryOp(Op): """ Hold a unary operator and its operands. Parameters ---------- op : str The token used to represent the operator. operand : Term or Op The Term or Op operand to the operator. Raises ------ ValueError * If no function associated with the passed operator token is found. """ def __init__(self, op: str, operand): super().__init__(op, (operand,)) self.operand = operand try: self.func = _unary_ops_dict[op] except KeyError as err: raise ValueError( f"Invalid unary operator {repr(op)}, " f"valid operators are {UNARY_OPS_SYMS}" ) from err def __call__(self, env): operand = self.operand(env) return self.func(operand) def __repr__(self) -> str: return pprint_thing(f"{self.op}({self.operand})") @property def return_type(self) -> np.dtype: operand = self.operand if operand.return_type == np.dtype("bool"): return np.dtype("bool") if isinstance(operand, Op) and ( operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict ): return np.dtype("bool") return np.dtype("int") class MathCall(Op): def __init__(self, func, args): super().__init__(func.name, args) self.func = func def __call__(self, env): # pandas\core\computation\ops.py:592: error: "Op" not callable [operator] operands = [op(env) for op in self.operands] # type: ignore[operator] with np.errstate(all="ignore"): return self.func.func(*operands) def __repr__(self) -> str: operands = map(str, self.operands) return pprint_thing(f"{self.op}({','.join(operands)})") class FuncNode: def __init__(self, name: str): from pandas.core.computation.check import NUMEXPR_INSTALLED, NUMEXPR_VERSION if name not in MATHOPS or ( NUMEXPR_INSTALLED and NUMEXPR_VERSION < LooseVersion("2.6.9") and name in ("floor", "ceil") ): raise ValueError(f'"{name}" is not a supported function') self.name = name self.func = getattr(np, name) def __call__(self, *args): return MathCall(self, args)
bsd-3-clause
-7,600,813,253,418,710,000
25.550725
84
0.552705
false
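A short usage sketch of the expression machinery these operator classes back: strings passed to pandas.eval, DataFrame.eval and DataFrame.query are parsed into Term/BinOp/UnaryOp trees like the ones defined above before being evaluated. The frame and variable below are arbitrary examples.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [10.0, 20.0, 30.0]})
threshold = 15.0

print(pd.eval("2 + 3 * 4"))                    # scalar expression, parsed into a BinOp tree
print(df.eval("c = a * b"))                    # column arithmetic, returns a new frame
print(df.query("b > @threshold and a != 2"))   # '@' resolves the local variable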
nasseralkmim/elastopy
elastopy/postprocess/plotter.py
1
9746
import matplotlib.pyplot as plt from elastopy.postprocess import draw from elastopy import stress import matplotlib.animation as animation import numpy as np def show(): plt.show() def initiate(aspect='equal', axis='off'): fig = plt.figure() ax = fig.add_axes([.1, .1, .8, .8]) ax.set_aspect(aspect) if axis == 'off': ax.set_axis_off() return fig, ax def model(model, name=None, color='k', dpi=100, ele=False, ele_label=False, surf_label=False, nodes_label=False, edges_label=False): """Plot the model geometry """ fig = plt.figure(name, dpi=dpi) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel(r'x') ax.set_ylabel(r'y') ax.set_aspect('equal') draw.domain(model, ax, color=color) if ele is True: draw.elements(model, ax, color=color) if ele_label is True: draw.elements_label(model, ax) if surf_label is True: draw.surface_label(model, ax) if nodes_label is True: draw.nodes_label(model, ax) if edges_label is True: draw.edges_label(model, ax) return None def model_deformed(model, U, magf=1, ele=False, name=None, color='Tomato', dpi=100): """Plot deformed model """ fig = plt.figure(name, dpi=dpi) ax = fig.add_subplot(1, 1, 1) ax.set_xlabel(r'x') ax.set_ylabel(r'y') ax.set_aspect('equal') if ele is True: draw.elements(model, ax, color='SteelBlue') draw.deformed_elements(model, U, ax, magf=magf, color=color) draw.domain(model, ax, color='SteelBlue') # draw.deformed_domain(model, U, ax, magf=magf, color=color) def stresses(model, SIG, ftr=1, s11=False, s12=False, s22=False, spmax=False, spmin=False, dpi=100, name=None, lev=20, vmin=None, vmax=None, cbar_orientation='vertical', title=''): """Plot stress with nodal stresses """ fig, ax = initiate() ax.set_xlabel(r'x') ax.set_ylabel(r'y') if s11 is True: ax.set_title(title) draw.tricontourf(model, SIG[:, 0]/ftr, ax, 'spring', lev=lev, vmin=vmin, vmax=vmax, cbar_orientation=cbar_orientation) if s12 is True: ax.set_title(title) draw.tricontourf(model, SIG[:, 2]/ftr, ax, 'cool', lev=lev, vmin=vmin, vmax=vmax, cbar_orientation=cbar_orientation) if s22 is True: ax.set_title(title) draw.tricontourf(model, SIG[:, 1]/ftr, ax, 'autumn', lev=lev, vmin=vmin, vmax=vmax, cbar_orientation=cbar_orientation) if spmax is True: spmx = stress.principal_max(SIG[:, 0], SIG[:, 1], SIG[:, 2]) ax.set_title(title) draw.tricontourf(model, spmx/ftr, ax, 'plasma', lev=lev, vmin=vmin, vmax=vmax, cbar_orientation=cbar_orientation) if spmin is True: spmn = stress.principal_min(SIG[:, 0], SIG[:, 1], SIG[:, 2]) ax.set_title(title) draw.tricontourf(model, spmn/ftr, ax, 'viridis', lev=lev, vmin=vmin, vmax=vmax, cbar_orientation=cbar_orientation) def model_deformed_dyn(model, U, ax, magf=1, ele=False, name=None, color='Tomato', dpi=100): """Plot deformed model """ if ele is True: im = draw.deformed_elements_dyn(model, U, ax, magf=magf, color=color) else: im = draw.deformed_domain_dyn(model, U, ax, magf=magf, color=color) return im def anime(frames, fig, t_int, interval=100): """Plot animation with images frames """ ani = animation.ArtistAnimation(fig, frames, interval=interval, blit=True) return ani def stresses_dyn(model, SIG, ax, ftr=1, s11=False, s12=False, s22=False, spmax=False, spmin=False, dpi=100, name=None, lev=20, vmin=None, vmax=None): """Plot stress with nodal stresses Return: im = list with matplotlib Artist """ if s11 is True: s_range = [np.amin(s11), np.amax(s11)] ax.set_title(r'Temperature $^{\circ} C$') im = draw.tricontourf_dyn(model, SIG[:, 0]/ftr, ax, 'hot', lev=lev) if s12 is True: s_range = [np.amin(s12), np.amax(s12)] ax.set_title(r'Stress 12 ('+str(ftr)+' Pa)') im = 
draw.tricontourf_dyn(model, SIG[:, 2]/ftr, ax, 'cool', lev=lev) if s22 is True: s_range = [np.amin(s22), np.amax(s22)] ax.set_title(r'Stress 22 ('+str(ftr)+' Pa)') im = draw.tricontourf_dyn(model, SIG[:, 1]/ftr, ax, 'autumn', lev=lev) if spmax is True: spmx = stress.principal_max(SIG[:, 0], SIG[:, 1], SIG[:, 2]) s_range = [np.amin(spmx), np.amax(spmx)] ax.set_title(r'Stress Principal Max ('+str(ftr)+' Pa)') im = draw.tricontourf_dyn(model, spmx/ftr, ax, 'plasma', lev=lev, vmin=vmin, vmax=vmax) if spmin is True: spmn = stress.principal_min(SIG[:, 0], SIG[:, 1], SIG[:, 2]) s_range = [np.amin(spmn), np.amax(spmn)] ax.set_title(r'Stress Principal Min ('+str(ftr)+' Pa)') im = draw.tricontourf_dyn(model, spmn/ftr, ax, 'viridis', lev=lev) return im, s_range def stress_animation(SIG, model, t_int, dt, name="Stresses.gif", brate=500, vmin=None, vmax=None, interval=100, ftr=1, lev=20, show_plot=False, **sig_plt): """Plot an animation gif for the stresses """ N = int(t_int/dt)+1 frm, srange = [], [] fig, ax = initiate() for n in range(N): t = n*dt im, val_range = stresses_dyn(model, SIG[:, :, n], ax, **sig_plt, ftr=ftr, lev=lev) te = ax.text(0, 1, "Time (h): "+str(round(t/(60*60), 2)), ha='left', va='top', transform=ax.transAxes) frm.append(im + [te]) srange.append(val_range) # srange = [max, min] srange = np.array(srange) print('Min and Max: ', np.amin(srange), np.amax(srange)) if 'spmax' in sig_plt: cmap_color = 'plasma' if 'spmin' in sig_plt: cmap_color = 'viridis' if 's11' in sig_plt: cmap_color = 'hot' else: cmap_color = 'plasma' # Change the colorbar range sm = plt.cm.ScalarMappable(cmap=cmap_color, norm=plt.Normalize(vmin=vmin, vmax=vmax)) # fake up the array of the scalar mappable. Urgh... sm._A = [] cbar = plt.colorbar(sm) cbar.set_label(r'Temperature $^{\circ} C$') ani = anime(frm, fig, t_int, interval=interval) ani.save(name, writer='imagemagick', bitrate=brate) if show_plot is True: plt.show(block=False) def displ_animation(U, model, t_int, dt, magf=1, name='displacement.gif', brate=250, interval=100, show_plot=False): """Plot an animation for the displacement """ N = int(t_int/dt)+1 fig, ax = initiate() frm = [] for n in range(N): t = n*dt im = model_deformed_dyn(model, U[:, n], ax, ele=True, magf=magf) te = ax.text(.5, 1, "Time (h): "+str(round(t/(60*60), 2)), ha='center', va='top', transform=ax.transAxes) frm.append([im, te]) ani = anime(frm, fig, t_int, interval=interval) ani.save(name, writer='imagemagick', bitrate=brate) plt.show() def stress_through_time(model, sig, node, t_int, dt, time_scale='day', ax=None, ylabel='', label='...', linestyle=None, marker=None, title=None): """Plott solution at a specific node through time """ print('Plotting Stress through time at node {} ...'.format(node), end='') if ax is None: fig, ax = plt.subplots() if time_scale == 'day': time_factor = 60*60*24 elif time_scale == 'hour': time_factor = 60*60 else: print('Time scale should be hour or day!') # t_int in seconds number_steps = int(t_int/dt) t = np.linspace(0, t_int, number_steps+1) ax.plot(t/time_factor, sig/1e6, label=label, linestyle=linestyle, marker=marker, mfc='none', mew=.5) ax.set_xlabel('Time ({})'.format(time_scale)) ax.set_ylabel(ylabel) if title is not None: ax.set_title(title) ax.legend() plt.tight_layout() print('Done') def stress_along_y_at_time(model, sig, time, t_int, dt, time_scale='day', ax=None, x=0, label=None, marker=None, ylabel=None, ftr=1, linestyle=None): print('Plotting solution at time {} through the y-axis ...'.format(time), end='') if ax is None: fig, ax = 
plt.subplots() if time_scale == 'day': # (t_int/dt) is the number of steps # (t_int/60*60*24) is the time interval in days time_index = int((t_int/dt)/(t_int/(60*60*24))*time) elif time_scale == 'hour': time_index = int((t_int/dt)/(t_int/(60*60))*time) else: print('Time scale should be hour or day!') nodes = np.where(np.round(model.XYZ[:, 0], 3) == x)[0] y = model.XYZ[nodes, 1] data = np.array(list(zip(y, sig[nodes, time_index]))) sorted_data = data[np.argsort(data[:, 0])] # import os # np.savetxt('gisele_solution_in_y.txt', # sorted_data, # fmt='%.4f', newline=os.linesep) ax.plot(sorted_data[:, 1]/ftr, sorted_data[:, 0], label=label, marker=marker, linestyle=linestyle, mfc='none', mew=.5) ax.set_ylabel(r'$y (m)$') ax.set_xlabel(ylabel) ax.legend() plt.tight_layout() print('Done')
gpl-3.0
-7,231,165,284,431,400,000
30.237179
85
0.554381
false
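A minimal standalone sketch of the ArtistAnimation pattern that anime(), stress_animation() and displ_animation() rely on in the record above: build a list of per-frame artist lists, animate it, and save. The toy data, frame count and output name are arbitrary, and the 'pillow' writer is used here only to avoid the imagemagick dependency.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)

frames = []
for n in range(30):
    line, = ax.plot(x, np.sin(x + 0.2 * n), color='C0')             # one curve per frame
    label = ax.text(0.02, 0.95, 'step %d' % n, transform=ax.transAxes)
    frames.append([line, label])                                     # artists shown in frame n

ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True)
ani.save('wave.gif', writer='pillow')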
ibogun/DeepAntrack
python/Evaluation/DatasetEvaluation.py
1
17795
__author__ = 'Ivan' import glob import cPickle import datetime import os import re import sys import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as mpatches # import seaborn as sns class AllExperiments(object): """""" def load(self, results_path, datasetType, trackerLabel): # find how many subfolders in the result path subfolders=os.listdir(results_path); subfolders=[x for x in subfolders if not ("." in x)] if len(subfolders)==2: paths = list() paths.append(results_path + "/SRE/"); paths.append(results_path + "/TRE/"); run1 = Experiment(paths[1], datasetType, trackerLabel) run1.loadResults(lookForDefault=True) run2 = Experiment(paths[0], datasetType, trackerLabel) run2.loadResults() run3 = Experiment(paths[1], datasetType, trackerLabel) run3.loadResults() d = dict() d['default'] = run1; d['SRE'] = run2; d['TRE'] = run3; self.data = d; else: # if there are three folders do this, otherwise do something else paths = list() paths.append(results_path + "/default/"); paths.append(results_path + "/SRE/"); paths.append(results_path + "/TRE/"); run1 = Experiment(paths[0], datasetType, trackerLabel) run1.loadResults() run2 = Experiment(paths[1], datasetType, trackerLabel) run2.loadResults() run3 = Experiment(paths[2], datasetType, trackerLabel) run3.loadResults() d = dict() d['default'] = run1; d['SRE'] = run2; d['TRE'] = run3; self.data = d; def save(self, trackerLabel, picklePathPrefix='./Runs/'): picklePath = picklePathPrefix + '/' + trackerLabel + '.p' savePickle(self, picklePath) class Dataset(object): def __init__(self, path_groundTruth, datasetType): ''' :param path_groundTruth: :param datasetType: :return: ''' self.path_gt = path_groundTruth self.datasetType = datasetType self.loadGroundTruth() def loadGroundTruth(self): videos = [f for f in os.listdir(self.path_gt) if not f.startswith('.')] # list videos l = list() listDicts = list() for vid in videos: d = dict() vidPath = self.path_gt + "/" + vid boxes = self.loadOneGroundTruth(vidPath) images = self.loadImages(vidPath) l.append((vid, boxes)) # <== This should be deprecated d["name"] = vid; d["boxes"] = boxes; d["images"] = images; listDicts.append(d); self.data = l; self.dictData = listDicts; def loadImages(self, path): if self.datasetType == 'vot2014': format = 'jpg' elif self.datasetType == 'wu2013': format = 'jpg' path = path + "/img/"; images = glob.glob(path + "*." + format); else: print "Dataset not recognized" return images; def loadOneGroundTruth(self, path): if self.datasetType == 'vot2014': gt = np.genfromtxt(path + "/groundtruth.txt", delimiter=',') print "VOT 2014 need rework" return gt elif self.datasetType == 'wu2013': gt = np.genfromtxt(path + "/groundtruth_rect.txt", delimiter=',') #print gt.max() if np.isnan(np.min(gt)): gt = np.genfromtxt(path + "/groundtruth_rect.txt", ) return gt; else: print "Dataset not recognized" class Experiment(object): def __init__(self, path_current, datasetType, tracker_label): self.path_results = path_current self.trackerLabel = tracker_label self.datasetType = datasetType def loadResults(self,lookForDefault=False,oldEvaluation=False): ''' Load ground truth annotations. 
Note the format of the annotations: - top left corner width height :return: saves results in a list of the form : (sequenceName, matrix) ''' # list all the files resultFilesNames = glob.glob(self.path_results + "/*.dat") try: f = open(self.path_results + "/tracker_info.txt", 'r') trackerInformation = f.read(); f.close(); f = open(self.path_results + "/experiment_info.txt", 'r') experimentInformation = f.read(); f.close() self.trackerInformation = trackerInformation self.experimentInformation = experimentInformation; except IOError: self.trackerInformation = "Not avaliable" self.experimentInformation = "Not avaliable" # "OPE" or default tracker run is simply the first run of the tracker in TRE # lookForDefault is a flag which separates OPE from TRE and SRE if lookForDefault: regExpression = re.compile("(.*\/+)([\w|-]+)(_sframe=0)(?=__.*)") else: regExpression = re.compile("(.*\/+)([\w|-]+)(_sframe=\d+)(?=__.*)") # for old files #regExpression = re.compile("(.*\/+)([\w|-]+)(?=__.*)") # get rid of absolute path and then delete extension l = list() counts = dict() boxesDict = dict(); names = set() resultFilesNames=[x for x in resultFilesNames if regExpression.match(x) is not None]; # find different videos in the dataset for fileNames in resultFilesNames: m = regExpression.match(fileNames) names.add(m.group(2)) for video in names: counts[video] = 0; boxesDict[video] = list() for fileNames in resultFilesNames: m = regExpression.match(fileNames) video = m.group(2) #sequenceName=os.path.splitext(os.path.basename(fileNames))[0] #print fileNames try: boxes = np.loadtxt(fileNames, delimiter=',') except ValueError: boxes = np.loadtxt(fileNames, delimiter='\t') except: print "Unexpected error:", sys.exc_info()[0] raise boxesDict[video].append(boxes) counts[video] = counts[video] + 1 for video, videoList in boxesDict.iteritems(): l.append((video, videoList)) #l.append((sequenceName,boxes)) self.data = l; self.time = datetime.datetime.now() def __str__(self): s = "Results on " + self.datasetType + " dataset \n" s = s + "Time: " + str(self.time) + "\n\n" s = s + "Tracker information: \n\n" s = s + self.trackerInformation return s def save(self, saveTo): cPickle.dump(self, open(saveTo, 'w')) def loadPickle(path): return cPickle.loads(open(path, 'r').read()) def savePickle(obj, path): cPickle.dump(obj, open(path, 'w')) class Evaluator(object): def __init__(self, dataset, listOfExperiments): self.dataset = dataset self.listOfExperiments = listOfExperiments @staticmethod def getIntegralValues(x_pr,y_pr,x_s,y_s): p = np.trapz(y_pr, x=x_pr) / 51 s = np.trapz(y_s, x=x_s) return (p,s) @staticmethod def createPlotData(centerDistance, maxValue=50, n=100): x = np.linspace(0, maxValue, num=n); y = np.zeros(n) for idx in range(0, n): # find percentage of centerDistance<= x[idx y[idx] = len(np.nonzero(centerDistance <= x[idx])[0]) / (centerDistance.shape[0] * 1.0) return (x, y) def createHistogramPlot(self, x_s, y_s, x_pr, y_pr, trackerNames, savefilename=''): precision = list() success = list() n_groups = len(x_pr) names = list() plt.figure(figsize=(15, 10)) for i in range(0, n_groups): p = np.trapz(y_pr[i], x=x_pr[i]) / 51 s = np.trapz(y_s[i], x=x_s[i]) precision.append(p) success.append(s) names.append(self.listOfExperiments[i].trackerLabel) plt.subplot(1, 2, 1) plt.subplots_adjust(bottom=0.2) #plt.xlim([0,1.1]) index = np.arange(n_groups) idx_success = [i[0] for i in sorted(enumerate(success), key=lambda x: x[1])] successTrackerNames = [trackerNames[x] for x in idx_success] sorted_success = [success[x] for x in 
idx_success] plt.xticks(index, successTrackerNames, rotation=45) plt.bar(index, sorted_success, align="center") plt.ylim((0, 1)) plt.title("Success") plt.subplot(1, 2, 2) # NOTE: BOTH ARE SORTED ACCORDING TO SUCCESS precisionTrackerNames = [trackerNames[x] for x in idx_success] sorted_precision = [precision[x] for x in idx_success] plt.bar(index, sorted_precision, align="center") plt.xticks(index, precisionTrackerNames, rotation=45) plt.ylim((0, 1)) plt.title("Precision") if savefilename == '': plt.show() else: plt.savefig(savefilename) def createPlot(self, x_s, y_s, x_pr, y_pr, savefilename=''): plt.figure(figsize=(15, 10)) cm = plt.get_cmap('gist_rainbow') NUM_COLORS = len(x_pr) headerFontSize = 14; axisFontSize = 12; lineWidth = 1.8; legendSize = 9; with plt.style.context('grayscale'): handlesLegendPrecision = list() handlesLegendSuccess = list() for i in range(0, len(x_s)): p = np.trapz(y_pr[i], x=x_pr[i]) / 51 s = np.trapz(y_s[i], x=x_s[i]) p = np.ma.round(p, 2) s = np.ma.round(s, 2) color = cm(1. * i / NUM_COLORS) red_patch = mpatches.Patch(label=self.listOfExperiments[i].trackerLabel + ' [' + str(p) + ']', color=color) blue_path = mpatches.Patch(label=self.listOfExperiments[i].trackerLabel + ' [' + str(s) + ']', color=color) handlesLegendPrecision.append(red_patch) handlesLegendSuccess.append(blue_path) print self.listOfExperiments[i].trackerLabel plt.subplot(1, 2, 1) for i in range(0, len(x_s)): plt.plot(x_s[i], y_s[i], linewidth=lineWidth, color=cm(1. * i / NUM_COLORS)) plt.title('success', fontsize=headerFontSize) plt.ylim([0, 1.1]) plt.xlim([-0.02, 1.1]) plt.xlabel('Overlap threshold', fontsize=axisFontSize) plt.ylabel('Success rate', fontsize=axisFontSize) plt.legend(handles=handlesLegendSuccess, prop={'size': legendSize}) plt.grid(b=False) #plt.axes("on") plt.subplot(1, 2, 2) for i in range(0, len(x_pr)): plt.plot(x_pr[i], y_pr[i], linewidth=lineWidth, color=cm(1. 
* i / NUM_COLORS)) plt.ylim([0, 1.1]) plt.xlim([-0.5, 51]) plt.title("precision", fontsize=headerFontSize) plt.grid(b=False) #plt.axes("on") plt.xlabel('Location error threshold', fontsize=axisFontSize) plt.ylabel('Precision', fontsize=axisFontSize) plt.legend(handles=handlesLegendPrecision, prop={'size': legendSize}, loc=2) if savefilename == '': plt.show() else: plt.savefig(savefilename) @staticmethod def evaluateSingleVideo(video, gt, experimentNumber=0, n=1000): ''' Evaluate single video tracker run :param video: video data :param gt: ground truth data :param n: number of points to sample :return: (x_pr,y_pr,x_s,y_s) list ''' if video[0] != gt[0]: raise Exception("You cannot compare apples to oranges \n OR " + video[0] + " and " + gt[0]) findCenter = lambda x: np.array([x[0] + x[2] / 2.0, x[1] + x[3] / 2.0]) distCenter = lambda x, y: np.linalg.norm(findCenter(x) - findCenter(y)) get4D = lambda x: np.array([x[0], x[1], x[0] + x[2], x[1] + x[3]]) intersection = lambda x, y: max(min(x[2], y[2]) - max(x[0], y[0]), 0) * max(min(x[3], y[3]) - max(x[1], y[1]), 0) area = lambda x: ((int)(x[3]) - (int)(x[1])) * ((int)(x[2]) - (int)(x[0])) distJaccard = lambda x, y: (intersection(x, y) / ((float)(area(x) + area(y) - intersection(x, y)))) distJarrardFull = lambda x, y: distJaccard(get4D((x)), get4D((y))) boxes = video[1][experimentNumber] boxes_gt = gt[1] # print video[0], " ", " ground truth size: ", boxes_gt.shape[0], " got size: ", boxes[0].shape[0] # print boxes_gt.shape[0] # print boxes[0].shape[0] nFrames = min(boxes.shape[0], boxes_gt.shape[0]); centerDistance = np.zeros((nFrames, 1)) overlap_over_union = np.zeros((nFrames, 1)) for idx in range(0, nFrames): # calculate different statistics: overlap over union and euclidean distance of centers if np.isnan(np.sum(boxes[idx])): overlap_over_union[idx]=0 centerDistance[idx]=np.inf else: overlap_over_union[idx] = distJarrardFull(boxes[idx], boxes_gt[idx]) centerDistance[idx] = distCenter((boxes[idx]), (boxes_gt[idx])) (x_pr, y_pr) = Evaluator.createPlotData(centerDistance, maxValue=50, n=n) (x_s, y_s) = Evaluator.createPlotData(overlap_over_union, maxValue=1, n=n) # complement success plot curve y_s = 1 - y_s; return (x_pr, y_pr, x_s, y_s) def evaluateSingleTracker(self, listRun, n=1000): listGT = self.dataset.data runs = listRun.data precision_x = np.zeros(n) precision_y = np.zeros(n) success_x = np.zeros(n) success_y = np.zeros(n) for video in runs: gt = [x for x in listGT if x[0] == video[0]][0] (x_pr, y_pr, x_s, y_s) = Evaluator.evaluateSingleVideo(video, gt, n=n) precision_x = precision_x + x_pr precision_y = precision_y + y_pr success_x = success_x + x_s success_y = success_y + y_s precision_x = precision_x / len(runs) precision_y = precision_y / len(runs) success_x = success_x / len(runs) success_y = success_y / len(runs) return (precision_x, precision_y, success_x, success_y) def evaluate(self, n=1000, successAndPrecisionPlotName='', histogramPlot=''): ''' Evaluate the dataset :return: accuracy and precision ''' listGT = self.dataset.data pr_x_list = list() pr_y_list = list() sc_x_list = list() sc_y_list = list(); experimentNames = list() for listRun in self.listOfExperiments: runs = listRun.data experimentNames.append(listRun.trackerLabel) (precision_x, precision_y, success_x, success_y) = self.evaluateSingleTracker(listRun, n) pr_x_list.append(precision_x) pr_y_list.append(precision_y) sc_x_list.append(success_x) sc_y_list.append(success_y) # REWRITE THIS FUNCTION: SUCCESS plots are not generated properly self.createPlot(sc_x_list, 
sc_y_list, pr_x_list, pr_y_list, savefilename=successAndPrecisionPlotName) # get some real data and finish this plot self.createHistogramPlot(sc_x_list, sc_y_list, pr_x_list, pr_y_list, trackerNames=experimentNames, savefilename=histogramPlot) if __name__ == "__main__": wu2013results = "/Users/Ivan/Files/Results/Tracking/wu2013" wu2013GroundTruth = "/Users/Ivan/Files/Data/wu2013" vot2014Results = "/Users/Ivan/Files/Results/Tracking/vot2014" vot2014GrounTruth = "/Users/Ivan/Files/Data/vot2014" datasetType = 'wu2013' # Note: in wu2013 i # trackerLabel="STR+f_hog" #wildcard = sys.argv[1] wildcard="lambda" # # run=Experiment(wu2013results,datasetType,trackerLabel) # run.loadResults() # picklePath='./Runs/'+trackerLabel+'.p' # # # # savePickle(a,picklePath) # # # run=loadPickle(picklePath) # # dataset = Dataset(wu2013GroundTruth, datasetType) runsNames = glob.glob('./Runs/' + wildcard + '*.p') experimentType='default' runs = list() for runName in runsNames: run = loadPickle(runName) run=run.data[experimentType] runs.append(run) evaluator = Evaluator(dataset, runs) saveFigureToFolder = '/Users/Ivan/Code/personal-website/Projects/Object_aware_tracking/images/multiScale/' saveFormat = ['png', 'pdf'] successAndPrecision = 'SuccessAndPrecision_wu2013' histograms = 'histogram_wu2013' # for i in saveFormat: # evaluator.evaluate(successAndPrecisionPlotName=saveFigureToFolder+successAndPrecision+'.'+ # i,histogramPlot=saveFigureToFolder+histograms+'.'+ # i) evaluator.evaluate()
mit
3,967,240,927,230,921,000
27.426518
118
0.546783
false
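A small standalone sketch of the two per-frame quantities computed inside Evaluator.evaluateSingleVideo above, centre distance and intersection-over-union, for boxes given as (x, y, width, height); the two boxes below are made-up examples rather than tracker output.

import numpy as np

def center(box):
    x, y, w, h = box
    return np.array([x + w / 2.0, y + h / 2.0])

def iou(a, b):
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    ix = max(min(ax + aw, bx + bw) - max(ax, bx), 0.0)   # overlap width
    iy = max(min(ay + ah, by + bh) - max(ay, by), 0.0)   # overlap height
    inter = ix * iy
    return inter / (aw * ah + bw * bh - inter)

gt, pred = (10, 10, 40, 30), (15, 12, 40, 30)
print(np.linalg.norm(center(gt) - center(pred)))  # centre distance in pixels
print(iou(gt, pred))                              # overlap over union in [0, 1]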
k-shukla/heisenberg_model-py
Shukla - Ising Model 2D MCRG.py
1
12407
# -*- coding: utf-8 -*- ''' Here, we create a static 2D N-by-M Ising grid of spins up and down, an update mechanism to update the spin at every site, and finally include the presence of an inter-spin coupling and an external magnetic field in the grids. This script then performs a histogram analysis of the lattices generated. This is part of an attempt to recreate the weighted histogram analysis method (WHAM) seen in A. Ferrenberg & R. Swendsen, Phys. Rev. Lett. 61, 23 (1988) and A. Ferrenberg & R. Swendsen, Phys. Rev. Lett. 63, 12 (1989). We're specifically looking at the two-state Ising model, i.e. with spins ±1/2. ''' # This section imports the libraries necessary to run the program. import math import matplotlib import numpy import random import time # This section stores the time at the start of the program. program_start_time = time.clock() ''' Since we're interested in the amount of time the program will run in, we'll store the time at the beginning of the program using time.clock(), and compare it to the time at the end (again using time.clock(), applied to a different variable name. time.clock() just takes the time at a given moment; it's up to us to store it properly. ''' # This section sets the simulation parameters. x_len = 8 # x_len is the number of sites in each row. y_len = 8 # y_len is the number of rows in each column. size = x_len * y_len # size simply keeps the total number of sites handy. MC_num = 1000000 # MC_num is the number of Monte Carlo updates. hist_bin_size = 1 # hist_bin_size is the size of the bins of the histograms. MC_therm_steps = 10000 # MC_therm_steps is the number of thermalisation steps. h_hist = 0.0 # h_hist is the histogram external field. T_hist = 2.5 # T_hist is the histogram temperature. b_hist = 1/T_hist # b_hist is the value of beta corresponding to the histogram temperature. Jx_hist = 1.0 # Jx_hist is the histogram x-direction coupling constant. Jy_hist = 1.0 # Jy_hist is the histogram y-direction coupling constant. kNN_2pt_G_dist = 1 # kNN_2pt_G_dist is the distance at which we're looking at the kth nearest-neighbour two-point Green function. kNN_2pt_G_conn = False # kNN_2pt_G_conn tells us whether we're looking at the two-point disconnected or the two-point connected Green function. # This section creates the initial system, a static 2D array of spins (up or down). initial_grid = numpy.random.choice([-1, 1], size = [y_len, x_len]) ''' Note that this is faster than my original choice of how to initialise the system: initial_grid = [[-1.0 if random.random() <= 0.5 else 1.0 for cube in xrange(x_len)] for row in xrange(y_len)] ''' # This function provides a printed version of the 2D Ising grid. def print_grid(grating): Ising_grid_printed = [] for chain in grating: IG_single_row = [] for entry in chain: if entry == -1.0: IG_single_row += ["-"] elif entry == +1.0: IG_single_row += ["+"] else: raise ArithmeticError("Ising spin must be +1.0 or -1.0") IG_single_row_printed = " ".join(IG_single_row) Ising_grid_printed += [IG_single_row_printed] for IG_row in Ising_grid_printed: print IG_row return Ising_grid_printed # This function performs a single Monte Carlo update. 
def MC_update(lat, h, Jx, Jy, T): x_size = len(lat[0]) y_size = len(lat) beta = 1.0 / T for y in xrange(y_size): for x in xrange(x_size): dE = 0.0 dE += h * lat[y][x] dE += Jx * lat[y][(x-1) % x_size] * lat[y][x] dE += Jx * lat[y][(x+1) % x_size] * lat[y][x] dE += Jy * lat[(y-1) % y_size][x] * lat[y][x] dE += Jy * lat[(y+1) % y_size][x] * lat[y][x] if random.random() < math.exp(-2*beta*dE): lat[y][x] = -lat[y][x] return lat ''' Following Swendsen's remark, I'll exploit the fact that exp(0) = 1 and that P = exp(-beta*E), which here is P = exp(-2*beta*h*spin). Since we have P as 1 for E < 0 and exp(-beta*E) for E > 0, it suffices to compare the result of random.random() with exp(-2*beta*h*spin). This is the standard thing we do with the Metropolis-Hastings algorithm, but exploiting the fact that exp(0) = 1 simplifies matters, since it lets us collapse the min(1, exp(-a)) comparison into a single line. ''' # This function retrieves the magnetisation, energy, and kth nearest-neighbour two-point Green function of a given lattice. def lat_props(trel, mu, ccx, ccy, temp, dist, conn): net_M = 0.0 net_E = 0.0 net_corr = 0.0 x_size = len(trel[0]) y_size = len(trel) sites = float(x_size * y_size) for y_pt in xrange(y_size): for x_pt in xrange(x_size): curr_site = trel[y_pt][x_pt] next_site_down = trel[(y_pt + dist) % y_size][x_pt] next_site_right = trel[y_pt][(x_pt + dist) % x_size] net_M += curr_site net_E += -mu * curr_site net_E += -ccx * trel[y_pt][(x_pt + 1) % x_size] * curr_site net_E += -ccy * trel[(y_pt + 1) % y_size][x_pt] * curr_site net_corr += curr_site * next_site_down net_corr += curr_site * next_site_right lat_m = net_M / sites lat_e = net_E / sites disc_corr_func = net_corr / sites conn_corr_func = disc_corr_func - (lat_m ** 2.0) if conn == True: return (net_M, lat_m, net_E, lat_e, conn_corr_func) elif conn == False: return (net_M, lat_m, net_E, lat_e, disc_corr_func) else: raise TypeError("'conn' must be of type bool") ''' Note that this gives either the kth nearest-neighbour two-point connected correlation function (i.e. G^(2)_c(i, i+k) = <x_i x_(i+k)> - <x_i><x_(i+k)>) or the kth nearest-neighbour two-point disconnected correlation function (i.e. G^(2)(i, i+k) = <x_i x_(i+k)>), depending on whether or not we have conn = True or conn = False. Since <x_i> = <x_(i+k)> = m (the average per-site magnetisation), the two-point connected correlation function just substitutes m^2 for <x_i><x_(i+k)>. ''' # This function performs the MC thermalisation. def MC_thermal(collec, therm_steps, mag_field, couplx, couply, t): now_collec = collec for indiv_step in xrange(therm_steps): now_collec = MC_update(now_collec, mag_field, couplx, couply, t) return now_collec # This function performs several Monte Carlo updates, with the number of Monte Carlo updates specified by MC_iter. 
def many_MC(array, MC_iter, ext_field, cc_x, cc_y, tepl, therm_steps_per_sample, many_MC_G_dist, many_MC_G_corr): MC_M = [0] * MC_iter MC_E = [0] * MC_iter MC_G = [0] * MC_iter x_dist = len(array[0]) y_dist = len(array) points = float(x_dist * y_dist) b = 1.0/tepl now_lat = array for update in xrange(MC_iter): now_lat = MC_thermal(now_lat, therm_steps_per_sample, ext_field, cc_x, cc_y, tepl) now_update = MC_update(now_lat, ext_field, cc_x, cc_y, tepl) now_props = lat_props(now_update, ext_field, cc_x, cc_y, tepl, many_MC_G_dist, many_MC_G_corr) now_lat = now_update MC_M[update] = now_props[0] MC_E[update] = now_props[2] MC_G[update] = now_props[4] avg_M = numpy.mean(MC_M, axis = None) avg_m = float(avg_M / points) avg_E = numpy.mean(MC_E, axis = None) avg_e = float(avg_E / points) avg_G = numpy.mean(MC_G, axis = None) cv = math.pow(b, 2) * numpy.var(MC_E, axis = None) / points if ext_field != 0.0: sus = b * numpy.var(MC_M, axis = None) / points else: sus = b * numpy.var(MC_M, axis = None) / (points ** 2.0) return (now_lat, avg_M, avg_m, avg_E, avg_e, avg_G, sus, cv, MC_M, MC_E, MC_G) ''' We need to do this for the susceptibility in the case of h = 0 because in this specific case, we have no interactions whatsoever. Thus, we're looking at the standard deviation of a set of ±1 values picked at random; since there's no scale dependence, multiplying by array_sites in this specific case will give us an extraneous factor of array_sites. To write cv in terms of the total energy rather than the per-site energy, we have: cv = (math.pow(b, 2) * (avg_E2 - math.pow(avg_E, 2))) / array_sites. ''' # This function performs a single renormalisation reduction. def reduction(grid, x_reduce_factor, y_reduce_factor): if len(grid) % y_reduce_factor != 0 or len(grid[0]) % x_reduce_factor != 0: raise ArithmeticError("reduce_factor for a given direction must be an integer factor of that direction's length") else: x_len = len(grid[0]) y_len = len(grid) x_len_new = x_len / x_reduce_factor y_len_new = y_len / y_reduce_factor reduced_grid = numpy.zeros(shape = [y_len_new, x_len_new]) for spin_block_y in xrange(0, y_len, y_reduce_factor): for spin_block_x in xrange(0, x_len, x_reduce_factor): block_net_spin = 0 for y_site_place in xrange(spin_block_y, spin_block_y + y_reduce_factor): for x_site_place in xrange(spin_block_x, spin_block_x + x_reduce_factor): block_net_spin += grid[y_site_place][x_site_place] reduced_grid[spin_block_y // y_reduce_factor][spin_block_x // x_reduce_factor] = numpy.sign(block_net_spin) if int(reduced_grid[spin_block_y // y_reduce_factor][spin_block_x // x_reduce_factor]) == 0: reduced_grid[spin_block_y // y_reduce_factor][spin_block_x // x_reduce_factor] = numpy.random.choice([-1, 1]) return reduced_grid ''' In particular, this performs an r_x by r_y reduction; i.e. reduction of the x-direction by r_x and reduction of the y-direction by r_y. Note that if we apply x != y, we would have an anisotropic reduction, which for the Monte Carlo renormalisation group process to work correctly requires the Hamiltonian to have an anisotropy between the x and y couplings; i.e. something like H = J_x s_x1 s_x2 + J_y s_y1 s_y2. Also note that if the spins of the block average to zero, we randomly pick a ±1/2 spin. 
Finally, note that this does a simultaneous x-direction and y-direction reduction, so if we want to do more
i-reductions than j-reductions for some perverse reason, we should split up the reduction into two calls to
reduction(); one where both are reduced together, and one where the scale for j is set to 1.
'''

# Here, we run the simulation. For testing, we also print the actual arrays; these commands are then
# commented out as necessary.
print "Initial 2D Ising Grid:"
print " "
print_grid(initial_grid)
print " "
print " "

demo_MC_iter = 10            # number of Monte Carlo samples taken in this test run
demo_therm_steps = 10        # thermalisation sweeps per sample in this test run

updated_grid = many_MC(array = initial_grid, MC_iter = demo_MC_iter, ext_field = 0.0, cc_x = 1.0, cc_y = 1.0, tepl = 2.0, therm_steps_per_sample = demo_therm_steps, many_MC_G_dist = 1, many_MC_G_corr = False)

print "Updated 2D Ising Grid:"
print " "
print_grid(updated_grid[0])

# This section stores the time at the end of the program.
program_end_time = time.clock()
total_program_time = program_end_time - program_start_time

print " "
print "Program run time: %f seconds" % (total_program_time)
# Each sample performs demo_therm_steps thermalisation sweeps plus one measurement sweep, so the per-site,
# per-sweep figure is normalised by the sweeps actually executed in this run (not by the unused MC_num and
# MC_therm_steps parameters declared above).
print "Program run time per site per MC sweep: %6g seconds" % (total_program_time / (demo_MC_iter * (demo_therm_steps + 1) * size))

'''
Note: To find out how long the program takes, we take the difference of time.clock() evaluated at the
beginning of the program and at the end of the program. Here, we take the time at the end of the program
and compute the total program time.
'''
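# A minimal single-histogram reweighting sketch in the spirit of Ferrenberg & Swendsen, assuming the
# per-sample total energies returned by many_MC (MC_E, element 9 of its return tuple) were generated at
# inverse temperature beta_sampled. Expectation values at a nearby beta_target are then estimated by
# reweighting each sample with exp(-(beta_target - beta_sampled) * E). The function name reweight_energy
# is a placeholder introduced only for this illustration.
def reweight_energy(MC_E, beta_sampled, beta_target):
    energies = numpy.asarray(MC_E, dtype=float)
    log_w = -(beta_target - beta_sampled) * energies
    log_w -= log_w.max()                       # shift to avoid overflow in the exponential
    weights = numpy.exp(log_w)
    return numpy.sum(weights * energies) / numpy.sum(weights)

# For example, the energies of the test run above were sampled at tepl = 2.0, so an estimate of the mean
# energy at T = 2.1 would be reweight_energy(updated_grid[9], 1.0/2.0, 1.0/2.1).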
mit
5,622,901,772,047,909,000
42.779783
184
0.601258
false
enfeizhan/clusteror
clusteror/core.py
1
32044
''' This module contains ``Clusteror`` class capsulating raw data to discover clusters from, the cleaned data for a clusteror to run on. The clustering model encompasses two parts: 1. Neural network: Pre-training (often encountered in Deep Learning context) is implemented to achieve a goal that the neural network maps the input data of higher dimension to a one dimensional representation. Ideally this mapping is one-to-one. A Denoising Autoencoder (DA) or Stacked Denoising Autoencoder (SDA) is implemented for this purpose. 2. One dimensional clustering model: A separate model segments the samples against the one dimensional representation. Two models are available in this class definition: * K-Means * Valley model The pivot idea here is given the neural network is a good one-to-one mapper the separate clustering model on one dimensional representation is equivalent to a clustering model on the original high dimensional data. Note ---- Valley model is explained in details in module ``clusteror.utils``. ''' # import ipdb import os import sys import json import timeit import warnings import numpy as np import pandas as pd import pickle as pk import theano import theano.tensor as T from sklearn.cluster import KMeans from theano import function from theano import shared from theano.tensor.shared_randomstreams import RandomStreams from .nn import dA from .nn import SdA from .settings import numpy_random_seed from .settings import theano_random_seed from .utils import find_local_extremes class OutRangeError(Exception): ''' Exceptions thrown as cleaned data go beyond range ``[-1, 1]``. ''' pass class Clusteror(object): ''' ``Clusteror`` class can train neural networks *DA* or *SDA*, train taggers, or load saved models from files. Parameters ---------- raw_data : Pandas DataFrame Dataframe read from data source. It can be original dataset without any preprocessing or with a certain level of manipulation for future analysis. Attributes ---------- _raw_data : Pandas DataFrame Stores the original dataset. It's the dataset that later post-clustering performance analysis will be based on. _cleaned_data : Pandas DataFrame Preprocessed data. Not necessarily has same number of columns with ``_raw_data`` as a categorical column can derive multiple columns. As the ``tanh`` function is used as activation function for symmetric consideration. All columns should have values in range ``[-1, 1]``, otherwise an ``OutRangeError`` will be raised. _network : str **da** for *DA*; **sda** for *SDA*. Facilating functions called with one or the other algorithm. _da_dim_reducer: Theano function Keeps the Theano function that is from trained DA model. Reduces the dimension of the cleaned data down to one. _sda_dim_reducer: Theano function Keeps the Theano function that is from trained SDA model. Reduces the dimension of the cleaned data down to one. _one_dim_data: Numpy Array The dimension reduced one dimensional data. _valley: Python function Trained valley model tagging sample with their one dimensional representation. _kmeans: Scikit-Learn K-Means model Trained K-Means model tagging samples with their one dimensional representation. _tagger: str Keeps records of which tagger implemented. _field_importance: List Keeps the list of coefficiences that influence the clustering emphasis. ''' def __init__(self, raw_data): self._raw_data = raw_data.copy() @classmethod def from_csv(cls, filepath, **kwargs): ''' Class method for directly reading CSV file. 
Parameters ---------- filepath : str Path to the CSV file **kwargs : keyword arguments Other keyword arguments passed to ``pandas.read_csv`` ''' raw_data = pd.read_csv(filepath, **kwargs) return cls(raw_data) @property def raw_data(self): ''' Pandas DataFrame: For assgining new values to ``_raw_data``. ''' return self._raw_data @raw_data.setter def raw_data(self, raw_data): self._raw_data = raw_data @property def cleaned_data(self): ''' Pandas DataFrame: For assgining cleaned dataframe to ``_cleaned_dat``. ''' return self._cleaned_data @cleaned_data.setter def cleaned_data(self, cleaned_data): self._cleaned_data = cleaned_data @property def da_dim_reducer(self): ''' Theano function: Function that reduces dataset dimension. Attribute ``_network`` is given **da** to designate the method of the autoencoder as ``DA``. ''' return self._da_dim_reducer @da_dim_reducer.setter def da_dim_reducer(self, da_dim_reducer): self._da_dim_reducer = da_dim_reducer self._network = 'da' @property def sda_dim_reducer(self): ''' Theano function: Function that reduces dataset dimension. Attribute ``_network`` is given **sda** to designate the method of the autoencoder as ``SDA``. ''' return self._sda_dim_reducer @sda_dim_reducer.setter def sda_dim_reducer(self, sda_dim_reducer): self._sda_dim_reducer = sda_dim_reducer self._network = 'sda' @property def one_dim_data(self): ''' Numpy Array: Stores the output of neural network that has dimension one. ''' return self._one_dim_data @one_dim_data.setter def one_dim_data(self, one_dim_data): self._one_dim_data = one_dim_data @property def valley(self): ''' Python function: Trained on the dimension reduced one dimensional data that segregates subjects into concentration of existence in a subset of ``[-1, 1]``, by locating the "valley" in the distribution landscape. ``_tagger`` is given **valley** to facilitate follow-up usages. ''' return self._valley @valley.setter def valley(self, valley): self._valley = valley self._tagger = 'valley' @property def kmeans(self): ''' Python function: Trained on the dimension reduced one dimensional data that segregates subjects into concentration of existence in a subset of ``[-1, 1]`` with K-Means algorithm. ``_tagger`` is given **valley** to facilitate follow-up usages. ''' return self._kmeans @kmeans.setter def kmeans(self, kmeans): self._kmeans = kmeans self._tagger = 'kmeans' @property def tagger(self): ''' str: Name the tagger if necessary to do so, which will facilitate, e.g. prefixing the filepath. ''' return self._tagger @tagger.setter def tagger(self, tagger): self._tagger = tagger @property def field_importance(self): ''' List: Significance that given to fields when training of neural network is done. Fields with a large number will be given more attention. Note ---- The importance is only meaningful relatively between fields. If no values are specified, all fields are treated equally. Parameters ---------- field_importance : List or Dict, default None (List of Ones) * If a list is designated, all fields should be assigned an importance, viz, the length of the list should be equal to the length of the features training the neural network. * It can also be given in a dict. In such a case, the fields can be selectively given a value. Dict key is for field name and value is for the importance. Fields not included will be initiated with the default value one. A warning will be issued when a key is not on the list of field names, mostly because of a typo. 
''' return self._field_importance @field_importance.setter def field_importance(self, field_importance): n_fields = self._cleaned_data.shape[1] if isinstance(field_importance, list): assert len(field_importance) == n_fields self._field_importance = field_importance elif isinstance(field_importance, dict): self._field_importance = [1] * n_fields columns = self._cleaned_data.columns.tolist() for field, importance in field_importance.items(): try: index = columns.index(field) self._field_importance[index] = importance except ValueError: msg = '{} isn\'t in fields'.format(field) warnings.warn(msg) def _check_cleaned_data(self): ''' Checks on cleaned data before any work is done. This list of checks can be extended when more checks should be included. ''' cleaned_data_info = ( 'Need first assign your cleaned data to attribute "_cleaned_data"' ) assert self._cleaned_data is not None, cleaned_data_info if (self._cleaned_data.max() > 1).any(): raise OutRangeError('Maximum should be less equal than 1.') if (self._cleaned_data.min() < -1).any(): raise OutRangeError('Minimum should be greater equal than -1') def _check_network(self): ''' Check if network has been correctly setup. ''' network_info = ( 'Clusteror needs to know which network to use in' 'attribute "_network"' ) assert self._network is not None, network_info info = 'Train {} with {} or load it first!' if self._network == 'da': info = info.format('DA', '"train_da_dim_reducer"') assert self._da_dim_reducer is not None, info elif self._network == 'sda': info = info.format('SDA', '"train_sda_dim_reducer"') assert self._sda_dim_reducer is not None, info def _prepare_network_training(self, batch_size): ''' Preparations needed to kick off training neural networks. Parameters ---------- batch_size: int Size of each training batch. Necessary to derive the number of batches. ''' self.np_rs = np.random.RandomState(numpy_random_seed) self.theano_rs = RandomStreams(self.np_rs.randint(theano_random_seed)) # compute number of minibatches for training, validation and testing self.data = np.asarray(self._cleaned_data, dtype=theano.config.floatX) self.train_set = shared(value=self.data, borrow=True) # compute number of minibatches for training # needs one more batch if residual is non-zero # e.g. 5 rows with batch size 2 needs 5 // 2 + 1 self.n_train_batches = ( self.data.shape[0] // batch_size + int(self.data.shape[0] % batch_size > 0) ) def _pretraining_early_stopping( self, train_func, n_train_batches, min_epochs, patience, patience_increase, improvement_threshold, verbose, **kwargs ): ''' Scheme of early stopping if no substantial improvement can be observed. Parameters ---------- train_func: Theano Function Function that takes in training set and updates internal parameters, in this case the weights and biases in neural network, and returns the evaluation of the cost function after each training step. n_train_batches: int Number of training batches derived from the total number of training samples and the batch size. min_epochs: int The mininum number of training epoch to run. It can be exceeded depending on the setup of patience and ad-hoc training progress. patience: int True number of training epochs to run if larger than ``min_epochs``. Note it is potentially increased during the training if the cost is better than the expectation from current cost. patience_increase: int Coefficient used to increase patience against epochs that have been run. 
improvement_threshold: float, between 0 and 1 Minimum improvement considered as substantial improvement, i.e. new cost over existing lowest cost lower than this value. verbose: boolean Prints out training at each epoch if true. **kwargs: keyword arguments All keyword arguments pass on to ``train_func``. ''' n_epochs = 0 done_looping = False check_frequency = min(min_epochs, patience // 3) best_cost = np.inf assert improvement_threshold > 0 and improvement_threshold < 1 start_time = timeit.default_timer() while (n_epochs < min_epochs) or (not done_looping): n_epochs += 1 # go through training set c = [] for minibatch_index in range(n_train_batches): c.append(train_func(minibatch_index, **kwargs)) cost = np.mean(c) if verbose: print( 'Training epoch {n_epochs}, '.format(n_epochs=n_epochs) + 'cost {cost}.'.format(cost=cost) ) if n_epochs % check_frequency == 0: # check cost every check_frequency if cost < best_cost: benchmark_better_cost = best_cost * improvement_threshold if cost < benchmark_better_cost: # increase patience if cost improves a lot # the increase is a multiplicity of epochs that # have been run patience = max(patience, n_epochs * patience_increase) if verbose: print( 'Epoch {n_epochs},'.format(n_epochs=n_epochs) + ' patience increased to {patience}'.format( patience=patience ) ) best_cost = cost if n_epochs > patience: done_looping = True end_time = timeit.default_timer() if verbose: training_time = (end_time - start_time) sys.stderr.write( os.path.split(__file__)[1] + ' ran for {time:.2f}m\n'.format(time=training_time / 60.)) def train_da_dim_reducer( self, field_importance=None, batch_size=50, corruption_level=0.3, learning_rate=0.002, min_epochs=200, patience=60, patience_increase=2, improvement_threshold=0.98, verbose=False, ): ''' Trains a ``DA`` neural network. Parameters ---------- field_importance : List or Dict, default None (List of Ones) * If a list is designated, all fields should be assigned an importance, viz, the length of the list should be equal to the length of the features training the neural network. * It can also be given in a dict. In such a case, the fields can be selectively given a value. Dict key is for field name and value is for the importance. Fields not included will be initiated with the default value one. A warning will be issued when a key is not on the list of field names, mostly because of a typo. batch_size: int Size of each training batch. Necessary to derive the number of batches. corruption_level: float, between 0 and 1 Dropout rate in reading input, typical pratice in deep learning to avoid overfitting. learning_rate: float Propagating step size for gredient descent algorithm. min_epochs: int The mininum number of training epoch to run. It can be exceeded depending on the setup of patience and ad-hoc training progress. patience: int True number of training epochs to run if larger than ``min_epochs``. Note it is potentially increased during the training if the cost is better than the expectation from current cost. patience_increase: int Coefficient used to increase patience against epochs that have been run. improvement_threshold: float, between 0 and 1 Minimum improvement considered as substantial improvement, i.e. new cost over existing lowest cost lower than this value. verbose: boolean, default False Prints out training at each epoch if true. 
''' self._network = 'da' # note .field_importance indicates the magic of the property # decorator is played to transform the format the input self.field_importance = field_importance self._check_cleaned_data() self._prepare_network_training(batch_size=batch_size) # allocate symbolic variables for the dat # index to a [mini]batch index = T.lscalar('index') x = T.matrix('x') da = dA( n_visible=self.data.shape[1], n_hidden=1, np_rs=self.np_rs, theano_rs=self.theano_rs, field_importance=field_importance, input_data=x, ) cost, updates = da.get_cost_updates( corruption_level=corruption_level, learning_rate=learning_rate ) train_da = theano.function( [index], cost, updates=updates, givens={ x: self.train_set[index * batch_size: (index + 1) * batch_size] } ) self._pretraining_early_stopping( train_func=train_da, n_train_batches=self.n_train_batches, min_epochs=min_epochs, patience=patience, patience_increase=patience_increase, improvement_threshold=improvement_threshold, verbose=verbose ) self.da = da self._da_dim_reducer = function([x], da.get_hidden_values(x)) self.da_reconstruct = function( [x], da.get_reconstructed_input(da.get_hidden_values(x)) ) def train_sda_dim_reducer( self, field_importance=None, batch_size=50, hidden_layers_sizes=[20], corruption_levels=[0.3], learning_rate=0.002, min_epochs=200, patience=60, patience_increase=2, improvement_threshold=0.98, verbose=False ): ''' Trains a ``SDA`` neural network. Parameters ---------- field_importance : List or Dict, default None (List of Ones) * If a list is designated, all fields should be assigned an importance, viz, the length of the list should be equal to the length of the features training the neural network. * It can also be given in a dict. In such a case, the fields can be selectively given a value. Dict key is for field name and value is for the importance. Fields not included will be initiated with the default value one. A warning will be issued when a key is not on the list of field names, mostly because of a typo. batch_size: int Size of each training batch. Necessary to derive the number of batches. hidden_layers_sizes: List of ints Number of neurons in the hidden layers (all but the input layer). corruption_levels: List of floats, between 0 and 1 Dropout rate in reading input, typical pratice in deep learning to avoid overfitting. learning_rate: float Propagating step size for gredient descent algorithm. min_epochs: int The mininum number of training epoch to run. It can be exceeded depending on the setup of patience and ad-hoc training progress. patience: int True number of training epochs to run if larger than ``min_epochs``. Note it is potentially increased during the training if the cost is better than the expectation from current cost. patience_increase: int Coefficient used to increase patience against epochs that have been run. improvement_threshold: float, between 0 and 1 Minimum improvement considered as substantial improvement, i.e. new cost over existing lowest cost lower than this value. verbose: boolean, default False Prints out training at each epoch if true. 
''' # note .field_importance indicates the magic of the property # decorator is played to transform the format the input self.field_importance = field_importance assert hidden_layers_sizes is not None assert isinstance(corruption_levels, list) assert len(hidden_layers_sizes) == len(corruption_levels) self._network = 'sda' self._check_cleaned_data() self._prepare_network_training(batch_size=batch_size) # for the purpose of this excercise, restrict the final layer 1d hidden_layers_sizes.append(1) corruption_levels.append(0) x = T.matrix('x') sda = SdA( n_ins=self.data.shape[1], hidden_layers_sizes=hidden_layers_sizes, np_rs=self.np_rs, theano_rs=self.theano_rs, field_importance=field_importance, input_data=x ) pretraining_fns = sda.pretraining_functions( train_set=self.train_set, batch_size=batch_size ) for ind in range(sda.n_layers): self._pretraining_early_stopping( train_func=pretraining_fns[ind], n_train_batches=self.n_train_batches, min_epochs=min_epochs, patience=patience, patience_increase=patience_increase, improvement_threshold=improvement_threshold, verbose=verbose, corruption_level=corruption_levels[ind], learning_rate=learning_rate ) self.sda = sda self._sda_dim_reducer = function([x], sda.get_final_hidden_layer(x)) self.sda_reconstruct = function( [x], sda.get_first_reconstructed_input(sda.get_final_hidden_layer(x)) ) def _prefix_filepath(self, prefix_type, filepath): ''' Prefixes a filepath with the type stored in the file. Examples -------- >> clusteror._prefix_filepath('network', 'a/b') 'a/da_b' Note ---- Only the filename part is prefixed if there are directories in the path. Parameters ---------- prefix_type: str The type to prefixing the filepath. filepath: str Filepath to be prefixed. Returns ------- Prefixed filepath. ''' filepath_list = list(os.path.split(filepath)) filepath_list[-1] = ( getattr(self, prefix_type) + '_' + filepath_list[-1] ) filepath = os.path.join(tuple(filepath_list)) return filepath def save_dim_reducer( self, filepath='dim_reducer.pk', include_network=False ): ''' Save dimension reducer from the neural network training. Parameters ---------- filepath: str Filename to store the dimension reducer. include_network: boolean If true, prefix the filepath with the network type. ''' self._check_network() if include_network: filepath = self._prefix_filepath('network', filepath) with open(filepath, 'wb') as f: if self._network == 'da': pk.dump(self._da_dim_reducer, f) elif self._network == 'sda': pk.dump(self._sda_dim_reducer, f) def load_dim_reducer(self, filepath='dim_reducer.pk'): ''' Loads saved dimension reducer. Need to first name the network type. Parameters ---------- filepath: str ''' assert self._network is not None with open(filepath, 'rb') as f: if self._network == 'da': self._da_dim_reducer = pk.load(f) elif self._network == 'sda': self._sda_dim_reducer = pk.load(f) def reduce_to_one_dim(self): ''' Reduces the dimension of input dataset to one before the tagging in the next step. Input of the Theano function is the cleaned data and output is a one dimensional data stored in ``_one_dim_data``. ''' self._check_cleaned_data() self._check_network() if self._network == 'da': self._one_dim_data = self._da_dim_reducer(self._cleaned_data) elif self._network == 'sda': self._one_dim_data = self._sda_dim_reducer(self._cleaned_data) self._one_dim_data = self._one_dim_data[:, 0] def _check_one_dim_data(self): ''' Check if one_dim_data exists. Give error info if not. ''' one_dim_data_info = 'Get reduced one dimensional data first!' 
assert self._one_dim_data is not None, one_dim_data_info def train_valley(self, bins=100, contrast=0.3): ''' Trains the ability to cut the universe of samples into clusters based how the dimension reduced dataset assembles in a histogram. Unlike the K-Means, no need to preset the number of clusters. Parameters ---------- bins: int Number of bins to aggregate the one dimensional data. contrast: float, between 0 and 1 Threshold used to define local minima and local maxima. Detailed explanation in ``utils.find_local_extremes``. Note ---- When getting only one cluster, check the distribution of ``one_dim_data``. Likely the data points flock too close to each other. Try increasing ``bins`` first. If not working, try different neural networks with more or less layers with more or less neurons. ''' bins = np.linspace(-1, 1, bins+1) # use the left point of bins to name the bin left_points = np.asarray(bins[:-1]) self._check_one_dim_data() cuts = pd.cut(self._one_dim_data, bins=bins) # ipdb.set_trace() bin_counts = cuts.describe().reset_index().loc[:, 'counts'] local_min_inds, local_mins, local_max_inds, local_maxs = ( find_local_extremes(bin_counts, contrast) ) self.trained_bins = left_points[local_min_inds].tolist() + [1] if self.trained_bins[0] != -1: self.trained_bins = [-1] + self.trained_bins def valley(one_dim_data): cuts = pd.cut( one_dim_data, bins=self.trained_bins, labels=list(range(len(self.trained_bins) - 1)) ) return cuts.get_values() self._valley = valley self._tagger = 'valley' def _check_tagger(self): ''' Check tagger existence. Give error info if not. ''' tagger_info = 'Clusteror needs to know which tagger to use' assert self._tagger is not None, tagger_info info = 'Train {} with {} or load it first' if self._tagger == 'valley': info = info.format('"valley"', '"train_valley"') assert self._valley is not None, info elif self._tagger == 'kmeans': info = info.format('"kmeans"', '"train_kmeans"') assert self._kmeans is not None, info def save_valley(self, filepath, include_taggername=False): ''' Saves valley tagger. Parameters ---------- filepath: str File path to save the tagger. include_taggername: boolean, default False Include the **valley_** prefix in filename if true. ''' self.check_tagger() if include_taggername: filepath = self._prefix_filepath('tagger', filepath) with open(filepath, 'w') as f: json.dump(self.trained_bins, f) def load_valley(self, filepath): ''' Loads a saved valley tagger from a file. Create the valley function from the saved parameters. Parameter --------- filepath: str File path to the file saving the valley tagger. ''' with open(filepath, 'r') as f: self.trained_bins = json.load(f) def valley(one_dim_data): cuts = pd.cut( one_dim_data, bins=self.trained_bins, labels=list(range(len(self.trained_bins) - 1)) ) return cuts.get_values() self._valley = valley self._tagger = 'valley' def train_kmeans(self, n_clusters=10, **kwargs): ''' Trains K-Means model on top of the one dimensional data derived from dimension reducers. Parameters ---------- n_clusters: int The number of clusters required to start a K-Means learning. **kwargs: keyword arguments Any other keyword arguments passed on to Scikit-Learn K-Means model. ''' self._check_one_dim_data() self._kmeans = KMeans(n_clusters=n_clusters, **kwargs) self._kmeans.fit(self._one_dim_data.reshape(-1, 1)) self._tagger = 'kmeans' def save_kmeans(self, filepath, include_taggername=False): ''' Saves K-Means model to the named file path. Can add a prefix to indicate this saves a K-Means model. 
Parameters ---------- filepath: str File path for saving the model. include_taggername: boolean, default False Include the **kmean_** prefix in filename if true. ''' self._check_tagger() if include_taggername: filepath = self._prefix_filepath('tagger', filepath) with open(filepath, 'wb') as f: pk.dump(self._kmeans, f) def load_kmeans(self, filepath): ''' Loads a saved K-Means tagger from a file. Parameter --------- filepath: str File path to the file saving the K-Means tagger. ''' with open(filepath, 'rb') as f: self._kmeans = pk.load(f) self._tagger = 'kmeans' def add_cluster(self): ''' Tags each sample regarding their reduced one dimensional value. Adds an extra column **'cluster'** to ``raw_data``, seggesting a zero-based cluster ID. ''' self._check_tagger() if self._tagger == 'valley': self.raw_data.loc[:, 'cluster'] = self._valley(self._one_dim_data) elif self._tagger == 'kmeans': self.raw_data.loc[:, 'cluster'] = ( self._kmeans.predict(self._one_dim_data.reshape(-1, 1)) )
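# A minimal end-to-end sketch of the workflow documented in this module, assuming ``raw_df`` is a pandas
# DataFrame whose feature columns have already been rescaled into the required [-1, 1] range (otherwise
# ``_check_cleaned_data`` raises ``OutRangeError``). The helper name ``_example_workflow`` and the keyword
# values are illustrative only; every method called below is defined above.
def _example_workflow(raw_df):
    clusteror = Clusteror(raw_df)
    clusteror.cleaned_data = raw_df                     # must already lie within [-1, 1]
    clusteror.train_da_dim_reducer(batch_size=50, verbose=False)
    clusteror.reduce_to_one_dim()                       # fills _one_dim_data
    clusteror.train_valley(bins=100, contrast=0.3)      # or clusteror.train_kmeans(n_clusters=5)
    clusteror.add_cluster()                             # adds a 'cluster' column to raw_data
    return clusteror.raw_data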
mit
-9,203,161,279,996,490,000
36.002309
79
0.584758
false
DStauffman/dstauffman2
dstauffman2/apps/bac_gui/bac_gui.py
1
21397
r""" The main module file for the BAC GUI. It defines the GUI and it's behavior and plotting. Notes ----- #. Written by David C. Stauffer in June 2016. """ #%% Imports import doctest import glob import os import pickle import sys import unittest from enum import Enum, unique import matplotlib.pyplot as plt import numpy as np from PyQt5 import QtCore, QtGui from PyQt5.QtWidgets import QApplication, QComboBox, QFormLayout, QGridLayout, QGroupBox, \ QHBoxLayout, QLabel, QLineEdit, QMainWindow, QPushButton, QRadioButton, QToolTip, QVBoxLayout, \ QWidget #%% Constants GUI_TOKEN = -1 LEGAL_LIMIT = 0.08/100 BMI_CONV = 703.0704 #%% Classes - Gender @unique class Gender(Enum): r"""Enumerator definitions for the possible gender conditions.""" male = 1 # uncircumcised male female = 2 # female #%% Classes - GuiSettings class GuiSettings(object): r"""Settings that capture the current state of the GUI.""" def __init__(self): self.profile = 'Default' self.height = GUI_TOKEN self.weight = GUI_TOKEN self.gender = Gender.female self.age = GUI_TOKEN self.bmi = GUI_TOKEN self.hr1 = 0 self.hr2 = 0 self.hr3 = 0 self.hr4 = 0 self.hr5 = 0 self.hr6 = 0 def __str__(self): r"""Prints all the settings out.""" text = ['GuiSettings:'] for key in sorted(vars(self)): text.append(' {}: {}'.format(key, getattr(self, key))) return '\n'.join(text) @staticmethod def get_text_fields(): r"""Returns the names of all the line edit widgets.""" return ['height', 'weight', 'age', 'bmi', 'hr1', 'hr2', 'hr3', 'hr4', 'hr5', 'hr6'] @staticmethod def load(filename): r"""Loads a instance of the class from a given filename.""" with open(filename, 'rb') as file: gui_settings = pickle.load(file) assert isinstance(gui_settings, GuiSettings) return gui_settings def save(self, filename): r"""Saves an instance of the class to the given filename.""" with open(filename, 'wb') as file: pickle.dump(self, file) #%% Classes - BacGui class BacGui(QMainWindow): r"""The BAC GUI.""" # Create GUI setting defaults for the class gui_settings = GuiSettings() def __init__(self): # call super method super().__init__() # initialize the state data #self.initialize_state(filename, board, cur_move, cur_game, game_hist) # call init method to instantiate the GUI self.init() # GUI initialization def init(self): r"""Initializes the GUI.""" # initialize timer self.timer = QtCore.QTimer(self) # properties QToolTip.setFont(QtGui.QFont('SansSerif', 10)) # Central Widget self.gui_widget = QWidget(self) self.setCentralWidget(self.gui_widget) # Panels (group boxes) self.grp_profile = QGroupBox('Profile') self.grp_consump = QGroupBox('Consumption') self.grp_plotter = QWidget() # Layouts layout_gui = QHBoxLayout(self.gui_widget) layout_profile = QGridLayout(self.grp_profile) layout_consump = QFormLayout(self.grp_consump) layout_plotter = QVBoxLayout(self.grp_plotter) # Labels lbl_profile = QLabel('Profile:') lbl_height = QLabel('Height:') lbl_weight = QLabel('Weight:') lbl_age = QLabel('Age:') lbl_bmi = QLabel('BMI:') lbl_gender = QLabel('Gender:') lbl_hr1 = QLabel('Hour 1:') lbl_hr2 = QLabel('Hour 2:') lbl_hr3 = QLabel('Hour 3:') lbl_hr4 = QLabel('Hour 4:') lbl_hr5 = QLabel('Hour 5:') lbl_hr6 = QLabel('Hour 6:') lbl_drink = QLabel('One Drink is:\n1 oz of 100 proof\n5 oz of wine\n12 oz of regular beer') # Fields self.popup_profile = QComboBox() profiles = self.initialize_profiles() for this_profile in profiles: self.popup_profile.addItem(this_profile) self.popup_profile.setCurrentIndex(0) self.popup_profile.activated.connect(self.onActivated) self.lne_height = QLineEdit('') 
self.lne_weight = QLineEdit('') self.lne_age = QLineEdit('') self.lne_bmi = QLineEdit('') self.radio_gender = QWidget() layout_gender = QHBoxLayout(self.radio_gender) self.radio_fmal = QRadioButton('Female') self.radio_fmal.setChecked(True) self.radio_fmal.toggled.connect(self.radio_toggle) self.radio_male = QRadioButton('Male') self.radio_male.toggled.connect(self.radio_toggle) layout_gender.addWidget(self.radio_fmal) layout_gender.addWidget(self.radio_male) self.lne_hr1 = QLineEdit('') self.lne_hr2 = QLineEdit('') self.lne_hr3 = QLineEdit('') self.lne_hr4 = QLineEdit('') self.lne_hr5 = QLineEdit('') self.lne_hr6 = QLineEdit('') lnes = [getattr(self, 'lne_' + field) for field in self.gui_settings.get_text_fields()] for this_lne in lnes: this_lne.setAlignment(QtCore.Qt.AlignCenter) this_lne.editingFinished.connect(self.text_finished) # Buttons - Save Profile button self.btn_save = QPushButton('Save Profile') self.btn_save.setToolTip('Saves the current profile to disk.') self.btn_save.setMaximumWidth(120) self.btn_save.setStyleSheet('color: black; background-color: #00bfbf; font: bold;') self.btn_save.clicked.connect(self.btn_save_function) # Buttons - Plot button self.btn_plot = QPushButton('Plot') self.btn_plot.setToolTip('Plots the BAC over time with the given information.') self.btn_plot.setMaximumWidth(200) self.btn_plot.setStyleSheet('color: black; background-color: #009900; font: bold;') self.btn_plot.clicked.connect(self.btn_plot_function) # Populate widgets - profile layout_profile.addWidget(lbl_profile, 0, 0) layout_profile.addWidget(lbl_height, 1, 0) layout_profile.addWidget(lbl_weight, 2, 0) layout_profile.addWidget(lbl_age, 3, 0) layout_profile.addWidget(lbl_bmi, 4, 0) layout_profile.addWidget(lbl_gender, 5, 0) layout_profile.addWidget(self.popup_profile, 0, 1) layout_profile.addWidget(self.lne_height, 1, 1) layout_profile.addWidget(self.lne_weight, 2, 1) layout_profile.addWidget(self.lne_age, 3, 1) layout_profile.addWidget(self.lne_bmi, 4, 1) layout_profile.addWidget(self.radio_gender, 5, 1) layout_profile.addWidget(self.btn_save, 6, 0, 1, 2, QtCore.Qt.AlignCenter) # Populate widgets - consumption layout_consump.addRow(lbl_hr1, self.lne_hr1) layout_consump.addRow(lbl_hr2, self.lne_hr2) layout_consump.addRow(lbl_hr3, self.lne_hr3) layout_consump.addRow(lbl_hr4, self.lne_hr4) layout_consump.addRow(lbl_hr5, self.lne_hr5) layout_consump.addRow(lbl_hr6, self.lne_hr6) # Populate widgets - plotter layout_plotter.addWidget(lbl_drink) layout_plotter.addWidget(self.btn_plot) # Populate widgets - main GUI layout_gui.addWidget(self.grp_profile) layout_gui.addWidget(self.grp_consump) layout_gui.addWidget(self.grp_plotter) # Call wrapper to initialize GUI self.wrapper() # GUI final layout properties self.center() self.setWindowTitle('BAC GUI') self.setWindowIcon(QtGui.QIcon(os.path.join(get_root_dir(), 'bac_gui.png'))) self.show() #%% Other initializations def initialize_profiles(self): r"""Gets the list of all current profiles that exist in the folder.""" # Check to see if the Default profile exists, and if so load it, else create it folder = get_root_dir() filename = os.path.join(folder, 'Default.pkl') if os.path.isfile(filename): # pragma: no cover self.gui_settings = GuiSettings.load(filename) else: # pragma: no cover self.gui_settings.save(filename) # Find all the pickle files that exist, and make them into profiles profiles = glob.glob(os.path.join(folder, '*.pkl')) profiles = [os.path.normpath(x).split(os.path.sep)[-1][:-4] for x in profiles] profiles = set(profiles) ^ {'Default'} 
profiles = ['Default'] + sorted(profiles) + ['New+'] return profiles #%% wrapper def wrapper(self): r"""Acts as a wrapper to everything the GUI needs to do.""" # Note: nothing is done to update the profile field, it's assumed to correct already # loop through and update the text fields for field in self.gui_settings.get_text_fields(): this_value = getattr(self.gui_settings, field) this_lne = getattr(self, 'lne_' + field) if this_value == GUI_TOKEN: this_lne.setText('') else: this_lne.setText('{:g}'.format(this_value)) # update the gender button group if self.gui_settings.gender == Gender.female: if self.radio_male.isChecked(): self.radio_fmal.setChecked(True) elif self.gui_settings.gender == Gender.male: if self.radio_fmal.isChecked(): self.radio_male.setChecked(True) else: # pragma: no cover raise ValueError('Unexpected value for gender: "{}".'.format(self.gui_settings.gender)) #%% Other callbacks - closing def closeEvent(self, event): r"""Things in here happen on GUI closing.""" event.accept() #%% Other callbacks - center the GUI on the screen def center(self): r"""Makes the GUI centered on the active screen.""" frame_gm = self.frameGeometry() screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) center_point = QApplication.desktop().screenGeometry(screen).center() frame_gm.moveCenter(center_point) self.move(frame_gm.topLeft()) #%% Other callbacks - dislaying an error for invalid edit box entries def display_text_error(self, field): r"""Displays a temporary message for invalid characters within the line edit boxes.""" field.setStyleSheet('color: white; background-color: red; font: bold;') reset = lambda: field.setStyleSheet('color: black; background-color: white; font: normal;') self.timer.setInterval(300) self.timer.setSingleShot(True) self.timer.timeout.connect(reset) self.timer.start() self.wrapper() #%% Other callbacks - updating the selected profile name def onActivated(self, value): r"""Controls behavior when mode combobox is changed.""" # check if "New+" was selected if value == len(self.popup_profile) - 1: # get the current items items = [self.popup_profile.itemText(i) for i in range(self.popup_profile.count())] # ask for a new profile text, ok = QtGui.QInputDialog.getText(self, 'Profile Name', 'Enter a new profile:') if not ok or not text: # put back to last choice ix = items.index(self.gui_settings.profile) self.popup_profile.setCurrentIndex(ix) else: if text in items: # if already an existing profile, then load the old one print('Profile "{}" already exists and is being loaded.'.format(text)) self.gui_settings = self.gui_settings.load(os.path.join(get_root_dir(), text + '.pkl')) else: # create the new profile gui_settings = GuiSettings() gui_settings.profile = text # if successful in saving, then update the working copy gui_settings.save(os.path.join(get_root_dir(), text + '.pkl')) self.gui_settings = gui_settings # find where to insert in GUI and insert i = 1 while i < len(items)-1 and items[i] < text: i += 1 self.popup_profile.insertItem(i, text) self.popup_profile.setCurrentIndex(i) else: # changed to the desired existing profile text = self.popup_profile.currentText() if text != self.gui_settings.profile: self.gui_settings = self.gui_settings.load(os.path.join(get_root_dir(), text + '.pkl')) # update the GUI to reflect any new settings self.wrapper() #%% Other callbacks - update the line edit boxes def text_finished(self): r"""Updates gui_settings for LineEdit text changes that happen when you leave the box.""" sender = self.sender() fields = 
self.gui_settings.get_text_fields() senders = [getattr(self, 'lne_' + field) for field in fields] for ix in range(len(senders)): if sender == senders[ix]: text = sender.text() if text == '': setattr(self.gui_settings, fields[ix], GUI_TOKEN) break try: value = float(text) except ValueError: self.display_text_error(senders[ix]) break else: setattr(self.gui_settings, fields[ix], value) break else: # pragma: no cover raise ValueError('Unexpected field went into this method.') # check for conditions to update the BMI if sender == self.lne_height or sender == self.lne_weight: if self.gui_settings.height != GUI_TOKEN and self.gui_settings.weight != GUI_TOKEN: if self.gui_settings.bmi == GUI_TOKEN: self.gui_settings.bmi = calculate_bmi(self.gui_settings.height, self.gui_settings.weight, \ self.gui_settings.gender, BMI_CONV) # call the wrapper to update all the possible field changes self.wrapper() #%% Other callbacks - Updating the gender button group def radio_toggle(self): r"""Controls the gender radio button group.""" # assert that only one of the button group is checked assert self.radio_fmal.isChecked() ^ self.radio_male.isChecked(), 'Only one button may be checked.' # determine which button is checked and update the settings accordingly if self.radio_fmal.isChecked(): self.gui_settings.gender = Gender.female elif self.radio_male.isChecked(): # pragma: no branch self.gui_settings.gender = Gender.male self.wrapper() #%% Other callbacks - Save button def btn_save_function(self): r"""Saves the current settings to the specified profile.""" # save the profile self.gui_settings.save(os.path.join(get_root_dir(), self.gui_settings.profile + '.pkl')) #%% Other callbacks - Plot button def btn_plot_function(self): r"""Plots the results and saves to a .png file.""" # call the plotting function fig = plot_bac(self.gui_settings, LEGAL_LIMIT) # save the figure filename = os.path.join(get_root_dir(), fig.canvas.manager.get_window_title() + '.png') fig.savefig(filename, dpi=160, bbox_inches='tight') #%% Functions - get_root_dir def get_root_dir(): r""" Returns the folder that contains this source file and thus the root folder for the whole code. Returns ------- folder : str Location of the folder that contains all the source files for the code. Examples -------- >>> from dstauffman2.apps.bac_gui import get_root_dir >>> folder = get_root_dir() """ # this folder is the root directory based on the location of this file (utils.py) folder = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) return folder #%% Functions - calculate_bmi def calculate_bmi(height, weight, gender, conv=BMI_CONV): r""" Calculates the BMI (Body Mass Index) for someone based on their height and weight. Parameters ---------- height : float Height in inches weight : float Weight in pounds gender : class Gender Gender conv : float, optional Unit conversion factor Returns ------- bmi : float Body mass index Notes ----- #. Should this eventually include gender? Examples -------- >>> from dstauffman2.apps.bac_gui import calculate_bmi, Gender >>> height = 69 >>> weight = 161 >>> gender = Gender.male >>> bmi = calculate_bmi(height, weight, gender) >>> print('{:.2f}'.format(bmi)) 23.78 """ # calculate the BMI using a simple formula (could be expanded later) bmi = weight / height**2 * conv return bmi #%% Functions - calculate_bac def calculate_bac(time_drinks, drinks, time_out, body_weight): r""" Calculates a BAC (Blood Alcohol Content) over time. 
Examples -------- >>> from dstauffman2.apps.bac_gui import calculate_bac >>> import numpy as np >>> time_drinks = np.array([1, 2, 3, 4, 5, 6]) >>> drinks = np.array([1, 1.5, 2.2, 0.5, 0, 0]) >>> time_out = time_drinks.copy() >>> body_weight = 105 >>> bac = calculate_bac(time_drinks, drinks, time_out, body_weight) >>> print(bac) # doctest: +NORMALIZE_WHITESPACE [0.00020714 0.00059286 0.00122857 0.00125714 0.00110714 0.00095714] """ # hard-coded values drink_weight_conv = 0.0375 # converts standard drinks consumed per pound to BAC burn_up = 0.00015 # alcohol content burned up per hour # potentially expand time and data vectors if time_drinks[0] > time_out[0]: time_drinks = np.append(time_out[0], time_drinks) drinks = np.append(0, drinks) if time_drinks[-1] < time_out[-1]: time_drinks = np.append(time_drinks, np.inf) drinks = np.append(drinks, drinks[-1]) # find the cumulative amount of drinks consumed cum_drinks = np.cumsum(drinks) ## find the BAC assuming no alcohol was converted bac_init = cum_drinks / body_weight * drink_weight_conv # interpolate the BAC to the desired time, still assuming no alcohol was converted bac_interp = np.interp(time_out, time_drinks, bac_init) # subtract off the amount that was converted by the body in the given time bac = np.maximum(bac_interp - burn_up * time_out, 0) return bac #%% Function - plot_bac def plot_bac(gui_settings, legal_limit=None): r""" Plots the BAC over time. Parameters ---------- gui_settings : class GuiSettings GUI settings legal_limit : float, optional Legal limit for BAC before considered impaired and unable to drive Returns ------- fig : class matplotlib.Figure Figure handle Examples -------- >>> from dstauffman2.apps.bac_gui import GuiSettings, plot_bac, Gender >>> import matplotlib.pyplot as plt >>> gui_settings = GuiSettings() >>> gui_settings.height = 69 >>> gui_settings.weight = 161 >>> gui_settings.age = 34 >>> gui_settings.bmi = 23.78 >>> gui_settings.gender = Gender.male >>> fig = plot_bac(gui_settings) Close the figure >>> plt.close(fig) """ #% hard-coded values time_drinks = np.array([1, 2, 3, 4, 5, 6]) time_out = np.linspace(0, 12, 1000) ratio2per = 100 # check inputs assert isinstance(gui_settings, GuiSettings) # pull out information from gui_settings drinks = np.array([gui_settings.hr1, gui_settings.hr2, gui_settings.hr3, \ gui_settings.hr4, gui_settings.hr5, gui_settings.hr6]) body_weight = gui_settings.weight name = gui_settings.profile # calculate the BAC bac = ratio2per * calculate_bac(time_drinks, drinks, time_out, body_weight) # create the figure and axis fig = plt.figure(facecolor='w') this_title = 'BAC vs. Time for {}'.format(name) fig.canvas.manager.set_window_title(this_title) ax = fig.add_subplot(111) # plot the data ax.plot(time_out, bac, '.-', label='BAC') if legal_limit is not None: ax.plot(np.array([time_out[0], time_out[-1]]), np.full(2, ratio2per*legal_limit), '--', \ label='Legal Limit', color='red', linewidth=2) # add some labels and such ax.set_title(this_title) ax.set_xlabel('Time [hr]') ax.set_ylabel('BAC [%]') ax.grid(True) ax.legend() plt.show(block=False) return fig #%% Unit Test if __name__ == '__main__': # turn interactive plotting off plt.ioff() # open a qapp if QApplication.instance() is None: qapp = QApplication(sys.argv) else: qapp = QApplication.instance() # run the tests unittest.main(module='dstauffman2.apps.bac_gui.test_bac_gui', exit=False) doctest.testmod(verbose=False) # close the qapp qapp.closeAllWindows()
lgpl-3.0
-8,448,845,782,150,075,000
36.277003
111
0.60429
false
Patrick-Cole/pygmi
pygmi/rsense/change.py
1
32294
# ----------------------------------------------------------------------------- # Name: change.py (part of PyGMI) # # Author: Patrick Cole # E-Mail: pcole@geoscience.org.za # # Copyright: (c) 2019 Council for Geoscience # Licence: GPL-3.0 # # This file is part of PyGMI # # PyGMI is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyGMI is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ----------------------------------------------------------------------------- """Change Detection.""" import datetime from pathlib import Path import xml.etree.ElementTree as ElementTree from PyQt5 import QtWidgets, QtCore import numpy as np import pandas as pd from osgeo import gdal, osr, ogr from shapely.geometry.polygon import Polygon import matplotlib.pyplot as plt from matplotlib.figure import Figure import matplotlib.animation as manimation from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg from matplotlib.backends.backend_qt5 import NavigationToolbar2QT # import pygmi.menu_default as menu_default from pygmi.raster.datatypes import Data from pygmi.misc import frm from pygmi.raster.ginterp import histcomp, norm255 from pygmi.raster.modest_image import imshow class CreateSceneList(QtWidgets.QDialog): """ Create Scene List. This class creates a list of scenes for use in change detection. Attributes ---------- name : str item name piter : progressbar reference to a progress bar. parent : parent reference to the parent routine outdata : dictionary dictionary of output datasets ifile : str input file name. Used in main.py """ def __init__(self, parent=None): super().__init__(parent) if parent is None: self.showprocesslog = print else: self.showprocesslog = parent.showprocesslog self.parent = parent self.indata = {'tmp': True} self.outdata = {} self.ifile = '' if parent is not None: self.piter = self.parent.pbar.iter else: self.piter = iter self.shapefile = QtWidgets.QLineEdit('') self.scenefile = QtWidgets.QLineEdit('') self.isrecursive = QtWidgets.QCheckBox('Recursive file search') self.useall = QtWidgets.QCheckBox('Use all scenes') self.setupui() def setupui(self): """ Set up UI. Returns ------- None. 
""" gridlayout_main = QtWidgets.QGridLayout(self) buttonbox = QtWidgets.QDialogButtonBox() # helpdocs = menu_default.HelpButton('pygmi.grav.iodefs.importpointdata') pb_shape = QtWidgets.QPushButton('Load shapefile or kml file') pb_scene = QtWidgets.QPushButton('Set scene directory') buttonbox.setOrientation(QtCore.Qt.Horizontal) buttonbox.setCenterButtons(True) buttonbox.setStandardButtons(buttonbox.Cancel | buttonbox.Ok) self.setWindowTitle(r'Create Scene List') gridlayout_main.addWidget(self.shapefile, 0, 0, 1, 1) gridlayout_main.addWidget(pb_shape, 0, 1, 1, 1) gridlayout_main.addWidget(self.scenefile, 1, 0, 1, 1) gridlayout_main.addWidget(pb_scene, 1, 1, 1, 1) gridlayout_main.addWidget(self.useall, 2, 0, 1, 2) gridlayout_main.addWidget(self.isrecursive, 3, 0, 1, 2) # gridlayout_main.addWidget(helpdocs, 5, 0, 1, 1) gridlayout_main.addWidget(buttonbox, 5, 1, 1, 3) buttonbox.accepted.connect(self.accept) buttonbox.rejected.connect(self.reject) pb_shape.pressed.connect(self.get_shape) pb_scene.pressed.connect(self.get_scene) def settings(self, nodialog=False): """ Entry point into item. Returns ------- bool True if successful, False otherwise. """ if not nodialog: tmp = self.exec_() if tmp != 1: return tmp idir = self.scenefile.text() sfile = self.shapefile.text() if idir == '' or sfile == '': return False if not self.useall.isChecked(): if sfile[-3:] == 'shp': ddpoints = get_shape_coords(sfile) else: ddpoints = get_kml_coords(sfile) ddpoints2 = Polygon(ddpoints) if self.isrecursive.isChecked(): subfiles = Path(idir).rglob('*.tif') else: subfiles = Path(idir).glob('*.tif') subfiles = list(subfiles) dtime = [] flist = [] nodates = False for ifile in self.piter(subfiles): dataset = gdal.Open(str(ifile), gdal.GA_ReadOnly) metadata = dataset.GetMetadata() if not self.useall.isChecked(): gtr = dataset.GetGeoTransform() cols = dataset.RasterXSize rows = dataset.RasterYSize dxlim = (gtr[0], gtr[0]+gtr[1]*cols) dylim = (gtr[3]+gtr[5]*rows, gtr[3]) coords = [[dxlim[0], dylim[0]], [dxlim[0], dylim[1]], [dxlim[1], dylim[1]], [dxlim[1], dylim[0]], [dxlim[0], dylim[0]]] coords2 = Polygon(coords) if not coords2.contains(ddpoints2): continue if 'TIFFTAG_DATETIME' not in metadata: dt = datetime.datetime(1900, 1, 1) nodates = True else: dtimestr = metadata['TIFFTAG_DATETIME'] dt = datetime.datetime.strptime(dtimestr, '%Y:%m:%d %H:%M:%S') dtime.append(dt) flist.append(ifile) if nodates is True: self.showprocesslog('Some of your scenes do not have dates. ' 'Correct this in the output spreadsheet') if not flist: self.showprocesslog('No scenes could be found. Please make sure ' 'that your shapefile or kml file is in the ' 'area of your scenes and in the same ' 'projection.') return False self.showprocesslog('Updating spreadsheet...') df = pd.DataFrame() df['Datetime'] = dtime df['Filename'] = flist df['Use'] = True df['Shapefile'] = sfile df.sort_values('Datetime', inplace=True) self.outdata['SceneList'] = df self.showprocesslog('Saving to disk...') ext = ('Scene List File (*.xlsx)') filename, _ = QtWidgets.QFileDialog.getSaveFileName( self.parent, 'Save File', '.', ext) if filename == '': return False df.to_excel(filename, index=False) return True def loadproj(self, projdata): """ Load project data into class. Parameters ---------- projdata : dictionary Project data loaded from JSON project file. Returns ------- chk : bool A check to see if settings was successfully run. 
""" self.shapefile.setText(projdata['shapefile']) self.scenefile.setText(projdata['scenefile']) self.isrecursive.setChecked(projdata['recursive']) chk = self.settings(True) return chk def saveproj(self): """ Save project data from class. Returns ------- projdata : dictionary Project data to be saved to JSON project file. """ projdata = {} projdata['shapefile'] = self.shapefile.text() projdata['scenefile'] = self.scenefile.text() projdata['recursive'] = self.isrecursive.isChecked() return projdata def get_shape(self, filename=''): """ Get shape filename. Parameters ---------- filename : str, optional Input filename. The default is ''. Returns ------- None. """ ext = ('shapefile or kml file (*.shp *.kml)') if filename == '': filename, _ = QtWidgets.QFileDialog.getOpenFileName( self.parent, 'Open File', '.', ext) if filename == '': return self.shapefile.setText(filename) def get_scene(self, directory=''): """ Get Scene Directory. Parameters ---------- directory : str, optional Directory path as a string. The default is ''. Returns ------- None. """ if directory == '': directory = QtWidgets.QFileDialog.getExistingDirectory( self.parent, 'Select Directory') if directory == '': return self.scenefile.setText(directory) class LoadSceneList(): """ Load scene list. Attributes ---------- name : str item name pbar : progressbar reference to a progress bar. parent : parent reference to the parent routine outdata : dictionary dictionary of output datasets ifile : str input file name. Used in main.py ext : str filename extension """ def __init__(self, parent=None): self.ifile = '' self.parent = parent self.indata = {} self.outdata = {} def settings(self, nodialog=False): """ Entry point into item. Returns ------- bool True if successful, False otherwise. """ if not nodialog: ext = 'Scene List File (*.xlsx)' self.ifile, _ = QtWidgets.QFileDialog.getOpenFileName( self.parent, 'Open Scene List Spreadsheet', '.', ext) if self.ifile == '': return False df = pd.read_excel(self.ifile) self.outdata['SceneList'] = df return True def loadproj(self, projdata): """ Load project data into class. Parameters ---------- projdata : dictionary Project data loaded from JSON project file. Returns ------- chk : bool A check to see if settings was successfully run. """ self.ifile = projdata['ifile'] chk = self.settings(True) return chk def saveproj(self): """ Save project data from class. Returns ------- projdata : dictionary Project data to be saved to JSON project file. 
""" projdata = {} projdata['ifile'] = self.ifile return projdata class MyMplCanvas(FigureCanvasQTAgg): """Simple canvas with a sine plot.""" def __init__(self, parent=None, width=10, height=8, dpi=100, bands=(0, 1, 2)): self.fig = Figure(figsize=(width, height), dpi=dpi) self.ax1 = self.fig.add_subplot(111) self.im1 = None self.bands = bands self.parent = parent self.rcid = None self.manip = 'RGB' self.cbar = None self.capture_active = False self.writer = None super().__init__(self.fig) self.setParent(parent) FigureCanvasQTAgg.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) FigureCanvasQTAgg.updateGeometry(self) self.fig.canvas.mpl_connect('button_release_event', self.onClick) def capture(self): """Capture.""" self.capture_active = not self.capture_active if self.capture_active: ext = ('GIF (*.gif)') wfile, _ = QtWidgets.QFileDialog.getSaveFileName( self.parent, 'Save File', '.', ext) if wfile == '': self.capture_active = not self.capture_active return self.writer = manimation.PillowWriter(fps=4) self.writer.setup(self.fig, wfile) # , 100) else: self.writer.finish() def compute_initial_figure(self, dat, dates, points): """ Compute initial figure. Parameters ---------- dat : PyGMI Data PyGMI dataset. dates : str Dates to show on title. points : numpy array Points to plot. Returns ------- None. """ extent = [] rtmp1 = dat[self.bands[2]].data rtmp2 = dat[self.bands[1]].data rtmp3 = dat[self.bands[0]].data rtmp1 = (rtmp1-rtmp1.min())/rtmp1.ptp() rtmp2 = (rtmp2-rtmp2.min())/rtmp2.ptp() rtmp3 = (rtmp3-rtmp3.min())/rtmp3.ptp() alpha = np.logical_not(rtmp1 == 0.) dtmp = np.array([rtmp1, rtmp2, rtmp3, alpha]) dtmp = np.moveaxis(dtmp, 0, 2) dtmp = dtmp*255 dtmp = dtmp.astype(np.uint8) extent = dat[self.bands[0]].extent # self.im1 = imshow(self.ax1, dtmp, extent=extent) self.im1 = self.ax1.imshow(dtmp, extent=extent) self.ax1.plot(points[:, 0], points[:, 1]) self.cbar = None self.fig.suptitle(dates) def update_plot(self, dat, dates): """ Update plot. Parameters ---------- dat : PyGMI Data PyGMI dataset. dates : str Dates to show on title. Returns ------- None. """ extent = dat[self.bands[0]].extent if self.manip == 'NDWI': if self.cbar is None: self.cbar = self.figure.colorbar(self.im1, format=frm) green = np.ma.masked_equal(dat[2].data, 0) nir = np.ma.masked_equal(dat[4].data, 0) green = green.astype(float) nir = nir.astype(float) dtmp = (green-nir)/(green+nir) self.im1.set_clim(-1, 1) self.im1.set_cmap(plt.cm.get_cmap('PiYG_r')) elif self.manip == 'NDVI': if self.cbar is None: self.cbar = self.figure.colorbar(self.im1, format=frm) red = np.ma.masked_equal(dat[3].data, 0) nir = np.ma.masked_equal(dat[4].data, 0) red = red.astype(float) nir = nir.astype(float) dtmp = (nir-red)/(nir+red) self.im1.set_clim(-1, 1) self.im1.set_cmap(plt.cm.get_cmap('PiYG')) else: if self.cbar is not None: self.cbar.remove() self.cbar = None # rtmp1 = dat[self.bands[2]].data # rtmp2 = dat[self.bands[1]].data # rtmp3 = dat[self.bands[0]].data mask = (dat[self.bands[2]].data == 0.) red = np.ma.array(dat[self.bands[2]].data, mask=mask) green = np.ma.array(dat[self.bands[1]].data, mask=mask) blue = np.ma.array(dat[self.bands[0]].data, mask=mask) red = histcomp(red, nbr_bins=10000, perc=2.) green = histcomp(green, nbr_bins=10000, perc=2.) blue = histcomp(blue, nbr_bins=10000, perc=2.) 
red = norm255(red) green = norm255(green) blue = norm255(blue) red[mask] = 0 green[mask] = 0 blue[mask] = 0 # rtmp1 = (rtmp1-rtmp1.min()) # rtmp2 = (rtmp2-rtmp2.min()) # rtmp3 = (rtmp3-rtmp3.min()) # if rtmp1.ptp() != 0.: # rtmp1 = rtmp1/rtmp1.ptp() # if rtmp2.ptp() != 0.: # rtmp2 = rtmp2/rtmp2.ptp() # if rtmp3.ptp() != 0.: # rtmp3 = rtmp3/rtmp3.ptp() alpha = ~mask # np.logical_not(red == 0.) alpha = alpha*255 dtmp = np.array([red, green, blue, alpha]) dtmp = np.moveaxis(dtmp, 0, 2) # dtmp = dtmp*255 dtmp = dtmp.astype(np.uint8) self.im1.set_clim(0, 255) self.im1.set_data(dtmp) self.im1.set_extent(extent) self.fig.suptitle(dates) self.ax1.xaxis.set_major_formatter(frm) self.ax1.yaxis.set_major_formatter(frm) self.fig.canvas.draw() def onClick(self, event): """ On click event. Parameters ---------- event : TYPE Unused. Returns ------- None. """ self.rcid = self.fig.canvas.mpl_connect('draw_event', self.redraw) def redraw(self, event): """ Redraw event. Parameters ---------- event : TYPE Unused. Returns ------- None. """ self.fig.canvas.mpl_disconnect(self.rcid) self.parent.newdata(self.parent.curimage) class SceneViewer(QtWidgets.QDialog): """Scene Viewer.""" def __init__(self, parent=None): super().__init__(parent) if parent is None: self.showprocesslog = print else: self.showprocesslog = parent.showprocesslog self.parent = parent self.indata = {} self.outdata = {} self.ifile = '' self.df = None if parent is None: self.piter = iter else: self.piter = self.parent.pbar.iter self.pbar = QtWidgets.QProgressBar() # self.setAttribute(QtCore.Qt.WA_DeleteOnClose) self.setWindowTitle("View Change Data") self.file_menu = QtWidgets.QMenu('&File', self) self.help_menu = QtWidgets.QMenu('&Help', self) self.help_menu.addAction('&About', self.about) self.file_menu.addAction('&Quit', self.fileQuit, QtCore.Qt.CTRL + QtCore.Qt.Key_Q) vlayout = QtWidgets.QVBoxLayout(self) hlayout = QtWidgets.QHBoxLayout() hlayout2 = QtWidgets.QHBoxLayout() self.canvas = MyMplCanvas(self, width=5, height=4, dpi=100) mpl_toolbar = NavigationToolbar2QT(self.canvas, self) self.slider = QtWidgets.QScrollBar(QtCore.Qt.Horizontal) self.button1 = QtWidgets.QPushButton('Start Capture') self.button2 = QtWidgets.QPushButton('Update Scene List File') self.button3 = QtWidgets.QPushButton('Next Scene') self.cb_use = QtWidgets.QCheckBox('Use Scene') self.cb_display = QtWidgets.QCheckBox('Only Display Scenes Flagged ' 'for Use') self.cb_display.setChecked(True) self.manip = QtWidgets.QComboBox() actions = ['RGB', 'NDVI', 'NDWI'] self.manip.addItems(actions) hlayout2.addWidget(QtWidgets.QLabel('Band Manipulation:')) hlayout2.addWidget(self.manip) hlayout.addWidget(self.button3) hlayout.addWidget(self.button2) hlayout.addWidget(self.button1) vlayout.addWidget(self.canvas) vlayout.addWidget(mpl_toolbar) vlayout.addWidget(self.slider) vlayout.addWidget(self.cb_display) vlayout.addWidget(self.cb_use) vlayout.addLayout(hlayout2) vlayout.addLayout(hlayout) vlayout.addWidget(self.pbar) self.curimage = 0 mpl_toolbar.actions()[0].triggered.connect(self.home_callback) self.slider.valueChanged.connect(self.newdata) self.cb_use.stateChanged.connect(self.flaguse) self.button2.clicked.connect(self.updateanim) self.button3.clicked.connect(self.nextscene) self.button1.clicked.connect(self.capture) self.manip.currentIndexChanged.connect(self.manip_change) def settings(self, nodialog=False): """ Entry point into item. Returns ------- bool True if successful, False otherwise. 
""" if 'SceneList' not in self.indata: return False self.df = self.indata['SceneList'] sfile = self.df['Shapefile'][0] dates = self.df.Datetime[self.curimage] dat = self.get_tiff(self.df.Filename[self.curimage], firstrun=True) points = get_shape_coords(sfile, False) self.slider.setMaximum(len(self.df)-1) self.cb_use.setChecked(bool(self.df.Use[self.curimage])) self.canvas.bands = list(dat.keys()) self.canvas.compute_initial_figure(dat, dates, points) if not nodialog: tmp = self.exec_() if tmp != 1: return tmp return True def loadproj(self, projdata): """ Load project data into class. Parameters ---------- projdata : dictionary Project data loaded from JSON project file. Returns ------- chk : bool A check to see if settings was successfully run. """ return False def saveproj(self): """ Save project data from class. Returns ------- projdata : dictionary Project data to be saved to JSON project file. """ projdata = {} # projdata['ftype'] = '2D Mean' return projdata def manip_change(self, event): """ Change manipulation. Parameters ---------- event : TYPE Unused. Returns ------- None. """ self.canvas.manip = self.manip.currentText() self.newdata(self.curimage) def updateanim(self, event): """ Update animation file. Parameters ---------- event : TYPE Unused. Returns ------- bool True if successful, False otherwise. """ ext = ('Scene List File (*.xlsx)') filename, _ = QtWidgets.QFileDialog.getSaveFileName( self.parent, 'Save File', '.', ext) if filename == '': return False self.df.to_excel(filename, index=False) return True def nextscene(self, event): """ Get next scene. Parameters ---------- event : TYPE Unused. Returns ------- None. """ self.slider.setValue(self.slider.value()+1) def flaguse(self, event): """ Flag the scene for use. Parameters ---------- event : TYPE Unused. Returns ------- None. """ self.df.loc[self.curimage, 'Use'] = self.cb_use.isChecked() def home_callback(self, event): """ Home callback. Parameters ---------- event : TYPE Unused. Returns ------- None. """ self.newdata(self.curimage) def newdata(self, indx, capture=False): """ Get new dataset. Parameters ---------- indx : int Current index. capture : bool, optional Option to capture the scene. The default is False. Returns ------- None. """ if not self.df.Use[indx] and self.cb_display.isChecked(): if capture is False: return else: self.curimage = indx dates = self.df.Datetime[indx] dat = self.get_tiff(self.df.Filename[self.curimage]) self.cb_use.setChecked(bool(self.df.Use[self.curimage])) self.canvas.update_plot(dat, dates) def capture(self): """Capture.""" self.slider.valueChanged.disconnect() self.canvas.capture() for indx in self.df.index: self.slider.setValue(indx) self.newdata(indx, capture=True) self.canvas.writer.grab_frame() self.canvas.capture() self.slider.valueChanged.connect(self.newdata) self.slider.setValue(self.curimage) def fileQuit(self): """ File quit. Returns ------- None. """ self.close() def closeEvent(self, cevent): """ Close event. Parameters ---------- cevent : TYPE Unused. Returns ------- None. """ self.fileQuit() def about(self): """ About. Returns ------- None. """ QtWidgets.QMessageBox.about(self, "About", """Timeseries Plot""") def get_tiff(self, ifile, firstrun=False): """ Get TIFF images. Parameters ---------- ifile : str Filename to import. firstrun : bool, optional Option for first time running this routine. The default is False. 
Returns ------- datall : dictionary or None Data images """ datall = {} ifile = str(ifile) dataset = gdal.Open(ifile, gdal.GA_ReadOnly) self.pbar.setMinimum(0) self.pbar.setValue(0) self.pbar.setMaximum(dataset.RasterCount-1) gtr = dataset.GetGeoTransform() cols = dataset.RasterXSize rows = dataset.RasterYSize dx = abs(gtr[1]) dy = abs(gtr[5]) dxlim = (gtr[0], gtr[0]+gtr[1]*cols) dylim = (gtr[3]+gtr[5]*rows, gtr[3]) ############################################################################### axes = self.canvas.ax1 if firstrun is True: axes.set_xlim(dxlim[0], dxlim[1]) axes.set_ylim(dylim[0], dylim[1]) ext = (axes.transAxes.transform([(1, 1)]) - axes.transAxes.transform([(0, 0)]))[0] xlim, ylim = axes.get_xlim(), axes.get_ylim() xoff = max(int((xlim[0]-dxlim[0])/dx), 0) yoff = max(-int((ylim[1]-dylim[1])/dy), 0) xoff1 = min(int((xlim[1]-dxlim[1])/dx), 0) yoff1 = min(-int((ylim[0]-dylim[0])/dy), 0) xsize = cols-xoff+xoff1 ysize = rows-yoff+yoff1 xdim = dx*xsize/int(ext[0]) ydim = dy*ysize/int(ext[1]) xbuf = min(xsize, int(ext[0])) ybuf = min(ysize, int(ext[1])) gtrnew = (gtr[0]+xoff*dx, xdim, 0, gtr[3]-yoff*dy, 0, -ydim) ############################################################################### for i in range(dataset.RasterCount): rtmp = dataset.GetRasterBand(i+1) nval = rtmp.GetNoDataValue() bandid = rtmp.GetDescription() if bandid == '': bandid = 'Band '+str(i+1) dat = Data() dat.data = rtmp.ReadAsArray(xoff, yoff, xsize, ysize, xbuf, ybuf) if dat.data is None: self.showprocesslog('Error: Dataset could not be read ' 'properly') if dat.data.dtype.kind == 'i': if nval is None: nval = 999999 nval = int(nval) elif dat.data.dtype.kind == 'u': if nval is None: nval = 0 nval = int(nval) else: if nval is None: nval = 1e+20 nval = float(nval) dat.nullvalue = nval dat.xdim = xdim dat.ydim = ydim dat.extent_from_gtr(gtrnew) dat.wkt = dataset.GetProjection() datall[i+1] = dat self.pbar.setValue(i) if datall == {}: datall = None dataset = None return datall def get_shape_coords(sfile, todegrees=False): """ Get coordinates from a shapefile. Parameters ---------- sfile : str Shapefile name. todegrees : bool, optional Transform the coordinates to degrees. The default is False. Returns ------- ddpoints : numpy array Output coordinates. """ sr = osr.SpatialReference() sr.ImportFromEPSG(32735) # utm 35s sr.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) vec = ogr.Open(sfile) layer = vec.GetLayer(0) srdd = osr.SpatialReference() srdd.ImportFromEPSG(4326) # degrees srdd.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) coordtrans = osr.CoordinateTransformation(sr, srdd) points = [] poly = layer.GetNextFeature() geom = poly.GetGeometryRef() ifin = 0 imax = 0 if geom.GetGeometryName() == 'MULTIPOLYGON': for i in range(geom.GetGeometryCount()): geom.GetGeometryRef(i) itmp = geom.GetGeometryRef(i) itmp = itmp.GetGeometryRef(0).GetPointCount() if itmp > imax: imax = itmp ifin = i geom = geom.GetGeometryRef(ifin) pts = geom.GetGeometryRef(0) for p in range(pts.GetPointCount()): points.append((pts.GetX(p), pts.GetY(p))) if todegrees is True: ddpoints = np.array(coordtrans.TransformPoints(points)) ddpoints = ddpoints[:, :2] else: ddpoints = np.array(points) return ddpoints def get_kml_coords(kml): """ Extract points from kml. Parameters ---------- kml : str. kml file name. Returns ------- coordinates : numpy array Coordinate in numpy format. 
""" ns = "{http://www.opengis.net/kml/2.2}" tree = ElementTree.parse(kml) coordinates = [] for placemark in tree.findall(".//" + ns + "Placemark"): polygon = placemark.findall(".//" + ns + "Polygon") for i in polygon: coordstext = i.findtext('.//'+ns+'coordinates') coordstext = coordstext.strip() for point_text in coordstext.split(): floats = point_text.split(",") coordinates.append([float(floats[0]), float(floats[1])]) coordinates = np.array(coordinates) return coordinates def _testfn(): """Test routine.""" import sys # sfile = r'C:\Work\Workdata\change\PlanetaryPolygon.shp' sfile = r'C:\Work\Workdata\change\fl35.shp' pdir = r'C:\Work\Workdata\change\Planet' APP = QtWidgets.QApplication(sys.argv) # Necessary to test Qt Classes CSL = CreateSceneList(None) CSL.isrecursive.setChecked(True) CSL.shapefile.setText(sfile) CSL.scenefile.setText(pdir) CSL.settings(True) plt.show() def _testanim(): """Test for animation.""" # import sys from matplotlib import rcParams # rcParams['axes.formatter.limits'] = [-12, 12] # rcParams['axes.formatter.useoffset'] = False wfile = r'C:\Work\Workdata\change\tmp.gif' rcParams['figure.dpi'] = 300 # rcParams['savefig.dpi'] = 300 # APP = QtWidgets.QApplication(sys.argv) # Necessary to test Qt Classes fig = plt.figure() writer = manimation.PillowWriter(fps=4) writer.setup(fig, wfile) # , 100) tmp = np.random.rand(100, 100) im = plt.imshow(tmp) for i in range(20): red = np.random.rand(100, 100) # green = np.random.rand(100, 100) # blue = np.random.rand(100, 100) # alpha = np.logical_not(red == 0.) # dtmp = np.array([red, green, blue, alpha]) # dtmp = np.moveaxis(dtmp, 0, 2) # dtmp = dtmp*255 # dtmp = dtmp.astype(np.uint8) # im.set_clim(0, 255) im.set_data(red) fig.suptitle(str(i)) writer.grab_frame() plt.show() writer.finish() # CSL = LoadSceneList(None) # CSL.ifile = r'C:\Work\Workdata\change\Planet\paddock.xlsx' # CSL.settings(True) # SV = SceneViewer() # SV.indata = CSL.outdata # SV.settings() if __name__ == "__main__": _testanim()
gpl-3.0
-5,716,543,948,395,511,000
25.777778
81
0.530222
false
sieben/makesense
makesense/parser.py
1
18440
# -*- coding: utf8 -*- import pdb from collections import defaultdict from collections import namedtuple from csv import DictWriter from csv import DictReader from itertools import chain from os.path import join as pj import numpy as np import operator import os import re import subprocess import pandas as pd import logging import xml.etree.ElementTree as ET log = logging.getLogger("parsing") # RATE of information RATE = 250000.0 # Broadcast cost dis_packet = 39.0 * 47.0 / RATE dio_packet = 30.0 * 80.0 / RATE time_regexp = "^(?P<time>\d+)" mote_id_regexp = "ID:(?P<mote_id>\d+)" fieldnames = ["time", "mote_id", "node", "strobes", "message_type", "message", "tx", "rx", "size", "rime", "clock", "cpu", "lpm", "irq", "green_led", "yellow_led", "red_led", "sensors", "serial"] message_t = namedtuple("Message", fieldnames) message_t.__new__.__defaults__ = tuple(len(fieldnames) * [None]) def ipv6_to_host(s): return int(s.split(":")[-1:][0], 16) def powertracker2csv(folder, shift=0): """ format : Sky_2 MONITORED 9898083 us Sky_2 ON 180565 us 1,82 % Sky_2 TX 83860 us 0,85 % Sky_2 RX 2595 us 0,03 % Sky_2 INT 907 us 0,01 % sky_tx_cost = (17.4 * 10 ** -3) / (3600 * 10 ** 6) sky_rx_cost = (19.7 * 10 ** -3) / (3600 * 10 ** 6) sky_on_cost = (365 * 10 ** -6) / (3600 * 10 ** 6) sky_int_cost = (19.7 * 10 ** -3) / (3600 * 10 ** 6) wismote_tx_cost = (25.8 * 10 ** -3) / (3600 * 10 ** 6) wismote_rx_cost = (22.3 * 10 ** -3) / (3600 * 10 ** 6) wismote_on_cost = (365 * 10 ** -6) / (3600 * 10 ** 6) wismote_int_cost = (25.8 * 10 ** -3) / (3600 * 10 ** 6) """ with open(pj(folder, "powertracker.log")) as powertracker_file: powertracker_logs = powertracker_file.read() monitored_iterable = re.finditer( r"^(Sky|Wismote)_(?P<mote_id>\d+) MONITORED (?P<monitored_time>\d+)", powertracker_logs, re.MULTILINE) on_iterable = re.finditer( r"^(Sky|Wismote)_(?P<mote_id>\d+) ON (?P<on_time>\d+)", powertracker_logs, re.MULTILINE) tx_iterable = re.finditer( r"^(Sky|Wismote)_(?P<mote_id>\d+) TX (?P<tx_time>\d+)", powertracker_logs, re.MULTILINE) rx_iterable = re.finditer( r"^(Sky|Wismote)_(?P<mote_id>\d+) RX (?P<rx_time>\d+)", powertracker_logs, re.MULTILINE) int_iterable = re.finditer( r"^(Sky|Wismote)_(?P<mote_id>\d+) INT (?P<int_time>\d+)", powertracker_logs, re.MULTILINE) all_iterable = zip( monitored_iterable, on_iterable, tx_iterable, rx_iterable, int_iterable) fields = ["mote_id", "monitored_time", "tx_time", "rx_time", "on_time", "int_time"] output_folder = pj(folder, "results") if not os.path.exists(output_folder): os.makedirs(output_folder) with open(pj(output_folder, "powertracker.csv"), "w") as csv_output: writer = DictWriter(csv_output, delimiter=',', fieldnames=fields) writer.writeheader() for matches in all_iterable: row = {} for match in matches: all(m.groupdict()["mote_id"] == matches[0].groupdict()["mote_id"] for m in matches) row.update((k, int(v)) for k, v in match.groupdict().items()) # Passing the result from us to s row["monitored_time"] = float( row["monitored_time"]) / (10 ** 6) row["tx_time"] = float(row["tx_time"]) / (10 ** 6) row["rx_time"] = float(row["rx_time"]) / (10 ** 6) row["on_time"] = float(row["on_time"]) / (10 ** 6) row["int_time"] = float(row["int_time"]) / (10 ** 6) if row["monitored_time"] > shift: writer.writerow(row) def pcap2csv(folder): """ Execute a simple filter on PCAP and count """ print("start pcap2csv") with open(pj(folder, "results", "pcap.csv"), "w") as output_file: command = ["tshark", "-T", "fields", "-E", "header=y", "-E", "separator=,", "-e", "frame.time_relative", "-e", 
"frame.len", "-e", "wpan.src64", "-e", "wpan.dst64", "-e", "icmpv6.type", "-e", "ipv6.src", "-e", "ipv6.dst", "-e", "icmpv6.code", "-e", "data.data", "-r", pj(folder, "output.pcap")] print(str(command)) process = subprocess.Popen(command, stdout=subprocess.PIPE) stdout, stderr = process.communicate() output_file.write(stdout) def format_pcap_csv(folder): df = pd.read_csv(pj(folder, "results", "pcap.csv")) df.rename(columns={"frame.time_relative": "time", "frame.len": "length", "wpan.src64": "mac_src", "wpan.dst64": "mac_dst", "ipv6.src": "ip_src", "icmpv6.type": "icmpv6_type", "ipv6.dst": "ip_dst", "icmpv6.code": "icmp_code", "data.data": "payload" }, inplace=True) SIM_TIME = 200 df["time"] *= SIM_TIME / df.time.max() def f(x): if isinstance(x["mac_dst"], str): try: return ipv6_to_host(x["mac_dst"]) except: return x["mac_dst"] df.mac_dst = df.apply(f, axis=1) def f(x): if isinstance(x["mac_src"], str): try: return ipv6_to_host(x["mac_src"]) except: return x["mac_src"] df.mac_src = df.apply(f, axis=1) def f(x): if isinstance(x["ip_src"], str): try: return ipv6_to_host(x["ip_src"]) except: return x["ip_src"] df.ip_src = df.apply(f, axis=1) df.icmpv6_type = df.icmpv6_type.apply(lambda x: "rpl" if x == 155 else x) code = {0: "dis", 1: "dio", 2: "dao"} df.icmp_code = df.icmp_code.apply(lambda x: code[x] if x in code else x) def f(x): if isinstance(x["payload"], str): return "udp" else: return x["icmpv6_type"] df.icmpv6_type = df.apply(f, axis=1) # ACK packets def f(x): if x["length"] == 5: return "ack" else: return x["icmpv6_type"] df.icmpv6_type = df.apply(f, axis=1) # Forwarding def f(x): if x.icmpv6_type == "udp": if x.mac_src != x.ip_src: return True else: return False else: return False df["forwarding"] = df.apply(f, axis=1) df.to_csv(pj(folder, "results", "pcap_relooked.csv"), index=False) def cast_message(d): """ Do standard casting for messages """ d["mote_id"] = int(d["mote_id"]) d["time"] = float(d["time"]) / (10 ** 6) return d def frame_size(size): """ Very basic function that gives the size of the frame sent on the wire for a UDP packet 10: 55 20: 65 30: 75 40: 85 50: 95 """ d = {'udp': 8, 'ipv6_hop': 8, '6lowpan': 6, '802.15.4': 23} if size > 50: log.warning('Fragmentation occurred I cannot compute reliably') total = sum(list(d.values()) + [size]) log.debug("frame_size Payload size: %d ; On wire size %d", size, total) return total def _handle_dis_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["mote_id"]) d["message_type"] = "dis" stats[d["node"], "dis_time"] += dis_packet stats[d["node"], "rpl_time"] += dis_packet return message_t(**d) _handle_dis_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "RPL: Sending a DIS"]) def _handle_dao_log(match, stats): d = match.groupdict() d = cast_message(d) d["message_type"] = "dao" d["node"] = int(d["mote_id"]) stats[d["node"], "dao_count"] += 1 stats[d["node"], "rpl_count"] += 1 return message_t(**d) _handle_dao_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "RPL: Sending DAO with prefix"]) def _handle_overhearing_log(match, stats): d = match.groupdict() d = cast_message(d) d["message_type"] = "overhearing" stats[d["mote_id"], "overhearing_count"] += 1 return message_t(**d) _handle_overhearing_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "contikimac: data not for us"]) def _handle_dio_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["mote_id"]) d["message_type"] = "dio" stats[d["node"], "dio_time"] += dio_packet stats[d["node"], "rpl_time"] += dio_packet return 
message_t(**d) _handle_dio_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "(RPL: Sending a multicast-DIO|RPL: Sending unicast-DIO)"]) def _handle_forward_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["mote_id"]) d["message_type"] = "forwarding" # TODO: Hard coded one stats[d["mote_id"], "forwarding_time"] += 8.0 * frame_size(10) / RATE return message_t(**d) _handle_forward_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "Forwarding packet to"]) def _handle_battery_recalibration_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["mote_id"]) d["message_type"] = "battery_recalibration" stats[d["node"], "battery_recalibration_count"] += 1 return message_t(**d) _handle_battery_recalibration_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "Battery recalibration"]) def _handle_parent_log(match, stats): # Simple count of parent messages d = match.groupdict() d = cast_message(d) d["node"] = int(d["node"]) d["message_type"] = "parent" stats[d["node"], "parent_count"] += 1 return message_t(**d) _handle_parent_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "Preferred Parent (?P<node>\d+)$"]) def _handle_mac_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["mote_id"]) d["message_type"] = "mac" d["strobes"] = int(d["strobes"]) d["size"] = int(d["size"]) stats[ d["mote_id"], "mac_time"] += (d["strobes"] + 1) * 8.0 * d["size"] / RATE return message_t(**d) _handle_mac_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "contikimac: send \(strobes=(?P<strobes>\d+), len=(?P<size>\d+), ack, no collision\), done$"]) def _handle_udp_sending_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["mote_id"]) d["message_type"] = "udp_sending" d["size"] = len(d["message"]) stats[d["node"], "data_time"] += 8.0 * frame_size(d["size"]) / RATE return message_t(**d) _handle_udp_sending_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "DATA send to root \'(?P<message>(.)*)\'$"]) def _handle_udp_reception_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["node"]) d["message_type"] = "udp_reception" d["size"] = len(d["message"]) stats[d["node"], "data_time"] += 8.0 * frame_size(d["size"]) / RATE return message_t(**d) _handle_udp_reception_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "DATA recv \'(?P<message>(.)*)\' from (?P<node>\d+)$"]) def _handle_neighbor_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["node"]) d["message_type"] = "neighbor" stats[d["node"], "neighbor_count"] += 1 return message_t(**d) _handle_neighbor_log.regexp = r" ".join( [time_regexp, mote_id_regexp, "Neighbor (?P<node>\d+)$"]) def _handle_stats_log(match, stats): d = match.groupdict() d = cast_message(d) d["node"] = int(d["mote_id"]) d["message_type"] = "stats" for field in ["clock", "cpu", "lpm", "irq", "green_led", "yellow_led", "red_led", "tx", "rx", "sensors", "serial"]: d[field] = int(d[field]) return message_t(**d) _handle_stats_log.regexp = r" ".join([ time_regexp, mote_id_regexp, "E (?P<rime>\d+.\d+)", "clock (?P<clock>\d+)", "cpu (?P<cpu>\d+)", "lpm (?P<lpm>\d+)", "irq (?P<irq>\d+)", "gled (?P<green_led>\d+)", "yled (?P<yellow_led>\d+)", "rled (?P<red_led>\d+)", "tx (?P<tx>\d+)", "listen (?P<rx>\d+)", "sensors (?P<sensors>\d+)", "serial (?P<serial>\d+)" ]) _handlers = [ _handle_battery_recalibration_log, _handle_dao_log, _handle_dio_log, _handle_dis_log, _handle_forward_log, _handle_mac_log, _handle_neighbor_log, 
_handle_overhearing_log, _handle_parent_log, _handle_stats_log, _handle_udp_reception_log, _handle_udp_sending_log, ] def powertracker2message(folder, stats): with open(pj(folder, "results", "powertracker.csv")) as energy_f: messages = set() for row in DictReader(energy_f): m = {"time": float(row["monitored_time"]), "rx": float(row["rx_time"]), "tx": float(row["tx_time"]), "message_type": "energy", "node": int(row["mote_id"])} messages.add(message_t(**m)) stats[int(row["mote_id"]), "powertracker"] += 1 return messages def serial2message(folder, stats): """ # mote_id => node that emit the serial log # node => node that emit the message # Typically when a message come to root, mote_id => root, node => node that emitted the # message # Regular expression used for matching logs in serial """ messages = set() with open(pj(folder, "serial.log")) as serial_file: for line in serial_file: for handler in _handlers: match = re.match(handler.regexp, line, re.MULTILINE) if match: message = handler(match, stats) messages.add(message) break return messages def print_stats(stats): for name, count in sorted(stats.items()): if count: if "time" in name[1] and stats[name[0], "mac_time"]: total = stats[name[0], "mac_time"] log.info("%s: %f (%f)", name, count, count / total) else: log.info("%s: %d", name, count) else: log.warning("No packets for %s", name) def message(folder): """ Message queue preparation - Extract from powertracker all the message - Extract from the serial logs all the message. IMPORTANT: We only extract the message received from the root or send by the root. 186572 ID:2 DATA send to 1 'Hello 1' 187124 ID:8 DATA recv 'Hello 1' from 2 197379 ID:8 REPLY send to 7 'Reply 1' 197702 ID:7 REPLY recv 'Reply 1' from 8 """ stats = defaultdict(float) sorted_messages = sorted( chain(serial2message(folder, stats), # powertracker2message(folder, stats) ), key=operator.attrgetter("time")) print_stats(stats) # MESSAGE logging for debugging purposes # log.info(pj(folder, "results", "messages.csv")) with open(pj(folder, "results", "messages.csv"), "w") as f: writer = DictWriter(f, fieldnames=fieldnames) writer.writeheader() for m in sorted_messages: writer.writerow(m._asdict()) log.info("messages saved") return sorted_messages def csc_to_graph(name): tree = ET(name) mote_id = [int(t.text) for t in tree.findall(".//mote/interface_config/id")] mote_type = [t.text for t in tree.findall(".//mote/motetype_identifier")] x = [float(t.text) for t in tree.findall(".//mote/interface_config/x")] y = [float(t.text) for t in tree.findall(".//mote/interface_config/y")] z = [float(t.text) for t in tree.findall(".//mote/interface_config/z")] def bytes_to_times(n): """ Take bytes and send back a time in seconds spent to send it in 802.15.4 """ return (992 + (n / 250000.0)) * 10 ** -6 def message_2_csv(f): """ Extract from the serial logs all the message. IMPORTANT: We only extract the message received from the root or send by the root. 186572 ID:2 DATA send to 1 'Hello 1' 187124 ID:8 DATA recv 'Hello 1' from 2 TODO: Pass all times to seconds. 
""" # Departures from client departure = "^(?P<time>\d+)\s+ID:\d+\s+Client sending" ack_time = 352 * 10 ** -6 fieldnames = ["time", "message_type"] with open(f) as serial_file, open("serial.csv", "w") as output_file: writer = DictWriter(output_file, fieldnames) writer.writeheader() # Counting packets to get an average rate match = 0 for line in serial_file: if re.match(departure, line): d = re.match(departure, line).groupdict() d["message_type"] = "transmission" d["time"] = float(d["time"]) / (10 ** 6) counter["time"].append(d["time"]) match += 1 counter["count"].append(match) model_receiver["time"].append(d["time"]) model_receiver["rx"].append(2 * bytes_to_times(192)) model_receiver["tx"].append(ack_time) model_sender["time"].append(d["time"]) model_sender["rx"].append(ack_time) model_sender["tx"].append(2 * bytes_to_times(192)) print("match %d lines" % match) for i in model_receiver["time"]: print(i) def parse_iotlab_energy(folder): current_df = pd.read_csv( pj(folder, "current.csv"), header=None, names=['mote_id', 'time', 'current']) voltage_df = pd.read_csv( pj(folder, "voltage.csv"), header=None, names=['mote_id', 'time', 'voltage']) power_df = pd.read_csv( pj(folder, "power.csv"), header=None, names=['mote_id', 'time', 'power']) temp_df = pd.merge( current_df, voltage_df, how="left", on=["mote_id", "time"]) res_df = pd.merge(temp_df, power_df, how="left", on=["mote_id", "time"]) res_df.to_csv(pj(folder, "energy.csv"), index=False)
apache-2.0
3,840,065,303,742,943,700
30.043771
128
0.541432
false
poidl/yassy
doc/python/frei_appendix_B2.py
1
4132
#!/bin/python # pylint: disable=C0103 """Python translation of Frei Appendix B1 and B2.""" # Frei, B.: Digital sound generation. Institute for Computer Music and # Sound Technology (ICST) Zurich University of the Arts. import numpy as np import scipy.signal as sig import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt # import bindings # parameters fs = 48000 fc = 18300 rlen = 10 ppiv = 100 beta = 9 apof = 0.9 apobeta = 0.7 pts = ppiv * rlen + 1 x1 = np.arange(pts) x2 = rlen * 2 * (x1 - (pts - 1) / 2 + 0.00001) / (pts - 1) x3 = np.pi * fc / fs * x2 h = np.sin(x3) / x3 w = np.kaiser(pts, beta) g = w * h # apodization and normalization aw = 1 - apof * np.kaiser(pts, apobeta) g = aw * g g = g / max(g) # diagrams figname = 'frei_appendixB1a.svg' fig = plt.figure() plt.plot(x2 / 2, g) plt.xlim(-rlen / 2, rlen / 2) plt.ylim(- 0.2, 1.0001) plt.xlabel('Time in Sampling Intervals') plt.title('Bandlimited Impulse') plt.grid() fig.savefig('../figures/' + figname) zpad = 20 g2 = np.concatenate([g, np.zeros((zpad - 1) * pts)]) wspec = np.abs(np.fft.rfft(g2, norm="ortho")) wspec = wspec / max(wspec) # cut = 0.00001 # wspec[wspec > cut] = cut fmax = 60000 rng = round(rlen * zpad * fmax / fs) xidx = np.arange(rng + 1) figname = 'frei_appendixB1b.svg' fig = plt.figure() plt.semilogy((fmax / 1000) * xidx / rng, wspec[: (rng + 1)]) x_save = (fmax / 1000) * xidx / rng y_save = wspec[: (rng + 1)] plt.ylim(1e-5, 1) plt.xlabel('Frequency in kHz') plt.title('Amplitude Spectrum') plt.grid() # markers at 20 kHz, fs - 20 kHz and fs plt.axvline(20, color="g") plt.axvline(fs / 1000 - 20, color="r") plt.axvline(fs / 1000, color="r") fig.savefig('../figures/' + figname) fcomp = 21000 rng = 1 + 2 * np.floor(0.5 * rlen * zpad * fcomp / fs - 0.5) xidx = np.arange(rng + 1) # This is different to Frei's Matlab version. See Matlab/Scipy docs for # differences in call signatures. Whereas the Matlab function firpm # (FIR Parks-McClellan, former "remez" wants amplitudes on the band edges, # Scipy wants them in the band *center* # As a consequence, the plots will differ from Frei Fig. 
14 a = 0.5 * (wspec[: int(rng)] + wspec[1: int(rng + 1)]) a = a[::2] a = 1.0 / a ftune = 0.35 f = xidx / (ftune + rlen * zpad) wgt = np.arange((rng + 1) / 2, 0, -1) wgt = 1 + wgt * wgt b = sig.remez(16, f, a, wgt) [w, h] = sig.freqz(b, 1, rlen * zpad, 'whole') figname = 'frei_prefilter_magnitude_response.svg' fig = plt.figure() f_center = 0.5 * (f[: -1] + f[1:]) f_center = f_center[::2] plt.plot(fs * f_center / 1000, a) plt.plot(0.5 * fs * w / np.pi / 1000, abs(h)) plt.xlim(0, fs / 1000) plt.ylim(0, max(abs(h))) plt.xlabel('Frequency in kHz') plt.title('Prefilter Magnitude Response') plt.grid() fig.savefig('../figures/' + figname) # check by convolving prefilter and bandlimited impulse imp = g[:pts:ppiv] res = np.convolve(b, imp) res = np.concatenate([res, np.zeros(1000 - len(res))]) wspec = np.abs(np.fft.rfft(res, norm="ortho")) rng = round(1000 * 20000 / fs) xidx = np.arange(rng + 1) figname = 'frei_normalized_overall_magnitude_response.svg' fig = plt.figure() plt.plot(20 * xidx / rng, wspec[: int(rng + 1)] / wspec[0]) plt.xlim(0, 20) plt.xlabel('Frequency in kHz') plt.title('Normalized Overall Magnitude Response') plt.grid() fig.savefig('../figures/' + figname) figname = 'frei_normalized_overall_magnitude_response_compare_1.svg' fig = plt.figure() plt.plot(20 * xidx / rng, wspec[: int(rng + 1)] / wspec[0]) plt.plot(x_save, y_save) plt.xlim(0, 20) plt.xlabel('Frequency in kHz') plt.title('Normalized Overall Magnitude Response') plt.grid() fig.savefig('../figures/' + figname) figname = 'frei_normalized_overall_magnitude_response_compare_2.svg' fig = plt.figure() xax = 0.5 * fs * w / np.pi / 1000 ynew = np.interp(xax, x_save, y_save) plt.semilogy(xax, ynew) plt.semilogy(xax, abs(h) * ynew) plt.xlim(0, fs / 1000) plt.ylim(1e-5, 1.1) plt.xlabel('Frequency in kHz') plt.title('Effect of prefilter') plt.grid() # markers at 20 kHz, fs - 20 kHz and fs plt.axvline(20, color="g") plt.axvline(fs / 1000 - 20, color="r") plt.axvline(fs / 1000, color="r") fig.savefig('../figures/' + figname)
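# --- Quick self-check, not part of Frei's appendix --------------------------
# The bandlimited impulse above is an explicit sin(x)/x; numpy's np.sinc
# (defined as sin(pi*t)/(pi*t)) traces the same curve. The parameters repeat
# the values used at the top of this script.
def _sinc_equivalence_check():
    fs_chk, fc_chk, rlen_chk, ppiv_chk = 48000, 18300, 10, 100
    pts_chk = ppiv_chk * rlen_chk + 1
    x1_chk = np.arange(pts_chk)
    x2_chk = rlen_chk * 2 * (x1_chk - (pts_chk - 1) / 2 + 0.00001) / (pts_chk - 1)
    x3_chk = np.pi * fc_chk / fs_chk * x2_chk
    h_explicit = np.sin(x3_chk) / x3_chk
    h_sinc = np.sinc(fc_chk / fs_chk * x2_chk)
    return np.allclose(h_explicit, h_sinc)  # expected: True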
gpl-2.0
7,719,040,988,503,525,000
26.546667
74
0.658035
false
grundgruen/zipline
tests/pipeline/test_numerical_expression.py
1
16240
from operator import ( add, and_, ge, gt, le, lt, methodcaller, mul, ne, or_, ) from unittest import TestCase import numpy from numpy import ( arange, eye, float64, full, isnan, zeros, ) from pandas import ( DataFrame, date_range, Int64Index, ) from zipline.pipeline import Factor from zipline.pipeline.expression import ( NumericalExpression, NUMEXPR_MATH_FUNCS, ) from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype from zipline.utils.test_utils import check_arrays class F(Factor): dtype = float64_dtype inputs = () window_length = 0 class G(Factor): dtype = float64_dtype inputs = () window_length = 0 class H(Factor): dtype = float64_dtype inputs = () window_length = 0 class DateFactor(Factor): dtype = datetime64ns_dtype inputs = () window_length = 0 class NumericalExpressionTestCase(TestCase): def setUp(self): self.dates = date_range('2014-01-01', periods=5, freq='D') self.assets = Int64Index(range(5)) self.f = F() self.g = G() self.h = H() self.d = DateFactor() self.fake_raw_data = { self.f: full((5, 5), 3), self.g: full((5, 5), 2), self.h: full((5, 5), 1), self.d: full((5, 5), 0, dtype='datetime64[ns]'), } self.mask = DataFrame(True, index=self.dates, columns=self.assets) def check_output(self, expr, expected): result = expr._compute( [self.fake_raw_data[input_] for input_ in expr.inputs], self.mask.index, self.mask.columns, self.mask.values, ) check_arrays(result, expected) def check_constant_output(self, expr, expected): self.assertFalse(isnan(expected)) return self.check_output(expr, full((5, 5), expected)) def test_validate_good(self): f = self.f g = self.g NumericalExpression("x_0", (f,), dtype=float64_dtype) NumericalExpression("x_0 ", (f,), dtype=float64_dtype) NumericalExpression("x_0 + x_0", (f,), dtype=float64_dtype) NumericalExpression("x_0 + 2", (f,), dtype=float64_dtype) NumericalExpression("2 * x_0", (f,), dtype=float64_dtype) NumericalExpression("x_0 + x_1", (f, g), dtype=float64_dtype) NumericalExpression("x_0 + x_1 + x_0", (f, g), dtype=float64_dtype) NumericalExpression("x_0 + 1 + x_1", (f, g), dtype=float64_dtype) def test_validate_bad(self): f, g, h = self.f, self.g, self.h # Too few inputs. with self.assertRaises(ValueError): NumericalExpression("x_0", (), dtype=float64_dtype) with self.assertRaises(ValueError): NumericalExpression("x_0 + x_1", (f,), dtype=float64_dtype) # Too many inputs. with self.assertRaises(ValueError): NumericalExpression("x_0", (f, g), dtype=float64_dtype) with self.assertRaises(ValueError): NumericalExpression("x_0 + x_1", (f, g, h), dtype=float64_dtype) # Invalid variable name. with self.assertRaises(ValueError): NumericalExpression("x_0x_1", (f,), dtype=float64_dtype) with self.assertRaises(ValueError): NumericalExpression("x_0x_1", (f, g), dtype=float64_dtype) # Variable index must start at 0. with self.assertRaises(ValueError): NumericalExpression("x_1", (f,), dtype=float64_dtype) # Scalar operands must be numeric. with self.assertRaises(TypeError): "2" + f with self.assertRaises(TypeError): f + "2" with self.assertRaises(TypeError): f > "2" # Boolean binary operators must be between filters. with self.assertRaises(TypeError): f + (f > 2) with self.assertRaises(TypeError): (f > f) > f def test_combine_datetimes(self): with self.assertRaises(TypeError) as e: self.d + self.d message = e.exception.args[0] expected = ( "Don't know how to compute datetime64[ns] + datetime64[ns].\n" "Arithmetic operators are only supported on Factors of dtype " "'float64'." 
) self.assertEqual(message, expected) # Confirm that * shows up in the error instead of +. with self.assertRaises(TypeError) as e: self.d * self.d message = e.exception.args[0] expected = ( "Don't know how to compute datetime64[ns] * datetime64[ns].\n" "Arithmetic operators are only supported on Factors of dtype " "'float64'." ) self.assertEqual(message, expected) def test_combine_datetime_with_float(self): # Test with both float-type factors and numeric values. for float_value in (self.f, float64(1.0), 1.0): for op, sym in ((add, '+'), (mul, '*')): with self.assertRaises(TypeError) as e: op(self.f, self.d) message = e.exception.args[0] expected = ( "Don't know how to compute float64 {sym} datetime64[ns].\n" "Arithmetic operators are only supported on Factors of " "dtype 'float64'." ).format(sym=sym) self.assertEqual(message, expected) with self.assertRaises(TypeError) as e: op(self.d, self.f) message = e.exception.args[0] expected = ( "Don't know how to compute datetime64[ns] {sym} float64.\n" "Arithmetic operators are only supported on Factors of " "dtype 'float64'." ).format(sym=sym) self.assertEqual(message, expected) def test_negate_datetime(self): with self.assertRaises(TypeError) as e: -self.d message = e.exception.args[0] expected = ( "Can't apply unary operator '-' to instance of " "'DateFactor' with dtype 'datetime64[ns]'.\n" "'-' is only supported for Factors of dtype 'float64'." ) self.assertEqual(message, expected) def test_negate(self): f, g = self.f, self.g self.check_constant_output(-f, -3.0) self.check_constant_output(--f, 3.0) self.check_constant_output(---f, -3.0) self.check_constant_output(-(f + f), -6.0) self.check_constant_output(-f + -f, -6.0) self.check_constant_output(-(-f + -f), 6.0) self.check_constant_output(f + -g, 1.0) self.check_constant_output(f - -g, 5.0) self.check_constant_output(-(f + g) + (f + g), 0.0) self.check_constant_output((f + g) + -(f + g), 0.0) self.check_constant_output(-(f + g) + -(f + g), -10.0) def test_add(self): f, g = self.f, self.g self.check_constant_output(f + g, 5.0) self.check_constant_output((1 + f) + g, 6.0) self.check_constant_output(1 + (f + g), 6.0) self.check_constant_output((f + 1) + g, 6.0) self.check_constant_output(f + (1 + g), 6.0) self.check_constant_output((f + g) + 1, 6.0) self.check_constant_output(f + (g + 1), 6.0) self.check_constant_output((f + f) + f, 9.0) self.check_constant_output(f + (f + f), 9.0) self.check_constant_output((f + g) + f, 8.0) self.check_constant_output(f + (g + f), 8.0) self.check_constant_output((f + g) + (f + g), 10.0) self.check_constant_output((f + g) + (g + f), 10.0) self.check_constant_output((g + f) + (f + g), 10.0) self.check_constant_output((g + f) + (g + f), 10.0) def test_subtract(self): f, g = self.f, self.g self.check_constant_output(f - g, 1.0) # 3 - 2 self.check_constant_output((1 - f) - g, -4.) # (1 - 3) - 2 self.check_constant_output(1 - (f - g), 0.0) # 1 - (3 - 2) self.check_constant_output((f - 1) - g, 0.0) # (3 - 1) - 2 self.check_constant_output(f - (1 - g), 4.0) # 3 - (1 - 2) self.check_constant_output((f - g) - 1, 0.0) # (3 - 2) - 1 self.check_constant_output(f - (g - 1), 2.0) # 3 - (2 - 1) self.check_constant_output((f - f) - f, -3.) # (3 - 3) - 3 self.check_constant_output(f - (f - f), 3.0) # 3 - (3 - 3) self.check_constant_output((f - g) - f, -2.) 
# (3 - 2) - 3 self.check_constant_output(f - (g - f), 4.0) # 3 - (2 - 3) self.check_constant_output((f - g) - (f - g), 0.0) # (3 - 2) - (3 - 2) self.check_constant_output((f - g) - (g - f), 2.0) # (3 - 2) - (2 - 3) self.check_constant_output((g - f) - (f - g), -2.) # (2 - 3) - (3 - 2) self.check_constant_output((g - f) - (g - f), 0.0) # (2 - 3) - (2 - 3) def test_multiply(self): f, g = self.f, self.g self.check_constant_output(f * g, 6.0) self.check_constant_output((2 * f) * g, 12.0) self.check_constant_output(2 * (f * g), 12.0) self.check_constant_output((f * 2) * g, 12.0) self.check_constant_output(f * (2 * g), 12.0) self.check_constant_output((f * g) * 2, 12.0) self.check_constant_output(f * (g * 2), 12.0) self.check_constant_output((f * f) * f, 27.0) self.check_constant_output(f * (f * f), 27.0) self.check_constant_output((f * g) * f, 18.0) self.check_constant_output(f * (g * f), 18.0) self.check_constant_output((f * g) * (f * g), 36.0) self.check_constant_output((f * g) * (g * f), 36.0) self.check_constant_output((g * f) * (f * g), 36.0) self.check_constant_output((g * f) * (g * f), 36.0) self.check_constant_output(f * f * f * 0 * f * f, 0.0) def test_divide(self): f, g = self.f, self.g self.check_constant_output(f / g, 3.0 / 2.0) self.check_constant_output( (2 / f) / g, (2 / 3.0) / 2.0 ) self.check_constant_output( 2 / (f / g), 2 / (3.0 / 2.0), ) self.check_constant_output( (f / 2) / g, (3.0 / 2) / 2.0, ) self.check_constant_output( f / (2 / g), 3.0 / (2 / 2.0), ) self.check_constant_output( (f / g) / 2, (3.0 / 2.0) / 2, ) self.check_constant_output( f / (g / 2), 3.0 / (2.0 / 2), ) self.check_constant_output( (f / f) / f, (3.0 / 3.0) / 3.0 ) self.check_constant_output( f / (f / f), 3.0 / (3.0 / 3.0), ) self.check_constant_output( (f / g) / f, (3.0 / 2.0) / 3.0, ) self.check_constant_output( f / (g / f), 3.0 / (2.0 / 3.0), ) self.check_constant_output( (f / g) / (f / g), (3.0 / 2.0) / (3.0 / 2.0), ) self.check_constant_output( (f / g) / (g / f), (3.0 / 2.0) / (2.0 / 3.0), ) self.check_constant_output( (g / f) / (f / g), (2.0 / 3.0) / (3.0 / 2.0), ) self.check_constant_output( (g / f) / (g / f), (2.0 / 3.0) / (2.0 / 3.0), ) def test_pow(self): f, g = self.f, self.g self.check_constant_output(f ** g, 3.0 ** 2) self.check_constant_output(2 ** f, 2.0 ** 3) self.check_constant_output(f ** 2, 3.0 ** 2) self.check_constant_output((f + g) ** 2, (3.0 + 2.0) ** 2) self.check_constant_output(2 ** (f + g), 2 ** (3.0 + 2.0)) self.check_constant_output(f ** (f ** g), 3.0 ** (3.0 ** 2.0)) self.check_constant_output((f ** f) ** g, (3.0 ** 3.0) ** 2.0) self.check_constant_output((f ** g) ** (f ** g), 9.0 ** 9.0) self.check_constant_output((f ** g) ** (g ** f), 9.0 ** 8.0) self.check_constant_output((g ** f) ** (f ** g), 8.0 ** 9.0) self.check_constant_output((g ** f) ** (g ** f), 8.0 ** 8.0) def test_mod(self): f, g = self.f, self.g self.check_constant_output(f % g, 3.0 % 2.0) self.check_constant_output(f % 2.0, 3.0 % 2.0) self.check_constant_output(g % f, 2.0 % 3.0) self.check_constant_output((f + g) % 2, (3.0 + 2.0) % 2) self.check_constant_output(2 % (f + g), 2 % (3.0 + 2.0)) self.check_constant_output(f % (f % g), 3.0 % (3.0 % 2.0)) self.check_constant_output((f % f) % g, (3.0 % 3.0) % 2.0) self.check_constant_output((f + g) % (f * g), 5.0 % 6.0) def test_math_functions(self): f, g = self.f, self.g fake_raw_data = self.fake_raw_data alt_fake_raw_data = { self.f: full((5, 5), .5), self.g: full((5, 5), -.5), } for funcname in NUMEXPR_MATH_FUNCS: method = methodcaller(funcname) func = getattr(numpy, 
funcname) # These methods have domains in [0, 1], so we need alternate inputs # that are in the domain. if funcname in ('arcsin', 'arccos', 'arctanh'): self.fake_raw_data = alt_fake_raw_data else: self.fake_raw_data = fake_raw_data f_val = self.fake_raw_data[f][0, 0] g_val = self.fake_raw_data[g][0, 0] self.check_constant_output(method(f), func(f_val)) self.check_constant_output(method(g), func(g_val)) self.check_constant_output(method(f) + 1, func(f_val) + 1) self.check_constant_output(1 + method(f), 1 + func(f_val)) self.check_constant_output(method(f + .25), func(f_val + .25)) self.check_constant_output(method(.25 + f), func(.25 + f_val)) self.check_constant_output( method(f) + method(g), func(f_val) + func(g_val), ) self.check_constant_output( method(f + g), func(f_val + g_val), ) def test_comparisons(self): f, g, h = self.f, self.g, self.h self.fake_raw_data = { f: arange(25).reshape(5, 5), g: arange(25).reshape(5, 5) - eye(5), h: full((5, 5), 5), } f_data = self.fake_raw_data[f] g_data = self.fake_raw_data[g] cases = [ # Sanity Check with hand-computed values. (f, g, eye(5), zeros((5, 5))), (f, 10, f_data, 10), (10, f, 10, f_data), (f, f, f_data, f_data), (f + 1, f, f_data + 1, f_data), (1 + f, f, 1 + f_data, f_data), (f, g, f_data, g_data), (f + 1, g, f_data + 1, g_data), (f, g + 1, f_data, g_data + 1), (f + 1, g + 1, f_data + 1, g_data + 1), ((f + g) / 2, f ** 2, (f_data + g_data) / 2, f_data ** 2), ] for op in (gt, ge, lt, le, ne): for expr_lhs, expr_rhs, expected_lhs, expected_rhs in cases: self.check_output( op(expr_lhs, expr_rhs), op(expected_lhs, expected_rhs), ) def test_boolean_binops(self): f, g, h = self.f, self.g, self.h self.fake_raw_data = { f: arange(25).reshape(5, 5), g: arange(25).reshape(5, 5) - eye(5), h: full((5, 5), 5), } # Should be True on the diagonal. eye_filter = f > g # Should be True in the first row only. first_row_filter = f < h eye_mask = eye(5, dtype=bool) first_row_mask = zeros((5, 5), dtype=bool) first_row_mask[0] = 1 self.check_output(eye_filter, eye_mask) self.check_output(first_row_filter, first_row_mask) for op in (and_, or_): # NumExpr doesn't support xor. self.check_output( op(eye_filter, first_row_filter), op(eye_mask, first_row_mask), )
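# --- Aside, not part of the test suite ---------------------------------------
# The expressions exercised above are ultimately strings such as "x_0 + x_1"
# bound to input arrays. Assuming the numexpr package is installed (the module
# under test imports NUMEXPR_MATH_FUNCS and the comments mention NumExpr), the
# same string can be evaluated directly as a hand sanity-check:
def _numexpr_aside():
    import numexpr
    x_0 = full((5, 5), 3.0)
    x_1 = full((5, 5), 2.0)
    out = numexpr.evaluate("x_0 + x_1", local_dict={"x_0": x_0, "x_1": x_1})
    return out[0, 0]  # 5.0, matching check_constant_output(f + g, 5.0)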
apache-2.0
-5,645,014,339,899,608,000
32.484536
79
0.498091
false
SanPen/PracticalGridModeling
examples/substation.py
1
1669
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Nov 20 16:40:22 2017 @author: santi """ import pandas as pd import numpy as np import networkx as nx from matplotlib import pyplot as plt if __name__ == "__main__": # load data conn_df = pd.read_excel('substation.xlsx', 'Connectivity', index_col=0).fillna(0) stat_df = pd.read_excel('substation.xlsx', 'States', index_col=0) pos_df = pd.read_excel('substation.xlsx', 'Pos', index_col=0) node_names = conn_df.columns.values G = nx.Graph() pos = dict() lpos = dict() # add nodes to the graph for i in range(len(node_names)): G.add_node(node_names[i]) x = pos_df.values[i, 0] y = pos_df.values[i, 1] pos[node_names[i]] = [x, y] lpos[node_names[i]] = [x, y] # add branches to the graph for i, line in enumerate(conn_df.values): if stat_df.values[i] > 0: x, y = np.where(line > 0)[0] # works because there are only 2 values per line with a 1 in the excel file n1 = node_names[x] n2 = node_names[y] G.add_edge(n1, n2) # get the islands islands = list(nx.connected_components(G)) sub_grids = list() print('Islands:\n', islands, '\n\n') for island in islands: g = nx.subgraph(G, island) sub_grids.append(g) # plot nx.draw(G, pos=pos, node_size=100, node_color='black') for name in node_names: x, y = lpos[name] plt.text(x+1.5,y+1,s=name, bbox=dict(facecolor='white', alpha=0.5), horizontalalignment='center') plt.show()
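# --- Stand-alone variant, not in the original script -------------------------
# Same island-detection idea as above, but with a hard-coded connectivity
# matrix and switch states (a made-up 4-busbar example) so it runs without the
# Excel workbook.
def _island_demo():
    names = ['B1', 'B2', 'B3', 'B4']
    # one row per branch; each row touches exactly two nodes
    conn = np.array([[1, 1, 0, 0],
                     [0, 0, 1, 1]])
    states = np.array([1, 1])  # set an entry to 0 to open that branch

    g = nx.Graph()
    g.add_nodes_from(names)
    for i, line in enumerate(conn):
        if states[i] > 0:
            x, y = np.where(line > 0)[0]
            g.add_edge(names[x], names[y])

    return list(nx.connected_components(g))  # [{'B1', 'B2'}, {'B3', 'B4'}]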
gpl-3.0
-6,445,472,641,222,862,000
26.833333
117
0.559617
false
turbomanage/training-data-analyst
blogs/gcp_forecasting/time_series.py
2
8054
"""Utilities for disa time-series modeling.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import numpy as np import pandas as pd from pandas.tseries.holiday import USFederalHolidayCalendar as calendar def _keep(window, windows): """Helper function for creating rolling windows.""" windows.append(window.copy()) return -1. # Float return value required for Pandas apply. def create_rolling_features_label(series, window_size, pred_offset, pred_n=1): """Computes rolling window of the series and creates rolling window of label. Args: series: A Pandas Series. The indices are datetimes and the values are numeric type. window_size: integer; steps of historical data to use for features. pred_offset: integer; steps into the future for prediction. pred_n: integer; window size of label. Returns: Pandas dataframe where the index is the datetime predicting at. The columns beginning with "-" indicate windows N steps before the prediction time. Examples: >>> series = pd.Series(np.random.random(6), index=pd.date_range(start='1/1/2018', end='1/06/2018')) # Example #1: >>> series 2018-01-01 0.803948 2018-01-02 0.269849 2018-01-03 0.971984 2018-01-04 0.809718 2018-01-05 0.324454 2018-01-06 0.229447 >>> window_size = 3 # get 3 months of historical data >>> pred_offset = 1 # predict starting next month >>> pred_n = 1 # for predicting a single month >>> utils.create_rolling_features_label(series, window_size, pred_offset, pred_n) pred_datetime -3_steps -2_steps -1_steps label 2018-01-04 0.803948 0.269849 0.971984 0.809718 2018-01-05 0.269849 0.971984 0.809718 0.324454 2018-01-06 0.971984 0.809718 0.324454 0.229447 # Example #2: >>> window_size = 3 # get 3 months of historical data >>> pred_offset = 2 # predict starting 2 months into future >>> pred_n = 1 # for predicting a single month >>> utils.create_rolling_features_label(series, window_size, pred_offset, pred_n) pred_datetime -4_steps -3_steps -2_steps label 2018-01-05 0.803948 0.269849 0.971984 0.324454 2018-01-06 0.269849 0.971984 0.809718 0.229447 # Example #3: >>> window_size = 3 # get 3 months of historical data >>> pred_offset = 1 # predict starting next month >>> pred_n = 2 # for predicting a multiple months >>> utils.create_rolling_features_label(series, window_size, pred_offset, pred_n) pred_datetime -3_steps -2_steps -1_steps label_0_steps label_1_steps 2018-01-04 0.803948 0.269849 0.971984 0.809718 0.324454 2018-01-05 0.269849 0.971984 0.809718 0.324454 0.229447 """ if series.isnull().sum() > 0: raise ValueError('Series must not contain missing values.') if pred_n < 1: raise ValueError('pred_n must not be < 1.') if len(series) < (window_size + pred_offset + pred_n): raise ValueError('window_size + pred_offset + pred_n must not be greater ' 'than series length.') total_steps = len(series) def compute_rolling_window(series, window_size): # Accumulate series into list. windows = [] series.rolling(window_size)\ .apply(_keep, args=(windows,)) return np.array(windows) features_start = 0 features_end = total_steps - (pred_offset - 1) - pred_n historical_windows = compute_rolling_window( series[features_start:features_end], window_size) # Get label pred_offset steps into the future. label_start, label_end = window_size + pred_offset - 1, total_steps label_series = series[label_start:label_end] y = compute_rolling_window(label_series, pred_n) if pred_n == 1: # TODO(crawles): remove this if statement/label name. It's for backwards # compatibility. 
columns = ['label'] else: columns = ['label_{}_steps'.format(i) for i in range(pred_n)] # Make dataframe. Combine features and labels. label_ix = label_series.index[0:len(label_series) + 1 - pred_n] df = pd.DataFrame(y, columns=columns, index=label_ix) df.index.name = 'pred_date' # Populate dataframe with past sales. for day in range(window_size - 1, -1, -1): day_rel_label = pred_offset + window_size - day - 1 df.insert(0, '-{}_steps'.format(day_rel_label), historical_windows[:, day]) return df def add_aggregate_features(df, time_series_col_names): """Compute summary statistic features for every row of dataframe.""" x = df[time_series_col_names] features = {} features['mean'] = x.mean(axis=1) features['std'] = x.std(axis=1) features['min'] = x.min(axis=1) features['max'] = x.max(axis=1) percentiles = range(10, 100, 20) for p in percentiles: features['{}_per'.format(p)] = np.percentile(x, p, axis=1) df_features = pd.DataFrame(features, index=x.index) return df_features.merge(df, left_index=True, right_index=True) def move_column_to_end(df, column_name): temp = df[column_name] df.drop(column_name, axis=1, inplace=True) df[column_name] = temp def is_between_dates(dates, start=None, end=None): """Return boolean indices indicating if dates occurs between start and end.""" if start is None: start = pd.to_datetime(0) if end is None: end = pd.to_datetime(sys.maxsize) date_series = pd.Series(pd.to_datetime(dates)) return date_series.between(start, end).values def _count_holidays(dates, months, weeks): """Count number of holidays spanned in prediction windows.""" cal = calendar() holidays = cal.holidays(start=dates.min(), end=dates.max()) def count_holidays_during_month(date): n_holidays = 0 beg = date end = date + pd.DateOffset(months=months, weeks=weeks) for h in holidays: if beg <= h < end: n_holidays += 1 return n_holidays return pd.Series(dates).apply(count_holidays_during_month) def _get_day_of_month(x): """From a datetime object, extract day of month.""" return int(x.strftime('%d')) def add_date_features(df, dates, months, weeks, inplace=False): """Create features using date that is being predicted on.""" if not inplace: df = df.copy() df['doy'] = dates.dayofyear df['dom'] = dates.map(_get_day_of_month) df['month'] = dates.month df['year'] = dates.year df['n_holidays'] = _count_holidays(dates, months, weeks).values return df class Metrics(object): """Performance metrics for regressor.""" def __init__(self, y_true, predictions): self.y_true = y_true self.predictions = predictions self.residuals = self.y_true - self.predictions self.rmse = self.calculate_rmse(self.residuals) self.mae = self.calculate_mae(self.residuals) self.malr = self.calculate_malr(self.y_true, self.predictions) def calculate_rmse(self, residuals): """Root mean squared error.""" return np.sqrt(np.mean(np.square(residuals))) def calculate_mae(self, residuals): """Mean absolute error.""" return np.mean(np.abs(residuals)) def calculate_malr(self, y_true, predictions): """Mean absolute log ratio.""" return np.mean(np.abs(np.log(1 + predictions) - np.log(1 + y_true))) def report(self, name=None): if name is not None: print_string = '{} results'.format(name) print(print_string) print('~' * len(print_string)) print('RMSE: {:2.3f}\nMAE: {:2.3f}\nMALR: {:2.3f}'.format( self.rmse, self.mae, self.malr))
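# --- Usage sketch, not part of the original module ---------------------------
# Minimal example (the numbers are invented) for the Metrics helper defined
# above: it reports RMSE, MAE and the mean absolute log ratio for a pair of
# actual/predicted arrays.
def _metrics_demo():
    y_true = np.array([10.0, 12.0, 9.0, 11.0])
    y_pred = np.array([11.0, 11.5, 8.0, 12.0])
    m = Metrics(y_true, y_pred)
    m.report(name='toy data')  # prints the three scores for the toy arrays
    return m.rmse, m.mae, m.malr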
apache-2.0
7,227,592,053,026,123,000
35.609091
80
0.619071
false
DailyActie/Surrogate-Model
01-codes/scikit-learn-master/benchmarks/bench_isotonic.py
1
3056
""" Benchmarks of isotonic regression performance. We generate a synthetic dataset of size 10^n, for n in [min, max], and examine the time taken to run isotonic regression over the dataset. The timings are then output to stdout, or visualized on a log-log scale with matplotlib. This allows the scaling of the algorithm with the problem size to be visualized and understood. """ from __future__ import print_function import argparse import gc from datetime import datetime import matplotlib.pyplot as plt import numpy as np from sklearn.isotonic import isotonic_regression from sklearn.utils.bench import total_seconds def generate_perturbed_logarithm_dataset(size): return np.random.randint(-50, 50, size=n) \ + 50. * np.log(1 + np.arange(n)) def generate_logistic_dataset(size): X = np.sort(np.random.normal(size=size)) return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X)) DATASET_GENERATORS = { 'perturbed_logarithm': generate_perturbed_logarithm_dataset, 'logistic': generate_logistic_dataset } def bench_isotonic_regression(Y): """ Runs a single iteration of isotonic regression on the input data, and reports the total time taken (in seconds). """ gc.collect() tstart = datetime.now() isotonic_regression(Y) delta = datetime.now() - tstart return total_seconds(delta) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Isotonic Regression benchmark tool") parser.add_argument('--iterations', type=int, required=True, help="Number of iterations to average timings over " "for each problem size") parser.add_argument('--log_min_problem_size', type=int, required=True, help="Base 10 logarithm of the minimum problem size") parser.add_argument('--log_max_problem_size', type=int, required=True, help="Base 10 logarithm of the maximum problem size") parser.add_argument('--show_plot', action='store_true', help="Plot timing output with matplotlib") parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(), required=True) args = parser.parse_args() timings = [] for exponent in range(args.log_min_problem_size, args.log_max_problem_size): n = 10 ** exponent Y = DATASET_GENERATORS[args.dataset](n) time_per_iteration = \ [bench_isotonic_regression(Y) for i in range(args.iterations)] timing = (n, np.mean(time_per_iteration)) timings.append(timing) # If we're not plotting, dump the timing to stdout if not args.show_plot: print(n, np.mean(time_per_iteration)) if args.show_plot: plt.plot(*zip(*timings)) plt.title("Average time taken running isotonic regression") plt.xlabel('Number of observations') plt.ylabel('Time (s)') plt.axis('tight') plt.loglog() plt.show()
mit
5,887,678,171,284,266,000
32.217391
77
0.646924
false
RuthAngus/granola
granola/seismology/GProtation.py
1
5438
# This script contains the prior, lhf and logprob functions, plus plotting
# routines.

from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import george
from george.kernels import ExpSine2Kernel, ExpSquaredKernel, WhiteKernel
import emcee3
import corner


class MyModel(object):
    """
    Model for emcee3
    """
    def __init__(self, x, y, yerr, p_init, p_max):
        self.p_init = p_init
        self.p_max = p_max
        self.x = x
        self.y = y
        self.yerr = yerr

    def lnGauss(self, t, mu, sigma):
        return -.5 * ((t - mu)**2/(.5 * sigma**2))

    def Glnprior(self, theta):
        """
        theta = A, l, G, sigma, period
        """
        mu = np.array([-13, 6.2, -1.4, -17, self.p_init])
        sigma = np.array([2.7, 1.5, 1.5, 5, self.p_init * 2])
        if np.log(.5) < theta[4] < self.p_max and 0 < theta[1]:
            return np.sum(self.lnGauss(theta, mu, sigma))
        return -np.inf

    def lnlike_split(self, theta):
        return np.sum([self.lnlike(theta, self.x[i], self.y[i], self.yerr[i])
                       for i in range(len(self.x))])

    def lnlike(self, theta, xi, yi, yerri):
        theta = np.exp(theta)
        k = theta[0] * ExpSquaredKernel(theta[1]) \
            * ExpSine2Kernel(theta[2], theta[4]) + WhiteKernel(theta[3])
        gp = george.GP(k, solver=george.HODLRSolver)
        try:
            gp.compute(xi, np.sqrt(theta[3] + yerri**2))
        except (ValueError, np.linalg.LinAlgError):
            return 10e25
        return gp.lnlikelihood(yi, quiet=True)


def make_plot(sampler, xb, yb, yerrb, ID, RESULTS_DIR, trths, traces=False,
              tri=False, prediction=True):

    _, ndims = np.shape(sampler.get_coords(flat=True))
    flat = sampler.get_coords(flat=True)
    logprob = sampler.get_log_probability(flat=True)
    mcmc_res = list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                        zip(*np.percentile(flat, [16, 50, 84], axis=0))))
    med = np.concatenate([np.array(mcmc_res[i]) for i in range(len(mcmc_res))])
    print("median values = ", med[::3])
    ml = logprob == max(logprob)
    maxlike = flat[np.where(ml)[0][0], :]
    print("max like = ", maxlike)
    print("\n", np.exp(np.array(maxlike[-1])), "period (days)", "\n")

    r = np.concatenate((maxlike, med))

    # calculate autocorrelation times
    try:
        acorr_t = emcee3.autocorr.integrated_time(flat, c=1)
    except:
        acorr_t = emcee3.autocorr.integrated_time(flat)

    # save data
    df = pd.DataFrame({"N": [ID], "A_max": [r[0]], "l_max": [r[1]],
                       "gamma_max": [r[2]], "period_max": [r[3]],
                       "sigma_max": [r[4]], "A": [r[5]], "A_errp": [r[6]],
                       "A_errm": [r[7]], "l": [r[8]], "l_errp": [r[9]],
                       "l_errm": [r[10]], "gamma": [r[11]],
                       "gamma_errp": [r[12]], "gamma_errm": [r[13]],
                       "sigma": [r[14]], "sigma_errp": [r[15]],
                       "sigma_errm": [r[16]], "period": [r[17]],
                       "period_errp": [r[18]], "period_errm": [r[19]],
                       "acorr_A": acorr_t[0], "acorr_l": acorr_t[1],
                       "acorr_gamma": acorr_t[2], "acorr_sigma": acorr_t[3],
                       "acorr_period": acorr_t[4]})
    df.to_csv(os.path.join(RESULTS_DIR, "{0}_mcmc_results.txt".format(ID)))

    fig_labels = ["ln(A)", "ln(l)", "ln(G)", "ln(s)", "ln(P)", "lnprob"]

    if traces:
        print("Plotting traces")
        for i in range(ndims):
            plt.clf()
            plt.plot(sampler.get_coords()[:, :, i], 'k-', alpha=0.3)
            plt.ylabel(fig_labels[i])
            plt.savefig(os.path.join(RESULTS_DIR,
                                     "{0}_{1}.png".format(ID, fig_labels[i])))

    if tri:
        print("Making triangle plot")
        fig = corner.corner(flat, labels=fig_labels,
                            quantiles=[.16, .5, .84], show_titles=True)
        fig.savefig(os.path.join(RESULTS_DIR, "{0}_triangle".format(ID)))
        print(os.path.join("{0}_triangle.png".format(ID)))

    if prediction:
        if len(xb) > 1:  # if the data is a list of lists.
            try:
                x = [i for j in xb for i in j]
                y = [i for j in yb for i in j]
                yerr = [i for j in yerrb for i in j]
            except TypeError:  # if the data are just a single list.
                x, y, yerr = xb, yb, yerrb
        else:  # if the data is a list of a single list.
            x, y, yerr = xb[0], yb[0], yerrb[0]

        print("plotting prediction")
        theta = np.exp(np.array(maxlike))
        k = theta[0] * ExpSquaredKernel(theta[1]) \
            * ExpSine2Kernel(theta[2], theta[4]) + WhiteKernel(theta[3])
        gp = george.GP(k, solver=george.HODLRSolver)
        gp.compute(x-x[0], yerr)
        xs = np.linspace((x-x[0])[0], (x-x[0])[-1], 1000)
        mu, cov = gp.predict(y, xs)

        plt.clf()
        plt.errorbar(x-x[0], y, yerr=yerr, fmt="k.", capsize=0)
        plt.xlabel("Time (days)")
        plt.ylabel("Normalised Flux")
        plt.plot(xs, mu, color='#0066CC')
        plt.xlim(min(x-x[0]), max(x-x[0]))
        plt.savefig(os.path.join(RESULTS_DIR, "{0}_prediction".format(ID)))
        print(os.path.join(RESULTS_DIR, "{0}_prediction.png".format(ID)))

    return r
mit
9,039,887,474,962,364,000
37.842857
78
0.51655
false
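The record above couples a quasi-periodic george kernel (amplitude, evolution timescale, smoothness, white noise and period, all sampled in log space) to an emcee3 sampler. The sketch below is not part of that repository; it only illustrates how such a five-element log-parameter vector maps onto the kernel and how the resulting Gaussian-process log-likelihood could be evaluated on synthetic data. It assumes the same george 0.2-style API used in the file (ExpSine2Kernel(gamma, period), gp.lnlikelihood), and the synthetic data and parameter values are invented.

# Minimal sketch (not from the repository above) of the kernel construction and
# log-likelihood evaluation for one walker position; assumes george 0.2-style API.
import numpy as np
import george
from george.kernels import ExpSine2Kernel, ExpSquaredKernel, WhiteKernel


def quasi_periodic_lnlike(theta, x, y, yerr):
    """theta = ln(A), ln(l), ln(G), ln(sigma), ln(P); x, y, yerr are 1D arrays."""
    A, l, G, sigma, P = np.exp(theta)
    kernel = A * ExpSquaredKernel(l) * ExpSine2Kernel(G, P) + WhiteKernel(sigma)
    gp = george.GP(kernel)
    try:
        gp.compute(x, np.sqrt(sigma + yerr ** 2))
    except (ValueError, np.linalg.LinAlgError):
        return -np.inf  # reject positions where the GP cannot be factorised
    return gp.lnlikelihood(y, quiet=True)


# Fake data: a noisy sinusoid with a 7-day period, standing in for a light curve.
rng = np.random.RandomState(42)
t = np.sort(rng.uniform(0, 30, 120))
flux = np.sin(2 * np.pi * t / 7.0) + 0.1 * rng.randn(t.size)
flux_err = 0.1 * np.ones_like(t)

theta0 = np.log([1.0, 10.0, 1.0, 0.01, 7.0])
print(quasi_periodic_lnlike(theta0, t, flux, flux_err))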
tnipen/snowproduction
run.py
1
13301
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pylab as mpl
import netCDF4
import verif.util
import sys
import matplotlib.colors
import argparse
import matplotlib
import mpl_toolkits.basemap  # provides Basemap, used in plot()
reload(sys)
sys.setdefaultencoding('ISO-8859-1')

__version__ = "0.1.0"
__days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]


def main():
    parser = argparse.ArgumentParser(description="Create maps of snow production potential")
    parser.add_argument('--version', action="version", version=__version__)
    subparsers = parser.add_subparsers(title="valid commands", dest="command")

    p_compute = subparsers.add_parser('compute', help='Compute hours')
    p_compute.add_argument('-d', help='Dates', dest="dates", type=parse_dates)
    p_compute.add_argument('-r', required=True, type=float, help='Threshold (degrees C)', dest="threshold")
    p_compute.add_argument('-f', type=str, help='Output filename', dest="filename")
    p_compute.add_argument('-month', help='Compute each month, then sum', action="store_true")
    p_compute.add_argument('-debug', help="Show debug information", action="store_true")
    p_compute.add_argument('-drybulb', help="Should the dry bulb temperature be used?", action="store_true")
    p_compute.add_argument('-i', help="Read values from this text file", dest="ifilename")

    p_plot = subparsers.add_parser('plot', help='Plot hours')
    p_plot.add_argument('file', type=str, help='Input filename')
    p_plot.add_argument('-xlim', help='x-axis limits', type=verif.util.parse_numbers)
    p_plot.add_argument('-ylim', help='y-axis limits', type=verif.util.parse_numbers)
    p_plot.add_argument('-edges', help='Colorbar edges', type=verif.util.parse_numbers)
    p_plot.add_argument('-cmap', help='Colormap', type=str)
    p_plot.add_argument('-maptype', help='maptype', type=str)
    p_plot.add_argument('-debug', help="Show debug information", action="store_true")
    p_plot.add_argument('-f', metavar="file", help="Plot to this file", dest="ofile")
    p_plot.add_argument('-fs', help="Figure size width,height", dest="figsize", type=verif.util.parse_numbers)
    p_plot.add_argument('-dpi', type=int, default=300, help="Dots per inch in figure")
    p_plot.add_argument('-tight', help="Without any border padding. Useful for exporting just the values.", action="store_true")
    p_plot.add_argument('-legfs', type=int, default=10, help="Legend font size")
    p_plot.add_argument('-fontsize', type=int, default=12, help="Font size")

    args = parser.parse_args()

    if args.command == "compute":
        [lats, lons, values] = get_values(args)
        if args.filename is None:
            print values
        else:
            save(lats, lons, values, args.filename)
    elif args.command == "plot":
        [lats, lons, values] = load_finished_file(args.file)
        plot(lats, lons, values, args)


def plot(lats, lons, values, args):
    """ Creates the plot

    Arguments:
       lats (np.array): 2D array with latitudes
       lons (np.array): 2D array with longitudes
       values (np.array): 2D array with number of hours
    """
    font = {'family': 'normal', 'weight': 'bold', 'size': args.fontsize}
    font = {'sans-serif': 'Arial', 'family': 'san-serif', 'size': args.fontsize}
    matplotlib.rc('font', **font)
    mpl.clf()
    dlat = 0
    dlon = 0
    cmap = mpl.cm.RdBu
    if args.cmap is not None:
        cmap = args.cmap
    if args.maptype is not None:
        llcrnrlat = max(-90, np.min(lats) - dlat / 10)
        urcrnrlat = min(90, np.max(lats) + dlat / 10)
        llcrnrlon = np.min(lons) - dlon / 10
        urcrnrlon = np.max(lons) + dlon / 10
        llcrnrlat = 56
        urcrnrlat = 72
        llcrnrlon = 0
        urcrnrlon = 30
        res = verif.util.get_map_resolution([llcrnrlat, urcrnrlat], [llcrnrlon, urcrnrlon])
        if args.xlim is not None:
            llcrnrlon = args.xlim[0]
            urcrnrlon = args.xlim[1]
        if args.ylim is not None:
            llcrnrlat = args.ylim[0]
            urcrnrlat = args.ylim[1]
        map = mpl_toolkits.basemap.Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
                                           urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
                                           projection='tmerc', lat_0=60, lon_0=10, resolution=res)
        map.drawcoastlines(linewidth=0.25)
        map.drawcountries(linewidth=0.25)
        map.drawmapboundary()
        [x, y] = map(lons, lats)
        if args.edges is None:
            mpl.contourf(x, y, values, cmap=cmap, extend="both")
        else:
            # mpl.contour(x, y, values, [0,1000,2000,3000,4000,5000,6000,7000,8000], colors="k", linewidths=0.3)
            mpl.contourf(x, y, values, args.edges, cmap=cmap, extend="both")
    else:
        if args.edges is None:
            # mpl.contourf(values, cmap=cmap, extend="both")
            mpl.imshow(values, cmap=cmap)
        else:
            print args.edges
            a = 0.25
            b = 0.50
            cdict = {'red': [(0.0, 0.68, 0.9), (a, 0.99, 0.19), (b, 0.855, 0.776), (1, 0.2, 1)],
                     'green': [(0, 0.68, 0.33), (a, 0.82, 0.639), (b, 0.854, 0.86), (1, 0.51, 1)],
                     'blue': [(0, 0.68, 0.05), (a, 0.635, 0.329), (b, 0.922, 0.94), (1, 0.74, 1)]}
            epic = matplotlib.colors.LinearSegmentedColormap('epic', cdict)
            mpl.register_cmap(cmap=epic)
            mpl.contourf(values, args.edges, cmap=cmap, extend="max")
    if args.legfs is not None and args.legfs > 0:
        cb = mpl.colorbar(extend="both")  # , ax=ax)
        cb.ax.set_position([0.05, 0.4, 0.1, 0.5])
        cb.set_ticks(np.linspace(0, 4000, 5))  # 16
        cb.set_label(u"Snøproduksjonspotensial (timer/år)", labelpad=-120, fontsize=26)
    mpl.gca().set_position([0, 0, 1, 1])

    if args.figsize is not None:
        mpl.gcf().set_size_inches((args.figsize), forward=True)

    if args.ofile is None:
        mpl.show()
    else:
        mpl.savefig(args.ofile, dpi=args.dpi)


def parse_dates(string):
    """ Translate dates in the form 20130101:20140101 into an array of date integers """
    dates = verif.util.parse_numbers(string, True)
    dates = [int(date) for date in dates]
    return dates


def load_finished_file(filename):
    """ Retrieve lat, lon, and values from file """
    file = netCDF4.Dataset(filename, 'r')
    lats = verif.util.clean(file.variables["latitude"])
    lons = verif.util.clean(file.variables["longitude"])
    values = verif.util.clean(file.variables["snow_production_potential"])
    file.close()
    return [lats, lons, values]


def wetbulb(temperature, rh):
    """ Computes the wetbulb temperature of arrays

    Arguments:
       rh: surface relative humidity between 0 and 1
       temperature: surface temperature in degrees C

    Returns:
       twet: surface wetbulb temperature in degrees C
    """
    P = 101.325
    e = rh*0.611*np.exp((17.63*temperature)/(temperature+243.04))
    Td = (116.9 + 243.04*np.log(e))/(16.78-np.log(e))
    gamma = 0.00066 * P
    delta = (4098*e)/pow(Td+243.04, 2)
    TWet = (gamma * temperature + delta * Td)/(gamma + delta)
    TWet[e < 1e-9] = -50
    return TWet


def snow_production(temperature, rh, threshold=0, use_wetbulb=True):
    if use_wetbulb:
        return wetbulb(temperature, rh) < threshold
    else:
        return temperature < threshold


def save(lats, lons, values, filename, x=None, y=None, proj=None):
    """ Creates a netcdf file with snow production values

    Arguments:
       lats: 2D numpy array with latitudes
       lons: 2D numpy array with longitudes
       values: 2D numpy array with snow production values
       filename (str): Write to this filename
    """
    file = netCDF4.Dataset(filename, 'w', format="NETCDF3_CLASSIC")
    file.createDimension("x", lats.shape[1])
    file.createDimension("y", lats.shape[0])
    vLat = file.createVariable("latitude", "f4", ("y", "x"))
    vLon = file.createVariable("longitude", "f4", ("y", "x"))
    vValues = file.createVariable("snow_production_potential", "f4", ("y", "x"))
    if x is not None:
        vX = file.createVariable("x", "f4", ("x"))
    if y is not None:
        vY = file.createVariable("y", "f4", ("y"))
    if proj is not None:
        vProj = file.createVariable("proj", "string", None)
    vLat[:] = lats
    vLat.units = "degrees_north"
    vLon[:] = lons
    vLon.units = "degrees_east"
    vValues[:] = values
    vValues.units = "hours"
    vValues.coordinates = "longitude latitude"
    if x is not None:
        vX[:] = x
    if y is not None:
        vY[:] = y
    if proj is not None:
        vProj[:] = proj
    file.close()


def get_values(args):
    if args.ifilename is not None:
        return get_values_text(args)
    else:
        return get_values_netcdf(args)


def get_values_text(args):
    file = open(args.ifilename)
    header = None
    index = 0
    total = [0]*12
    count = [0]*12
    for line in file:
        if header is None:
            header = line
        else:
            words = [word for word in line.strip().split(' ') if word != ""]
            if len(words) == 7 and words[5] != "x" and words[6] != "x" and words[5] != "-" and words[6] != "-":
                month = int(words[2])
                m = month - 1
                t2 = [float(words[5])]
                rh2 = [float(words[6]) / 100.0]
                total[m] += np.sum(snow_production(np.array(t2), np.array(rh2), args.threshold))
                count[m] += 1
    hours = 0
    for m in range(0, 12):
        if count[m] == 0:
            verif.util.warning("Missing data for month %d" % (m + 1))
        else:
            temp = total[m] * __days_in_month[m] / count[m] * 24
            hours += temp
            print "Values for month %d: %d %d" % (m + 1, count[m], temp)
    return [1, 1, hours]


def get_values_netcdf(args):
    lats = None
    lons = None
    x = None
    y = None
    proj = None
    tindex = range(6, 30)
    if args.month:
        # Store values for each month
        total = [None]*12
        count = [0]*12
    else:
        total = None
        count = 0
    for date in args.dates:
        if args.debug:
            print date
        year = int(date / 10000)
        month = int(date / 100) % 100
        day = int(date % 100)
        if date <= 20121231:
            dir = "/lustre/storeB/users/lisesg/harmonie/AM2p5_MIST2_c38h12/archive/"
            ifilename = "%s/%04d/%02d/%02d/00/AM2p5_MIST2_c38h12_%d00_fp.nc" % (dir, year, month, day, date)
        elif date <= 20131221:
            dir = "/lustre/storeB/immutable/archive/projects/MIST2/AM2p5_MIST2/archive/"
            ifilename = "%s/%04d/%02d/%02d/00/AM2p5_MIST2_%d00_fp.nc" % (dir, year, month, day, date)
        elif date <= 20140430:
            # AROME Norway
            continue
        elif date <= 20140616:
            dir = "/lustre/storeB/immutable/short-term-archive/DNMI_AROME_METCOOP/"
            ifilename = "%s/%04d/%02d/%02d/arome_metcoop2_5km_%d_00.nc" % (dir, year, month, day, date)
        else:
            dir = "/lustre/storeB/immutable/short-term-archive/DNMI_AROME_METCOOP/"
            ifilename = "%s/%04d/%02d/%02d/AROME_MetCoOp_00_fp.nc_%d" % (dir, year, month, day, date)
        try:
            ifile = netCDF4.Dataset(ifilename, 'r')
            if lats is None:
                lats = verif.util.clean(ifile.variables["latitude"])
                lons = verif.util.clean(ifile.variables["longitude"])
                # x = verif.util.clean(Ifile.variables["x"])
                # y = verif.util.clean(Ifile.variables["y"])
                # proj = verif.util.clean(Ifile.variables["proj"])
            t2 = verif.util.clean(ifile.variables["air_temperature_2m"][tindex, :, :, :])-273.15
            rh2 = verif.util.clean(ifile.variables["relative_humidity_2m"][tindex, :, :, :])
            curr_hours = snow_production(t2, rh2, args.threshold, not args.drybulb)
            curr_hours = np.squeeze(np.sum(curr_hours, axis=0))
            if args.month:
                m = month - 1
                if total[m] is None:
                    total[m] = curr_hours
                else:
                    total[m] += curr_hours
                count[m] += 1
            else:
                if total is None:
                    total = curr_hours
                else:
                    total += curr_hours
                count += 1
            ifile.close()
        except Exception as e:
            print "Could not parse %s" % ifilename
            print e
    if args.month:
        hours = None
        for m in range(0, 12):
            if total[m] is None:
                verif.util.warning("Missing data for month %d" % (m + 1))
            else:
                if hours is None:
                    hours = np.zeros(total[m].shape, int)
                print "Values for month %d: %d" % (m + 1, count[m])
                hours += total[m] * __days_in_month[m] / count[m]
    else:
        hours = total * 1.0 / count * 365
    return [lats, lons, hours]


if __name__ == '__main__':
    main()
gpl-2.0
-4,191,752,475,699,130,400
36.567797
128
0.564779
false
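run.py above counts, for each forecast hour, whether the wet-bulb temperature falls below a threshold. The hypothetical scalar version below is not part of the repository; it copies the Magnus-type coefficients from the wetbulb() routine and assumes standard pressure, to make the unit conventions and the wet-bulb/dry-bulb difference concrete.

# Hypothetical worked example (not from run.py): the same wet-bulb approximation
# for a single temperature/humidity pair. Temperature in degrees C, relative
# humidity as a fraction in [0, 1], pressure in kPa.
import numpy as np


def wetbulb_scalar(temperature, rh, pressure=101.325):
    e = rh * 0.611 * np.exp((17.63 * temperature) / (temperature + 243.04))  # vapour pressure (kPa)
    Td = (116.9 + 243.04 * np.log(e)) / (16.78 - np.log(e))                  # dew point (degrees C)
    gamma = 0.00066 * pressure
    delta = (4098 * e) / (Td + 243.04) ** 2
    return (gamma * temperature + delta * Td) / (gamma + delta)


# At -2 C and 50% relative humidity the wet-bulb temperature is about -4.2 C, so
# with a threshold of -4 C this hour counts towards snow production, while the
# dry-bulb test (-2 < -4) would reject it.
print(wetbulb_scalar(-2.0, 0.5))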
trovdimi/wikilinks
heatmaps.py
1
18658
import numpy as np import numpy.random import matplotlib.pyplot as plt import cPickle as pickle import MySQLdb from wsd.database import MySQLDatabase import matplotlib.cm as cm from matplotlib.colors import LogNorm, Normalize, BoundaryNorm, PowerNorm from conf import * __author__ = 'dimitrovdr' from matplotlib import style style.use('acm-3col') import pylab params = { 'font.family' : 'serif', 'font.serif' : ['Times New Roman'], 'font.size' : 7 } pylab.rcParams.update(params) def clicks_heatmap(): print 'loading' db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME) db_worker_view = db.get_work_view() coords = db_worker_view.retrieve_all_links_coords_clicks() print 'coord loaded' links = {} x = [] y = [] values = [] confident_values = [] not_confident_values = [] x_conf = [] y_conf = [] x_not_conf = [] y_not_conf = [] number_of_not_confident_clicks=0 number_of_confident_clicks = 0 number_of_valid_normed_links=0 for coord in coords: try: v = links[coord['key']] links[coord['key']]+=1 except: links[coord['key']]=0 for coord in coords: x_normed = float(coord['x'])/float(1920) y_normed = float(coord['y'])/float(coord['page_length']) if x_normed <=1.0 and y_normed <=1.0: x.append(x_normed) y.append(y_normed) number_of_valid_normed_links+=1 if links[coord['key']]==0: x_conf.append(x_normed) y_conf.append(y_normed) values.append(float(coord['counts'])) number_of_confident_clicks+=1 confident_values.append(coord['counts']) else: x_not_conf.append(x_normed) y_not_conf.append(y_normed) values.append(float(coord['counts'])/float(links[coord['key']])+1.0) number_of_not_confident_clicks+=1 not_confident_values.append(float(coord['counts'])/float(links[coord['key']])) print '###########' print sum(values) print sum(confident_values) print number_of_confident_clicks print sum(not_confident_values) print number_of_not_confident_clicks print number_of_valid_normed_links print len(coords) print '###########' heatmap, xedges, yedges = np.histogram2d(x_conf, y_conf, bins=100, weights=confident_values) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ] fig_size = (2.4, 2) #fig_size = (3.5, 3) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Log Normalized") plt.show() plt.savefig('output/clicks_heatmap_lognormed_self_loop_confident.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Normalized") plt.show() plt.savefig('output/clicks_heatmap_normed_self_loop_confident.pdf') print "conf done" heatmap, xedges, yedges = np.histogram2d(x_not_conf, y_not_conf, bins=100, weights=not_confident_values) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ] fig_size = (2.4, 2) #fig_size = (3.5, 3) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Log Normalized") plt.show() plt.savefig('output/clicks_heatmap_lognormed_self_loop_not_confident.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Normalized") plt.show() plt.savefig('output/clicks_heatmap_normed_self_loop_not_confident.pdf') print " not conf done" heatmap, 
xedges, yedges = np.histogram2d(x, y, bins=100, weights=values) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ] fig_size = (2.4, 2) #fig_size = (3.5, 3) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Log Normalized") plt.show() plt.savefig('output/clicks_heatmap_lognormed_self_loop_1.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Normalized") plt.show() plt.savefig('output/clicks_heatmap_normed_self_loop_1.pdf') print "done" def clicks_heatmap_first_occ(): print 'loading' db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME) db_worker_view = db.get_work_view() coords = db_worker_view.retrieve_all_links_coords_clicks_first_occ() print 'coord loaded' links = {} x = [] y = [] values = [] for link in coords.values(): x_normed = float(link['x'])/float(1920) y_normed = float(link['y'])/float(link['page_length']) if x_normed <=1.0 and y_normed <=1.0: x.append(x_normed) y.append(y_normed) values.append(float(link['counts'])) heatmap, xedges, yedges = np.histogram2d(x, y, bins=100, weights=values) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ] fig_size = (2.4, 2) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Log Normalized") plt.show() plt.savefig('output/clicks_heatmap_lognormed_self_loop_first_occ.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Normalized") plt.show() plt.savefig('output/clicks_heatmap_normed_self_loop_first_occ.pdf') print "done" def clicks_heatmap_total(): print 'loading' db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME) db_worker_view = db.get_work_view() coords = db_worker_view.retrieve_all_links_coords_clicks() print 'coord loaded' links = {} x = [] y = [] values = [] for coord in coords: x_normed = float(coord['x'])/float(1920) y_normed = float(coord['y'])/float(coord['page_length']) if x_normed <=1.0 and y_normed <=1.0: x.append(x_normed) y.append(y_normed) values.append(float(coord['counts'])) heatmap, xedges, yedges = np.histogram2d(x, y, bins=100, weights=values) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0] ] fig_size = (2.4, 2) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Log Normalized") plt.show() plt.savefig('output/clicks_heatmap_lognormed_self_loop_total.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Clicks Heatmap Normalized") plt.show() plt.savefig('output/clicks_heatmap_normed_self_loop_total.pdf') print "done" def links_heatmap(): #http://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set # Get URLs from a text file, remove white space. 
print 'loading' db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME) db_worker_view = db.get_work_view() coords = db_worker_view.retrieve_all_links_coords() print 'coord loaded' x=[] y=[] page_lenghts = db_worker_view.retrieve_all_page_lengths() print 'lenghts loaded' for coord in coords: x_normed = float(coord['x'])/float(1920) y_normed = float(coord['y'])/float(page_lenghts[coord['source_article_id']]) if x_normed <=1.0 and y_normed <=1.0: x.append(x_normed) y.append(y_normed) heatmap, xedges, yedges = np.histogram2d(x, y, bins=100) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]] fig_size = (2.4, 2) #fig_size = (3.5, 3) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Log Normalized") plt.show() plt.savefig('output/links_heatmap_lognormed_self_loop.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Normalized") plt.show() plt.savefig('output/links_heatmap_normed_self_loop.pdf') print "done" def multiple_links_heatmap(): print 'loading' db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME) db_worker_view = db.get_work_view() coords = db_worker_view.retrieve_all_links_multpile_occ() print 'coord loaded' page_lenghts = db_worker_view.retrieve_all_page_lengths() print 'lenghts loaded' links = {} x = [] y = [] x_conf = [] y_conf = [] x_not_conf = [] y_not_conf = [] number_of_not_confident_clicks=0 number_of_confident_clicks = 0 number_of_valid_normed_links=0 for coord in coords: try: v = links[coord['key']] links[coord['key']]+=1 except: links[coord['key']]=0 for coord in coords: x_normed = float(coord['x'])/float(1920) y_normed = float(coord['y'])/float(page_lenghts[coord['key'][0]]) if x_normed <=1.0 and y_normed <=1.0: x.append(x_normed) y.append(y_normed) number_of_valid_normed_links+=1 if links[coord['key']]==0: x_conf.append(x_normed) y_conf.append(y_normed) number_of_confident_clicks+=1 else: x_not_conf.append(x_normed) y_not_conf.append(y_normed) number_of_not_confident_clicks+=1 print '###########' print number_of_confident_clicks print number_of_not_confident_clicks print number_of_valid_normed_links print len(coords) print '###########' heatmap, xedges, yedges = np.histogram2d(x_conf, y_conf, bins=100) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]] fig_size = (2.4, 2) #fig_size = (3.5, 3) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Log Normalized") plt.show() plt.savefig('output/links_heatmap_lognormed_self_loop_unique.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Normalized") plt.show() plt.savefig('output/links_heatmap_normed_self_loop_unique.pdf') print "unique done" heatmap, xedges, yedges = np.histogram2d(x_not_conf, y_not_conf, bins=100) extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]] fig_size = (2.4, 2) #fig_size = (3.5, 3) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Log Normalized") 
plt.show() plt.savefig('output/links_heatmap_lognormed_self_loop_multiple.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(heatmap , extent=extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Normalized") plt.show() plt.savefig('output/links_heatmap_normed_self_loop_multiple.pdf') print "done" def links_heatmap_rel_prob(): #http://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set # Get URLs from a text file, remove white space. print 'loading' db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME) db_worker_view = db.get_work_view() coords = db_worker_view.retrieve_all_links_coords() x=[] y=[] page_lenghts = db_worker_view.retrieve_all_page_lengths() for coord in coords: x_normed = float(coord['x'])/float(1920) y_normed = float(coord['y'])/float(page_lenghts[coord['source_article_id']]) if x_normed <=1.0 and y_normed <=1.0: x.append(x_normed) y.append(y_normed) links_heatmap_hist, xedges, yedges = np.histogram2d(x, y, normed=True, bins=100) links_extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]] coords = db_worker_view.retrieve_all_links_coords_clicks() print 'coord loaded' links = {} x = [] y = [] values = [] for coord in coords: try: v = links[coord['key']] links[coord['key']]+=1 except: links[coord['key']]=0 for coord in coords: x_normed = float(coord['x'])/float(1920) y_normed = float(coord['y'])/float(coord['page_length']) if x_normed <=1.0 and y_normed <=1.0: x.append(x_normed) y.append(y_normed) if links[coord['key']]==0: #x.append(x_normed) #y.append(y_normed) values.append(float(coord['counts'])) else: values.append(float(coord['counts'])/float(links[coord['key']])) clicks_heatmap_hist, xedges, yedges = np.histogram2d(x, y, bins=100, normed=True, weights=values) clicks_extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]] substraction_hist = np.subtract(clicks_heatmap_hist,links_heatmap_hist) #rel_prob_hist = np.divide(clicks_heatmap_hist, links_heatmap_hist) with np.errstate(divide='ignore', invalid='ignore'): rel_prob_hist = np.divide(clicks_heatmap_hist, links_heatmap_hist) rel_prob_hist[rel_prob_hist == np.inf] = 0 rel_prob_hist = np.nan_to_num(rel_prob_hist) fig_size = (2.4, 2) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(substraction_hist, extent=clicks_extent, origin='upper',norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() plt.show() plt.savefig('output/clicks-links_heatmap_normed_self_loop.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=Normalize(),cmap=plt.get_cmap('jet')) plt.colorbar() plt.show() plt.savefig('output/clicks_over_links_heatmap_normed_self_loop.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(substraction_hist, extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() plt.show() plt.savefig('output/clicks-links_heatmap_lognormed_self_loop.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() plt.show() plt.savefig('output/clicks_over_links_heatmap_lognormed_self_loop.pdf') substraction_hist = np.subtract(links_heatmap_hist, clicks_heatmap_hist) #rel_prob_hist = np.divide(clicks_heatmap_hist, links_heatmap_hist) with np.errstate(divide='ignore', invalid='ignore'): rel_prob_hist = np.divide(links_heatmap_hist, 
clicks_heatmap_hist) rel_prob_hist[rel_prob_hist == np.inf] = 0 rel_prob_hist = np.nan_to_num(rel_prob_hist) plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(substraction_hist, extent=clicks_extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Normalized") plt.show() plt.savefig('output/links-clicks_heatmap_normed_self_loop.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=Normalize(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Normalized") plt.show() plt.savefig('output/links_over_clicks_heatmap_normed_self_loop.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(substraction_hist, extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Normalized") plt.show() plt.savefig('output/links-clicks_heatmap_lognormed_self_loop.pdf') plt.clf() plt.figure(figsize=fig_size) plt.grid(True) plt.imshow(rel_prob_hist , extent=clicks_extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet')) plt.colorbar() #plt.title("Links Heatmap Normalized") plt.show() plt.savefig('output/links_over_clicks_heatmap_lognormed_self_loop.pdf') print "done" if __name__ == '__main__': links_heatmap() clicks_heatmap_first_occ() clicks_heatmap_total() clicks_heatmap() multiple_links_heatmap() links_heatmap_rel_prob()
mit
8,380,294,351,022,908,000
29.200669
115
0.603226
false
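Every figure in heatmaps.py above follows the same recipe: normalise link coordinates to the unit square, bin them with np.histogram2d weighted by click counts, and display the histogram with a log-normalised imshow. The sketch below is a self-contained illustration of that recipe on synthetic data only; it does not touch the wikilinks database, and the concentration of clicks near the top of the page is invented.

# Synthetic illustration (not from heatmaps.py) of the histogram2d + LogNorm pattern.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

rng = np.random.RandomState(0)
screen_width, page_length = 1920.0, 12000.0
x_px = rng.uniform(0, screen_width, 5000)        # horizontal click positions (pixels)
y_px = page_length * rng.beta(2, 5, 5000)        # clicks skewed towards the top of the page
counts = rng.poisson(3, 5000).astype(float)      # per-link click counts

x = x_px / screen_width                          # normalise to [0, 1]
y = y_px / page_length

heatmap, xedges, yedges = np.histogram2d(x, y, bins=100, weights=counts)
extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]

plt.figure(figsize=(2.4, 2))
plt.imshow(heatmap, extent=extent, origin='upper', norm=LogNorm(), cmap=plt.get_cmap('jet'))
plt.colorbar()
plt.savefig('clicks_heatmap_example.pdf')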
quantumlib/Cirq
cirq-core/cirq/experiments/xeb_sampling.py
1
15304
# Copyright 2021 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Estimation of fidelity associated with experimental circuit executions.""" import concurrent import os import time import uuid from concurrent.futures.thread import ThreadPoolExecutor from dataclasses import dataclass from typing import ( Callable, List, Optional, Sequence, Tuple, TYPE_CHECKING, Set, ContextManager, Dict, Any, ) import numpy as np import pandas as pd import tqdm from cirq import ops, devices, value, protocols from cirq.circuits import Circuit from cirq.experiments.random_quantum_circuit_generation import CircuitLibraryCombination if TYPE_CHECKING: import cirq @dataclass(frozen=True) class _Sample2qXEBTask: """Helper container for grouping a circuit to be sampled. `prepared_circuit` is the full-length circuit (with index `circuit_i`) that has been truncated to `cycle_depth` and has a measurement gate on it. """ cycle_depth: int layer_i: int combination_i: int prepared_circuit: 'cirq.Circuit' combination: List[int] class _SampleInBatches: def __init__( self, sampler: 'cirq.Sampler', repetitions: int, combinations_by_layer: List[CircuitLibraryCombination], ): """This closure will execute a list of `tasks` with one call to `run_batch` on the provided sampler for a given number of repetitions. It also keeps a record of the circuit library combinations in order to back out which qubit pairs correspond to each pair index. We tag our return value with this so it is in the resultant DataFrame, which is very convenient for dealing with the results (but not strictly necessary, as the information could be extracted from (`layer_i`, `pair_i`). """ self.sampler = sampler self.repetitions = repetitions self.combinations_by_layer = combinations_by_layer def __call__(self, tasks: List[_Sample2qXEBTask]) -> List[Dict[str, Any]]: prepared_circuits = [task.prepared_circuit for task in tasks] results = self.sampler.run_batch(prepared_circuits, repetitions=self.repetitions) timestamp = time.time() assert len(results) == len(tasks) records = [] for task, nested_result in zip(tasks, results): (result,) = nested_result # remove nesting due to potential sweeps. for pair_i, circuit_i in enumerate(task.combination): pair_measurement_key = str(pair_i) pair = self.combinations_by_layer[task.layer_i].pairs[pair_i] sampled_inds = result.data[pair_measurement_key].values sampled_probs = np.bincount(sampled_inds, minlength=2 ** 2) / len(sampled_inds) records.append( { 'circuit_i': circuit_i, 'cycle_depth': task.cycle_depth, 'sampled_probs': sampled_probs, 'timestamp': timestamp, # Additional metadata to track *how* this circuit # was zipped and executed. 
'layer_i': task.layer_i, 'pair_i': pair_i, 'combination_i': task.combination_i, 'pair': pair, } ) return records def _verify_and_get_two_qubits_from_circuits(circuits: Sequence['cirq.Circuit']): """Make sure each of the provided circuits uses the same two qubits and return them.""" all_qubits_set: Set['cirq.Qid'] = set() all_qubits_set = all_qubits_set.union(*(circuit.all_qubits() for circuit in circuits)) all_qubits_list = sorted(all_qubits_set) if len(all_qubits_list) != 2: raise ValueError( "`circuits` should be a sequence of circuits each operating on the same two qubits." ) return all_qubits_list def _verify_two_line_qubits_from_circuits(circuits: Sequence['cirq.Circuit']): if _verify_and_get_two_qubits_from_circuits(circuits) != devices.LineQubit.range(2): raise ValueError( "`circuits` should be a sequence of circuits each operating " "on LineQubit(0) and LineQubit(1)" ) class _NoProgress: """Dummy (lack of) tqdm-style progress bar.""" def __init__(self, total: int): pass def __enter__( self, ): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def update(self, n: int = 1): pass @dataclass(frozen=True) class _ZippedCircuit: """A fully-wide circuit made by zipping together a bunch of two-qubit circuits and its provenance data. Args: wide_circuit: The zipped circuit on all pairs pairs: The pairs of qubits operated on in the wide circuit. combination: A list of indices into the (narrow) `circuits` library. Each entry indexes the narrow circuit operating on the corresponding pair in `pairs`. This is a given row of the combinations matrix. It is essential for being able to "unzip" the results of the `wide_circuit`. layer_i: Metadata indicating how the `pairs` were generated. This 0-based index is which `GridInteractionLayer` or `Moment` was used for these pairs when calibrating several spacial layouts in one request. This field does not modify any behavior. It is propagated to the output result object. combination_i: Metadata indicating how the `wide_circuit` was zipped. This is the row index of the combinations matrix that identifies this particular combination of component narrow circuits. This field does not modify any behavior. It is propagated to the output result object. """ wide_circuit: 'cirq.Circuit' pairs: List[Tuple['cirq.Qid', 'cirq.Qid']] combination: List[int] layer_i: int combination_i: int def _get_combinations_by_layer_for_isolated_xeb( circuits: Sequence['cirq.Circuit'], ) -> Tuple[List[CircuitLibraryCombination], List['cirq.Circuit']]: """Helper function used in `sample_2q_xeb_circuits`. This creates a CircuitLibraryCombination object for isolated XEB. First, the qubits are extracted from the lists of circuits and used to define one pair. Instead of using `combinations` to shuffle the circuits for each pair, we just use each circuit (in order) for the one pair. """ q0, q1 = _verify_and_get_two_qubits_from_circuits(circuits) circuits = [ circuit.transform_qubits(lambda q: {q0: devices.LineQubit(0), q1: devices.LineQubit(1)}[q]) for circuit in circuits ] return [ CircuitLibraryCombination( layer=None, combinations=np.arange(len(circuits))[:, np.newaxis], pairs=[(q0, q1)], ) ], circuits def _zip_circuits( circuits: Sequence['cirq.Circuit'], combinations_by_layer: List[CircuitLibraryCombination] ) -> List[_ZippedCircuit]: """Helper function used in `sample_2q_xeb_circuits` to zip together circuits. This takes a sequence of narrow `circuits` and "zips" them together according to the combinations in `combinations_by_layer`. 
""" # Check `combinations_by_layer` is compatible with `circuits`. for layer_combinations in combinations_by_layer: if np.any(layer_combinations.combinations < 0) or np.any( layer_combinations.combinations >= len(circuits) ): raise ValueError("`combinations_by_layer` has invalid indices.") zipped_circuits: List[_ZippedCircuit] = [] for layer_i, layer_combinations in enumerate(combinations_by_layer): for combination_i, combination in enumerate(layer_combinations.combinations): wide_circuit = Circuit.zip( *( circuits[i].transform_qubits(lambda q: pair[q.x]) for i, pair in zip(combination, layer_combinations.pairs) ) ) zipped_circuits.append( _ZippedCircuit( wide_circuit=wide_circuit, pairs=layer_combinations.pairs, combination=combination.tolist(), layer_i=layer_i, combination_i=combination_i, ) ) return zipped_circuits def _generate_sample_2q_xeb_tasks( zipped_circuits: List[_ZippedCircuit], cycle_depths: Sequence[int] ) -> List[_Sample2qXEBTask]: """Helper function used in `sample_2q_xeb_circuits` to prepare circuits in sampling tasks.""" tasks: List[_Sample2qXEBTask] = [] for cycle_depth in cycle_depths: for zipped_circuit in zipped_circuits: circuit_depth = cycle_depth * 2 + 1 assert circuit_depth <= len(zipped_circuit.wide_circuit) # Slicing creates a copy, although this isn't documented prepared_circuit = zipped_circuit.wide_circuit[:circuit_depth] prepared_circuit += ops.Moment( ops.measure(*pair, key=str(pair_i)) for pair_i, pair in enumerate(zipped_circuit.pairs) ) tasks.append( _Sample2qXEBTask( cycle_depth=cycle_depth, layer_i=zipped_circuit.layer_i, combination_i=zipped_circuit.combination_i, prepared_circuit=prepared_circuit, combination=zipped_circuit.combination, ) ) return tasks def _execute_sample_2q_xeb_tasks_in_batches( tasks: List[_Sample2qXEBTask], sampler: 'cirq.Sampler', combinations_by_layer: List[CircuitLibraryCombination], repetitions: int, batch_size: int, progress_bar: Callable[..., ContextManager], dataset_directory: Optional[str] = None, ) -> List[Dict[str, Any]]: """Helper function used in `sample_2q_xeb_circuits` to batch and execute sampling tasks.""" n_tasks = len(tasks) batched_tasks = [tasks[i : i + batch_size] for i in range(0, n_tasks, batch_size)] run_batch = _SampleInBatches( sampler=sampler, repetitions=repetitions, combinations_by_layer=combinations_by_layer ) with ThreadPoolExecutor(max_workers=2) as pool: futures = [pool.submit(run_batch, task_batch) for task_batch in batched_tasks] records = [] with progress_bar(total=len(batched_tasks) * batch_size) as progress: for future in concurrent.futures.as_completed(futures): new_records = future.result() if dataset_directory is not None: os.makedirs(f'{dataset_directory}', exist_ok=True) protocols.to_json(new_records, f'{dataset_directory}/xeb.{uuid.uuid4()}.json') records.extend(new_records) progress.update(batch_size) return records def sample_2q_xeb_circuits( sampler: 'cirq.Sampler', circuits: Sequence['cirq.Circuit'], cycle_depths: Sequence[int], *, repetitions: int = 10_000, batch_size: int = 9, progress_bar: Optional[Callable[..., ContextManager]] = tqdm.tqdm, combinations_by_layer: Optional[List[CircuitLibraryCombination]] = None, shuffle: Optional['cirq.RANDOM_STATE_OR_SEED_LIKE'] = None, dataset_directory: Optional[str] = None, ): """Sample two-qubit XEB circuits given a sampler. Args: sampler: A Cirq sampler for executing circuits. circuits: A library of two-qubit circuits generated from `random_rotations_between_two_qubit_circuit` of sufficient length for `cycle_depths`. 
cycle_depths: A sequence of cylce depths at which we will truncate each of the `circuits` to execute. repetitions: Each (circuit, cycle_depth) will be sampled for this many repetitions. batch_size: We call `run_batch` on the sampler, which can speed up execution in certain environments. The number of (circuit, cycle_depth) tasks to be run in each batch is given by this number. progress_bar: A progress context manager following the `tqdm` API or `None` to not report progress. combinations_by_layer: Either `None` or the result of `rqcg.get_random_combinations_for_device`. If this is `None`, the circuits specified by `circuits` will be sampled verbatim, resulting in isolated XEB characterization. Otherwise, this contains all the random combinations and metadata required to combine the circuits in `circuits` into wide, parallel-XEB-style circuits for execution. shuffle: If provided, use this random state or seed to shuffle the order in which tasks are executed. dataset_directory: If provided, save each batch of sampled results to a file `{dataset_directory}/xeb.{uuid4()}.json` where uuid4() is a random string. This can be used to incrementally save results to be analyzed later. Returns: A pandas dataframe with index given by ['circuit_i', 'cycle_depth']. Columns always include "sampled_probs". If `combinations_by_layer` is not `None` and you are doing parallel XEB, additional metadata columns will be attached to the returned DataFrame. """ # Set up progress reporting if progress_bar is None: progress_bar = _NoProgress # Shim isolated-XEB as a special case of combination-style parallel XEB. if combinations_by_layer is None: combinations_by_layer, circuits = _get_combinations_by_layer_for_isolated_xeb(circuits) one_pair = True else: _verify_two_line_qubits_from_circuits(circuits) one_pair = False # Construct fully-wide "zipped" circuits. zipped_circuits = _zip_circuits(circuits, combinations_by_layer) # Construct truncated-with-measurement circuits to run. tasks = _generate_sample_2q_xeb_tasks(zipped_circuits, cycle_depths) if shuffle is not None: shuffle = value.parse_random_state(shuffle) shuffle.shuffle(tasks) # Batch and run tasks. records = _execute_sample_2q_xeb_tasks_in_batches( tasks=tasks, sampler=sampler, combinations_by_layer=combinations_by_layer, repetitions=repetitions, batch_size=batch_size, progress_bar=progress_bar, dataset_directory=dataset_directory, ) # Set up the dataframe. df = pd.DataFrame(records).set_index(['circuit_i', 'cycle_depth']) if one_pair: df = df.drop(['layer_i', 'pair_i', 'combination_i'], axis=1) return df
apache-2.0
2,132,259,397,658,829,000
39.594164
99
0.644015
false
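A brief usage sketch for the sample_2q_xeb_circuits function defined above, run against a noiseless simulator in the isolated (single-pair) mode. It assumes the circuit-library helper generate_library_of_2q_circuits from cirq.experiments.random_quantum_circuit_generation; the library size, cycle depths and repetition count are illustrative only.

# Illustrative isolated-XEB sampling run (parameter values are assumptions, not defaults).
import cirq
from cirq.experiments.random_quantum_circuit_generation import generate_library_of_2q_circuits
from cirq.experiments.xeb_sampling import sample_2q_xeb_circuits

# A small library of random two-qubit circuits, deep enough for the largest cycle depth.
circuits = generate_library_of_2q_circuits(
    n_library_circuits=5,
    two_qubit_gate=cirq.ISWAP ** 0.5,
    max_cycle_depth=20,
    random_state=52,
)

# Sample each circuit at several truncation depths. The result is a DataFrame indexed
# by (circuit_i, cycle_depth) with a 'sampled_probs' column of empirical bitstring
# probabilities for the four two-bit outcomes.
df = sample_2q_xeb_circuits(
    sampler=cirq.Simulator(seed=52),
    circuits=circuits,
    cycle_depths=[5, 10, 20],
    repetitions=1_000,
)
print(df.head())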