repo_name
stringlengths 6
112
| path
stringlengths 4
204
| copies
stringlengths 1
3
| size
stringlengths 4
6
| content
stringlengths 714
891k
| license
stringclasses 15
values | hash
int64 -9,223,135,201,861,841,000
9,223,183,049B
| line_mean
float64 6
99.4
| line_max
int64 17
1k
| alpha_frac
float64 0.25
0.89
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
# Two-asset long/short strategy whose allocation is switched by a "remote"
# ATR signal computed on a third ticker.  Part 1: data download and returns.
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber

Empty = []                    # scratch list, refilled on every optimizer iteration
Dataset = pd.DataFrame()      # one column of results per surviving iteration
Portfolio = pd.DataFrame()    # per-day strategy returns for the current trial
Start = t.time()
Counter = 0

# Input
Ticker1 = 'UVXY'
Ticker2 = '^VIX'
# Remote Signal
Ticker3 = '^VIX'

# Here we go
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
# Remote Signal
Asset3 = YahooGrabber(Ticker3)

# Match lengths: trim the longer of the two tradeable histories so both
# assets cover the same dates, then align the signal asset to them.
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) != len(Asset2):
    if len(Asset1) > len(Asset2):
        Asset1 = Asset1[trim:]
    else:
        Asset2 = Asset2[trim:]
Asset3 = Asset3[-len(Asset2):]
# Asset2 = Asset2[-600:]

# Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close'] / Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close'] / Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)

# Prepare the remote controller
Asset3['LogRet'] = np.log(Asset3['Adj Close'] / Asset3['Adj Close'].shift(1))
Asset3['LogRet'] = Asset3['LogRet'].fillna(0)
# NOTE: the TrueRange/ATR columns are (re)computed inside the optimization
# loop below for each candidate window, so the one-off computation that used
# to sit here (commented out) was removed as dead code.
# Brute Force Optimization: random search over the two allocation pairs
# (a, b) for the quiet regime and (c, d) for the high-volatility regime,
# plus the ATR look-back window e.
iterations = range(0, 3000)
for i in iterations:
    Counter = Counter + 1
    a = rand.random()          # Asset1 weight, quiet regime
    b = 1 - a                  # Asset2 weight, quiet regime
    c = rand.random()          # Asset1 weight, triggered regime
    d = 1 - c                  # Asset2 weight, triggered regime
    e = rand.randint(3, 20)    # ATR window candidate
    window = int(e)
    # True range / ATR of the remote signal for this window
    Asset3['Method1'] = Asset3['High'] - Asset3['Low']
    Asset3['Method2'] = abs(Asset3['High'] - Asset3['Adj Close'].shift(1))
    Asset3['Method3'] = abs(Asset3['Low'] - Asset3['Adj Close'].shift(1))
    Asset3['Method1'] = Asset3['Method1'].fillna(0)
    Asset3['Method2'] = Asset3['Method2'].fillna(0)
    Asset3['Method3'] = Asset3['Method3'].fillna(0)
    Asset3['TrueRange'] = Asset3[['Method1', 'Method2', 'Method3']].max(axis=1)
    Asset3['AverageTrueRange'] = (Asset3['TrueRange'].rolling(window=window,
                                  center=False).sum()) / window
    # Switch allocations when yesterday's TrueRange exceeded its ATR
    Asset1['Position'] = a
    Asset1['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
                                  c, a)
    Asset1['Pass'] = Asset1['LogRet'] * Asset1['Position']
    Asset2['Position'] = b
    Asset2['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
                                  d, b)
    Asset2['Pass'] = Asset2['LogRet'] * Asset2['Position']
    Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)  # Pass a short position
    Portfolio['Asset2Pass'] = Asset2['Pass']  # * (-1)
    Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
    # Reject degenerate / poor candidates early
    if Portfolio['LongShort'].std() == 0:
        continue
    Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
    drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
    MaxDD = max(drawdown)
    if MaxDD > float(.25):
        continue
    dailyreturn = Portfolio['LongShort'].mean()
    if dailyreturn < .002:
        continue
    dailyvol = Portfolio['LongShort'].std()
    sharpe = dailyreturn / dailyvol
    # (the original recomputed Multiplier/drawdown/MaxDD here a second time
    # with identical inputs; the duplicate computation was removed)
    print(Counter)
    # Record the surviving parameter set and its statistics
    Empty.append(a)
    Empty.append(b)
    Empty.append(c)
    Empty.append(d)
    Empty.append(e)
    Empty.append(sharpe)
    Empty.append(sharpe / MaxDD)
    Empty.append(dailyreturn / MaxDD)
    Empty.append(MaxDD)
    Emptyseries = pd.Series(Empty)
    Dataset[0] = Emptyseries.values
    Dataset[i] = Emptyseries.values
    Empty[:] = []
# Harvest results.  Row 6 of Dataset holds sharpe/MaxDD for each iteration.
z1 = Dataset.iloc[6]
w1 = np.percentile(z1, 80)
v1 = []  # this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame()  # this variable stores your financial advisors for specific dataset
for h in z1:
    if h > w1:
        v1.append(h)
for j in v1:
    r = Dataset.columns[(Dataset == j).iloc[6]]
    DS1W = pd.concat([DS1W, Dataset[r]], axis=1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[6]]  # this is the column number of the winner
kfloat = float(k[0])
End = t.time()
print(End - Start, 'seconds later')
print(Dataset[k])

# Re-run the winning parameter set once more for inspection/plotting.
window = int(Dataset[kfloat][4])
Asset3['Method1'] = Asset3['High'] - Asset3['Low']
Asset3['Method2'] = abs(Asset3['High'] - Asset3['Adj Close'].shift(1))
Asset3['Method3'] = abs(Asset3['Low'] - Asset3['Adj Close'].shift(1))
Asset3['Method1'] = Asset3['Method1'].fillna(0)
Asset3['Method2'] = Asset3['Method2'].fillna(0)
Asset3['Method3'] = Asset3['Method3'].fillna(0)
Asset3['TrueRange'] = Asset3[['Method1', 'Method2', 'Method3']].max(axis=1)
Asset3['AverageTrueRange'] = (Asset3['TrueRange'].rolling(window=window,
                              center=False).sum()) / window
# BUGFIX: the original computed Asset1['Pass'] BEFORE assigning the winning
# Asset1['Position'], so the final portfolio used a stale position left over
# from the last optimizer iteration.  Positions are now set first, then the
# per-day pass returns are computed (the duplicated Asset2['Pass'] line that
# partially masked this for Asset2 was removed).
Asset1['Position'] = Dataset[kfloat][0]
Asset1['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
                              Dataset[kfloat][2], Dataset[kfloat][0])
Asset1['Pass'] = Asset1['LogRet'] * Asset1['Position']
Asset2['Position'] = Dataset[kfloat][1]
Asset2['Position'] = np.where(Asset3['TrueRange'].shift(1) > Asset3['AverageTrueRange'].shift(1),
                              Dataset[kfloat][3], Dataset[kfloat][1])
Asset2['Pass'] = Asset2['LogRet'] * Asset2['Position']
Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)
Portfolio['Asset2Pass'] = Asset2['Pass']  # * (-1)
# Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
                                                      figsize=(8, 5))
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe = dailyreturn / dailyvol
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
# conversionfactor = Portfolio['PriceRelative'][-1]
print(max(drawdown2))
# pd.to_pickle(Portfolio, 'VXX:UVXY')
#pd.to_pickle(Portfolio, 'VXX:UVXY') | apache-2.0 | -6,308,838,753,224,976,000 | 36.61194 | 101 | 0.608197 | false |
neutrons/FastGR | addie/processing/idl/table_handler.py | 1 | 22403 | from __future__ import (absolute_import, division, print_function)
#import re
import glob
import os
import numpy as np
from qtpy.QtCore import (Qt)
from qtpy.QtGui import (QCursor)
from qtpy.QtWidgets import (QFileDialog, QMenu, QMessageBox, QTableWidgetSelectionRange)
import addie.processing.idl.populate_master_table
from addie.processing.idl.export_table import ExportTable
from addie.processing.idl.import_table import ImportTable
from addie.utilities.file_handler import FileHandler
from addie.processing.idl.populate_background_widgets import PopulateBackgroundWidgets
from addie.processing.idl.sample_environment_handler import SampleEnvironmentHandler
import addie.processing.idl.step2_gui_handler
from addie.widgets.filedialog import get_save_file
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
class TableHandler(object):
    """Operations on the post-processing sample table: selection handling,
    context menu, copy/paste, S(Q)/temperature plotting and row management.

    All widgets are reached through ``self.parent`` (the main window).
    """

    # Filled by retrieve_list_of_selected_rows(); one metadata dict per
    # checked row.
    list_selected_row = None

    def __init__(self, parent=None):
        self.parent = parent

    def retrieve_list_of_selected_rows(self):
        """Collect metadata for every row whose column-0 checkbox is checked."""
        self.list_selected_row = []
        for _row_index in range(self.parent.postprocessing_ui.table.rowCount()):
            _widgets = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()
            if len(_widgets) > 0:
                _selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()[1]
                if _selected_widget.checkState() == Qt.Checked:
                    _entry = self._collect_metadata(row_index=_row_index)
                    self.list_selected_row.append(_entry)

    def _collect_metadata(self, row_index=-1):
        """Return a dict of the row's cell values (empty list when row_index is -1)."""
        if row_index == -1:
            return []
        _name = self.retrieve_item_text(row_index, 1)
        _runs = self.retrieve_item_text(row_index, 2)
        _sample_formula = self.retrieve_item_text(row_index, 3)
        _mass_density = self.retrieve_item_text(row_index, 4)
        _radius = self.retrieve_item_text(row_index, 5)
        _packing_fraction = self.retrieve_item_text(row_index, 6)
        _sample_shape = self._retrieve_sample_shape(row_index)
        _do_abs_correction = self._retrieve_do_abs_correction(row_index)
        _metadata = {'name': _name,
                     'runs': _runs,
                     'sample_formula': _sample_formula,
                     'mass_density': _mass_density,
                     'radius': _radius,
                     'packing_fraction': _packing_fraction,
                     'sample_shape': _sample_shape,
                     'do_abs_correction': _do_abs_correction}
        return _metadata

    def _retrieve_sample_shape(self, row_index):
        """Return the text of the sample-shape combo box (column 7)."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
        _selected_index = _widget.currentIndex()
        _sample_shape = _widget.itemText(_selected_index)
        return _sample_shape

    def _retrieve_do_abs_correction(self, row_index):
        """Return 'go'/'nogo' from the absorption-correction checkbox (column 8)."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 8).children()[1]
        if (_widget.checkState() == Qt.Checked):
            return 'go'
        else:
            return 'nogo'

    def current_row(self):
        """Return the index of the currently selected table row."""
        _row = self.parent.postprocessing_ui.table.currentRow()
        return _row

    def right_click(self, position=None):
        """Build the table's context menu at the cursor and dispatch the action."""
        _duplicate_row = -1
        _plot_sofq = -1
        _remove_row = -1
        _new_row = -1
        _copy = -1
        _paste = -1
        _cut = -1
        _refresh_table = -1
        _clear_table = -1
        # _import = -1
        # _export = -1
        _check_all = -1
        _uncheck_all = -1
        _undo = -1
        _redo = -1
        _plot_sofq_diff_first_run_row = -1
        _plot_sofq_diff_average_row = -1
        _plot_cryostat = -1
        _plot_furnace = -1
        _invert_selection = -1

        menu = QMenu(self.parent)

        if self.parent.table_selection_buffer == {}:
            paste_status = False
        else:
            paste_status = True

        if (self.parent.postprocessing_ui.table.rowCount() > 0):
            _undo = menu.addAction("Undo")
            _undo.setEnabled(self.parent.undo_button_enabled)
            _redo = menu.addAction("Redo")
            _redo.setEnabled(self.parent.redo_button_enabled)
            menu.addSeparator()
            _copy = menu.addAction("Copy")
            _paste = menu.addAction("Paste")
            self._paste_menu = _paste
            _paste.setEnabled(paste_status)
            _cut = menu.addAction("Clear")
            menu.addSeparator()
            _check_all = menu.addAction("Check All")
            _uncheck_all = menu.addAction("Unchecked All")
            menu.addSeparator()
            _invert_selection = menu.addAction("Inverse Selection")
            menu.addSeparator()

        _new_row = menu.addAction("Insert Blank Row")

        if (self.parent.postprocessing_ui.table.rowCount() > 0):
            _duplicate_row = menu.addAction("Duplicate Row")
            _remove_row = menu.addAction("Remove Row(s)")
            menu.addSeparator()
            _plot_menu = menu.addMenu('Plot')
            _plot_sofq = _plot_menu.addAction("S(Q) ...")
            _plot_sofq_diff_first_run_row = _plot_menu.addAction("S(Q) Diff (1st run)...")
            _plot_sofq_diff_average_row = _plot_menu.addAction("S(Q) Diff (Avg.)...")
            _temp_menu = _plot_menu.addMenu("Temperature")
            _plot_cryostat = _temp_menu.addAction("Cyrostat...")
            _plot_furnace = _temp_menu.addAction("Furnace...")
            menu.addSeparator()
            _refresh_table = menu.addAction("Refresh/Reset Table")
            _clear_table = menu.addAction("Clear Table")

        action = menu.exec_(QCursor.pos())
        # NOTE: this deliberately shadows the bound method with the row index;
        # the other handlers below read ``self.current_row`` as an attribute.
        self.current_row = self.current_row()

        if action == _undo:
            self.parent.action_undo_clicked()
        elif action == _redo:
            self.parent.action_redo_clicked()
        elif action == _copy:
            self._copy()
        elif action == _paste:
            self._paste()
        elif action == _cut:
            self._cut()
        elif action == _duplicate_row:
            self._duplicate_row()
        elif action == _plot_sofq:
            self._plot_sofq()
        elif action == _plot_sofq_diff_first_run_row:
            self._plot_sofq_diff_first_run_row()
        elif action == _plot_sofq_diff_average_row:
            self._plot_sofq_diff_average_row()
        elif action == _plot_cryostat:
            self._plot_temperature(samp_env_choice='cryostat')
        elif action == _plot_furnace:
            self._plot_temperature(samp_env_choice='furnace')
        elif action == _invert_selection:
            self._inverse_selection()
        elif action == _new_row:
            self._new_row()
        elif action == _remove_row:
            self._remove_selected_rows()
        elif action == _refresh_table:
            self._refresh_table()
        elif action == _clear_table:
            self._clear_table()
        elif action == _check_all:
            self.check_all()
        elif action == _uncheck_all:
            self.uncheck_all()

    def _import(self):
        """Load a previously exported table from an ASCII file chosen by the user."""
        _current_folder = self.parent.current_folder
        [_table_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
                                                       caption="Select File",
                                                       directory=_current_folder,
                                                       filter=("text (*.txt);; All Files (*.*)"))
        if not _table_file:
            return
        if isinstance(_table_file, tuple):
            _table_file = _table_file[0]
        new_path = os.path.dirname(_table_file)
        self.parent.current_folder = new_path
        self._clear_table()
        _import_handler = ImportTable(filename=_table_file, parent=self.parent)
        _import_handler.run()
        _pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
        _pop_back_wdg.run()
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _export(self):
        """Save the current table to an ASCII file chosen by the user."""
        _current_folder = self.parent.current_folder
        _table_file, _ = get_save_file(parent=self.parent,
                                       caption="Select File",
                                       directory=_current_folder,
                                       filter={'text (*.txt)': 'txt', 'All Files (*.*)': ''})
        if not _table_file:
            return
        if isinstance(_table_file, tuple):
            _table_file = _table_file[0]
        _file_handler = FileHandler(filename=_table_file)
        _file_handler.check_file_extension(ext_requested='txt')
        _table_file = _file_handler.filename
        _export_handler = ExportTable(parent=self.parent,
                                      filename=_table_file)
        _export_handler.run()

    def _copy(self):
        """Remember the bounds of the current selection for a later paste."""
        _selection = self.parent.postprocessing_ui.table.selectedRanges()
        _selection = _selection[0]
        left_column = _selection.leftColumn()
        right_column = _selection.rightColumn()
        top_row = _selection.topRow()
        bottom_row = _selection.bottomRow()
        self.parent.table_selection_buffer = {'left_column': left_column,
                                              'right_column': right_column,
                                              'top_row': top_row,
                                              'bottom_row': bottom_row}
        self._paste_menu.setEnabled(True)

    def _paste(self, _cut=False):
        """Paste the buffered selection at the current spot (blank cells if *_cut*)."""
        _copy_selection = self.parent.table_selection_buffer
        _copy_left_column = _copy_selection['left_column']
        # make sure selection start at the same column
        _paste_selection = self.parent.postprocessing_ui.table.selectedRanges()
        _paste_left_column = _paste_selection[0].leftColumn()
        if not (_copy_left_column == _paste_left_column):
            QMessageBox.warning(self.parent,
                                "Check copy/paste selection!",
                                "Check your selection! ")
            return
        _copy_right_column = _copy_selection["right_column"]
        _copy_top_row = _copy_selection["top_row"]
        _copy_bottom_row = _copy_selection["bottom_row"]
        _paste_top_row = _paste_selection[0].topRow()
        index = 0
        for _row in range(_copy_top_row, _copy_bottom_row + 1):
            _paste_row = _paste_top_row + index
            for _column in range(_copy_left_column, _copy_right_column + 1):
                # columns 1-6 hold plain text items
                if _column in np.arange(1, 7):
                    if _cut:
                        _item_text = ''
                    else:
                        _item_text = self.retrieve_item_text(_row, _column)
                    self.paste_item_text(_paste_row, _column, _item_text)
                # column 7 is the sample-shape combo box
                if _column == 7:
                    if _cut:
                        _widget_index = 0
                    else:
                        _widget_index = self.retrieve_sample_shape_index(_row)
                    self.set_widget_index(_widget_index, _paste_row)
                # column 8 is the absorption-correction checkbox
                if _column == 8:
                    if _cut:
                        _widget_state = Qt.Unchecked
                    else:
                        _widget_state = self.retrieve_do_abs_correction_state(_row)
                    self.set_widget_state(_widget_state, _paste_row)
            index += 1

    def _inverse_selection(self):
        """Invert the current row selection."""
        selected_range = self.parent.postprocessing_ui.table.selectedRanges()
        nbr_column = self.parent.postprocessing_ui.table.columnCount()
        self.select_all(status=True)
        # Deselect the rows that were selected, widened to the full row width.
        # BUGFIX: the original assigned to ``_range.leftColumn`` and the
        # misspelled ``_range.rightColun``, which never changed the range;
        # build a proper full-width range instead.
        for _range in selected_range:
            _full_row_range = QTableWidgetSelectionRange(_range.topRow(), 0,
                                                         _range.bottomRow(), nbr_column - 1)
            self.parent.postprocessing_ui.table.setRangeSelected(_full_row_range, False)

    def select_all(self, status=True):
        """Select (status=True) or deselect the entire table."""
        nbr_row = self.parent.postprocessing_ui.table.rowCount()
        nbr_column = self.parent.postprocessing_ui.table.columnCount()
        _full_range = QTableWidgetSelectionRange(0, 0, nbr_row - 1, nbr_column - 1)
        self.parent.postprocessing_ui.table.setRangeSelected(_full_range, status)

    def check_all(self):
        """Check the activation checkbox of every row."""
        self.select_first_column(status=True)

    def uncheck_all(self):
        """Uncheck the activation checkbox of every row."""
        self.select_first_column(status=False)

    def select_row(self, row=-1, status=True):
        """Select (or deselect) one full row."""
        nbr_column = self.parent.postprocessing_ui.table.columnCount()
        _range = QTableWidgetSelectionRange(row, 0, row, nbr_column - 1)
        self.parent.postprocessing_ui.table.setRangeSelected(_range, status)

    def check_row(self, row=-1, status=True):
        """Set the column-0 checkbox of *row* to *status*."""
        _widgets = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()
        if len(_widgets) > 0:
            _selected_widget = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()[1]
            _selected_widget.setChecked(status)

    def select_first_column(self, status=True):
        """Set every row's activation checkbox, then refresh the step-2 GUI."""
        for _row in range(self.parent.postprocessing_ui.table.rowCount()):
            _widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
            if len(_widgets) > 0:
                _selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
                _selected_widget.setChecked(status)
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def check_selection_status(self, state, row):
        """Apply *state* to the checkbox of every row in the selected ranges."""
        list_ranges = self.parent.postprocessing_ui.table.selectedRanges()
        for _range in list_ranges:
            bottom_row = _range.bottomRow()
            top_row = _range.topRow()
            range_row = list(range(top_row, bottom_row + 1))
            for _row in range_row:
                _widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
                if len(_widgets) > 0:
                    _selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
                    _selected_widget.setChecked(state)
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _cut(self):
        """Copy the current selection, then blank it out."""
        self._copy()
        self._paste(_cut=True)

    def _duplicate_row(self):
        """Insert a copy of the current row at its position."""
        _row = self.current_row
        metadata_to_copy = self._collect_metadata(row_index=_row)
        o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
        o_populate.add_new_row(metadata_to_copy, row=_row)

    def _plot_fetch_files(self, file_type='SofQ'):
        """Return [{'file', 'run'}, ...] for the current row's runs found on disk."""
        if file_type == 'SofQ':
            search_dir = './SofQ'
            prefix = 'NOM_'
            suffix = 'SQ.dat'
        elif file_type == 'nexus':
            cwd = os.getcwd()
            search_dir = cwd[:cwd.find('shared')] + '/nexus'
            prefix = 'NOM_'
            suffix = '.nxs.h5'
        _row = self.current_row
        _row_runs = self._collect_metadata(row_index=_row)['runs'].split(',')
        output_list = list()
        file_list = [a_file for a_file in glob.glob(search_dir + '/' + prefix + '*')]
        for run in _row_runs:
            the_file = search_dir + '/' + prefix + str(run) + suffix
            if the_file in file_list:
                output_list.append({'file': the_file, 'run': run})
        return output_list

    def _plot_fetch_data(self):
        """Load the x/y columns of each S(Q) file of the current row."""
        file_list = self._plot_fetch_files(file_type='SofQ')
        for data in file_list:
            with open(data['file'], 'r') as handle:
                x, y, e = np.loadtxt(handle, unpack=True)
            data['x'] = x
            data['y'] = y
        return file_list

    def _plot_datasets(self, datasets, shift_value=1.0, cmap_choice='inferno', title=None):
        """Plot all datasets on one axis, shifting each curve up by *shift_value*."""
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # configure plot
        cmap = plt.get_cmap(cmap_choice)
        cNorm = colors.Normalize(vmin=0, vmax=len(datasets))
        scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)
        mrks = [0, -1]
        # plot data
        shifter = 0.0
        for idx, data in enumerate(datasets):
            data['y'] += shifter
            colorVal = scalarMap.to_rgba(idx)
            if 'linestyle' in data:
                ax.plot(data['x'], data['y'], data['linestyle'] + 'o', label=data['run'], color=colorVal, markevery=mrks,)
            else:
                ax.plot(data['x'], data['y'], label=data['run'], color=colorVal, markevery=mrks)
            shifter += shift_value
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1], title='Runs', loc='center left', bbox_to_anchor=(1, 0.5))
        if title:
            fig.suptitle(title)
        plt.show()

    def _plot_sofq(self):
        """Plot S(Q) for every run of the current row."""
        sofq_datasets = self._plot_fetch_data()
        self._plot_datasets(sorted(sofq_datasets, key=lambda k: int(k['run'])), title='S(Q)')

    def _plot_sofq_diff_first_run_row(self):
        """Plot S(Q) differences with respect to the row's first run."""
        sofq_datasets = self._plot_fetch_data()
        sofq_base = dict(sofq_datasets[0])
        for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
            sofq['y'] = sofq['y'] - sofq_base['y']
        self._plot_datasets(sofq_datasets, shift_value=0.2,
                            title='S(Q) - S(Q) for run ' + sofq_base['run'])

    def _plot_sofq_diff_average_row(self):
        """Plot S(Q) differences with respect to the average over all runs."""
        sofq_datasets = self._plot_fetch_data()
        sofq_data = [sofq['y'] for sofq in sofq_datasets]
        sofq_avg = np.average(sofq_data, axis=0)
        for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
            sofq['y'] = sofq['y'] - sofq_avg
        self._plot_datasets(sofq_datasets, shift_value=0.2, title='S(Q) - <S(Q)>')

    def _plot_temperature(self, samp_env_choice=None):
        """Plot sample/environment temperature logs from the row's NeXus files."""
        file_list = self._plot_fetch_files(file_type='nexus')
        samp_env = SampleEnvironmentHandler(samp_env_choice)
        datasets = list()
        for data in file_list:
            samp_x, samp_y = samp_env.getDataFromFile(data['file'], 'samp')
            envi_x, envi_y = samp_env.getDataFromFile(data['file'], 'envi')
            print(data['file'])
            datasets.append({'run': data['run'] + '_samp', 'x': samp_x, 'y': samp_y, 'linestyle': '-'})
            datasets.append({'run': None, 'x': envi_x, 'y': envi_y, 'linestyle': '--'})
        self._plot_datasets(sorted(datasets, key=lambda k: k['run']),
                            shift_value=0.0, title='Temperature: ' + samp_env_choice)

    def _new_row(self):
        """Insert an empty row at the current position (top when nothing selected)."""
        _row = self.current_row
        if _row == -1:
            _row = 0
        o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
        _metadata = o_populate.empty_metadata()
        o_populate.add_new_row(_metadata, row=_row)

    def _remove_selected_rows(self):
        """Remove every selected row, then refresh the background widgets."""
        selected_range = self.parent.postprocessing_ui.table.selectedRanges()
        _nbr_row_removed = 0
        _local_nbr_row_removed = 0
        for _range in selected_range:
            _top_row = _range.topRow()
            _bottom_row = _range.bottomRow()
            nbr_row = _bottom_row - _top_row + 1
            # row indices shift up as rows are removed, hence the offset
            for i in np.arange(nbr_row):
                self._remove_row(row=_top_row - _nbr_row_removed)
                _local_nbr_row_removed += 1
            _nbr_row_removed = _local_nbr_row_removed
        _pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
        _pop_back_wdg.run()

    def _remove_row(self, row=-1):
        """Remove one row (the current one by default) and refresh the GUI."""
        if row == -1:
            row = self.current_row
        self.parent.postprocessing_ui.table.removeRow(row)
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _refresh_table(self):
        """Re-populate the table from scratch."""
        self.parent.populate_table_clicked()
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _clear_table(self):
        """Remove all rows and reset the background widgets."""
        _number_of_row = self.parent.postprocessing_ui.table.rowCount()
        self.parent.postprocessing_ui.table.setSortingEnabled(False)
        for _row in np.arange(_number_of_row):
            self.parent.postprocessing_ui.table.removeRow(0)
        self.parent.postprocessing_ui.background_line_edit.setText("")
        self.parent.postprocessing_ui.background_comboBox.clear()
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def set_widget_state(self, _widget_state, _row):
        """Set the absorption-correction checkbox (column 8) of *_row*."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
        _widget.setCheckState(_widget_state)

    def retrieve_do_abs_correction_state(self, _row):
        """Return the check state of the absorption-correction checkbox."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
        return _widget.checkState()

    def set_widget_index(self, _widget_index, _row):
        """Set the sample-shape combo box (column 7) of *_row*."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(_row, 7)
        _widget.setCurrentIndex(_widget_index)

    def paste_item_text(self, _row, _column, _item_text):
        """Write *_item_text* into the given table cell."""
        _item = self.parent.postprocessing_ui.table.item(_row, _column)
        _item.setText(_item_text)

    def retrieve_sample_shape_index(self, row_index):
        """Return the selected index of the sample-shape combo box."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
        _selected_index = _widget.currentIndex()
        return _selected_index

    def retrieve_item_text(self, row, column):
        """Return the cell text, or '' when the cell has no item."""
        _item = self.parent.postprocessing_ui.table.item(row, column)
        if _item is None:
            return ''
        else:
            return str(_item.text())

    def name_search(self):
        """Select every row whose name (column 1) contains the search string."""
        nbr_row = self.parent.postprocessing_ui.table.rowCount()
        if nbr_row == 0:
            return
        _string = str(self.parent.postprocessing_ui.name_search.text()).lower()
        if _string == '':
            self.select_all(status=False)
        else:
            for _row in range(nbr_row):
                _text_row = str(self.parent.postprocessing_ui.table.item(_row, 1).text()).lower()
                if _string in _text_row:
                    self.select_row(row=_row, status=True)
| mit | 796,581,622,623,209,200 | 39.148746 | 120 | 0.573138 | false |
ryanjmccall/nupic.research | union_pooling/union_pooling/experiments/union_sdr_overlap/plot_experiment.py | 4 | 4392 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import csv
import os
import sys
import matplotlib.pyplot as plt
import numpy
from experiments.capacity import data_utils
_OVERLAPS_FILE_NAME = "/overlaps.csv"
def main(inputPath, csvOutputPath, imgOutputPath):
# remove existing /overlaps.csv if present
if os.path.exists(csvOutputPath + _OVERLAPS_FILE_NAME):
os.remove(csvOutputPath + _OVERLAPS_FILE_NAME)
if not os.path.exists(csvOutputPath):
os.makedirs(csvOutputPath)
if not os.path.exists(imgOutputPath):
os.makedirs(imgOutputPath)
print "Computing Union SDR overlap between SDR traces in following dir:"
print inputPath + "\n"
files = os.listdir(inputPath)
if len(files) != 2:
print "Found {0} files at input path {1} - Requires exactly 2.".format(
len(files), inputPath)
sys.exit(1)
pathNoLearn = inputPath + "/" + files[0]
pathLearn = inputPath + "/" + files[1]
print "Comparing files..."
print pathLearn
print pathNoLearn + "\n"
# Load source A
with open(pathLearn, "rU") as fileA:
csvReader = csv.reader(fileA)
dataA = [line for line in csvReader]
unionSizeA = [len(datum) for datum in dataA]
# Load source B
with open(pathNoLearn, "rU") as fileB:
csvReader = csv.reader(fileB)
dataB = [line for line in csvReader]
unionSizeB = [len(datum) for datum in dataB]
assert len(dataA) == len(dataB)
# To display all plots on the same y scale
yRangeMax = 1.05 * max(max(unionSizeA), max(unionSizeB))
# Plot union size for data A
x = [i for i in xrange(len(dataA))]
stdDevs = None
title = "Union Size with Learning vs. Time"
data_utils.getErrorbarFigure(title, x, unionSizeA, stdDevs, "Time",
"Union Size", yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
# Plot union size for data B and save image
title = "Union Size without Learning vs. Time"
data_utils.getErrorbarFigure(title, x, unionSizeB, stdDevs, "Time",
"Union Size", yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
with open(csvOutputPath + _OVERLAPS_FILE_NAME, "wb") as outputFile:
csvWriter = csv.writer(outputFile)
overlaps = [getOverlap(dataA[i], dataB[i]) for i in xrange(len(dataA))]
csvWriter.writerow(overlaps)
outputFile.flush()
# Plot overlap and save image
title = "Learn-NoLearn Union SDR Overlap vs. Time"
data_utils.getErrorbarFigure(title, x, overlaps, stdDevs, "Time","Overlap",
yRangeMax=yRangeMax)
figPath = "{0}/{1}.png".format(imgOutputPath, title)
plt.savefig(figPath, bbox_inches="tight")
raw_input("Press any key to exit...")
def getOverlap(listA, listB):
  """Return the number of distinct elements common to listA and listB."""
  shared = numpy.intersect1d(numpy.array(listA), numpy.array(listB))
  return len(shared)
def _getArgs():
  """
  Parses and returns command line arguments.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument("--input", help="Path to unionSdrTrace .csv files")
  parser.add_argument("--csvOutput", help="Path for csv output.")
  parser.add_argument("--imgOutput", help="Path for image output.")
  return parser.parse_args()



if __name__ == "__main__":
  args = _getArgs()
  main(args.input, args.csvOutput, args.imgOutput)
| gpl-3.0 | -4,415,004,528,747,624,400 | 30.826087 | 77 | 0.673042 | false |
AhmedHani/Kaggle-Machine-Learning-Competitions | Medium/Toxic Comment Classification Challenge/train_ffnn.py | 1 | 1063 | import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Embedding, Input
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint
max_features = 20000
maxlen = 100
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
train = train.sample(frac=1)
list_sentences_train = train["comment_text"].fillna("CVxTz").values
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
y = train[list_classes].values
list_sentences_test = test["comment_text"].fillna("CVxTz").values
tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
list_tokenized_test = tokenizer.texts_to_sequences(list_sentences_test)
X_t = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)
X_te = sequence.pad_sequences(list_tokenized_test, maxlen=maxlen) | mit | 61,240,177,327,376,260 | 35.689655 | 88 | 0.775165 | false |
ecervera/mindstorms-nb | nxt/functions.py | 1 | 7333 | import json
import shutil
from IPython.core.display import display, HTML
def configure(n):
    """Write the NXT robot configuration (robot number *n*) for the task
    notebooks and copy this helper module next to them."""
    config = {
        'version' : 'nxt',
        'number' : n
    }
    with open('../task/robot_config.json', 'w') as f:
        json.dump(config, f)
    shutil.copyfile('./functions.py', '../task/functions.py')
    # green "configuration complete" message (Catalan), then a link to the
    # first task notebook
    print("\x1b[32mConfiguració completa, podeu continuar.\x1b[0m")
    display(HTML('<p>Ara ja podeu continuar, començant la primera tasca de programació: provareu el robot a vore si respon i es mou correctament.</p><h2><a href="../task/index.ipynb" target="_blank">>>> Prova de connexió</a></h2>'))
def next_notebook(nb):
    """Show the "continue to the next notebook" banner for the given step id.

    `nb` is one of the course step identifiers below; any other value is
    silently ignored (the final `else: pass`)."""
    if nb=='moviments':
        display(HTML('<p>Ja podeu passar a la pàgina següent, on aprendreu a controlar els moviments del robot:</p><h2><a href="motors.ipynb" target="_blank">>>> Moviments del robot</a></h2>'))
    elif nb=='quadrat':
        display(HTML('<p>Ara ja podeu continuar, bona sort!</p><h2><a href="quadrat.ipynb" target="_blank">>>> Exercici de moviment</a></h2>'))
    elif nb=='sensors':
        display(HTML('<p>Fins ara heu aprés a controlar el moviment del robot, i també a programar bucles, no està gens malament!</p><p>Per a continuar, anem a vore els altres components del robot, els sensors, que ens permetran fer programes encara més sofisticats.</p><h2><a href="sensors.ipynb" target="_blank">>>> Sensors</a></h2>'))
    elif nb=='touch':
        display(HTML('<p>Ara ja podeu passar al primer exercici amb sensors:</p><h2><a href="touch.ipynb" target="_blank">>>> Tacte</a></h2>'))
    elif nb=='navigation':
        display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="navigation.ipynb" target="_blank">>>> Exercici de navegació</a></h2>'))
    elif nb=='sound':
        display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="sound.ipynb" target="_blank">>>> Sensor de so</a></h2>'))
    elif nb=='light':
        display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="light.ipynb" target="_blank">>>> Sensor de llum</a></h2>'))
    elif nb=='ultrasonic':
        display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="ultrasonic.ipynb" target="_blank">>>> Sensor ultrasònic</a></h2>'))
    elif nb=='sumo':
        display(HTML('<p>Ara ja podeu continuar.</p><h2><a href="sumo.ipynb" target="_blank">>>> El Gran Repte</a></h2>'))
    else:
        pass
import nxt.bluesock
import nxt.motor
import math
import time
from bluetooth.btcommon import BluetoothError
def connect():
    """Open a Bluetooth link to the robot whose number is stored in
    robot_config.json and publish the brick, motors and sensors as globals.

    Side effects (globals set on success): brick, mB, mC (motors B/C),
    s1..s4 (touch, sound, light, ultrasonic sensors), tempo, connected_robot.
    Prints a coloured status/error message instead of raising.
    """
    global brick
    global mB; global mC
    global s1; global s2; global s3; global s4
    global tempo
    global connected_robot
    # The robot number was written by configure() in the course setup step.
    with open('robot_config.json', 'r') as f:
        config = json.load(f)
    n = config['number']
    try:
        # Map robot number -> Bluetooth MAC address of its NXT brick.
        address = {2: '00:16:53:0A:9B:72', \
                   3: '00:16:53:0A:9D:F2', \
                   4: '00:16:53:0A:5C:72',
                   5: '00:16:53:08:D5:59', \
                   6: '00:16:53:08:DE:51', \
                   7: '00:16:53:0A:5A:B4', \
                   8: '00:16:53:0A:9B:27', \
                   9: '00:16:53:0A:9E:2C', \
                   10: '00:16:53:17:92:8A', \
                   11: '00:16:53:17:94:E0', \
                   12: '00:16:53:1A:C6:BD'}
        brick = nxt.bluesock.BlueSock(address[n]).connect()
        mB = nxt.motor.Motor(brick, nxt.motor.PORT_B)
        mC = nxt.motor.Motor(brick, nxt.motor.PORT_C)
        s1 = nxt.sensor.Touch(brick, nxt.sensor.PORT_1)
        s2 = nxt.sensor.Sound(brick, nxt.sensor.PORT_2)
        s2.set_input_mode(0x08,0x80) # dB adjusted, percentage
        s3 = nxt.sensor.Light(brick, nxt.sensor.PORT_3)
        s3.set_illuminated(True)
        s3.set_input_mode(0x05,0x80) # Light active, percentage
        s4 = nxt.sensor.Ultrasonic(brick, nxt.sensor.PORT_4)
        tempo = 0.5
        connected_robot = n
        print("\x1b[32mRobot %d connectat.\x1b[0m" % n)
    except BluetoothError as e:
        # NOTE(review): eval() on the exception payload assumes args[0] is the
        # textual "(errno, 'message')" tuple produced by pybluez — fragile.
        errno, errmsg = eval(e.args[0])
        if errno==16:
            print("\x1b[31mNo es pot connectar, hi ha un altre programa ocupant la connexió.\x1b[0m")
        elif errno==13:
            print("\x1b[31mNo es pot connectar, el dispositiu no està emparellat.\x1b[0m")
        elif errno == 112:
            print("\x1b[31mNo es troba el brick, assegurat que estiga encés.\x1b[0m")
        else:
            print("Error %d: %s" % (errno, errmsg))
    except KeyError:
        # Robot number not present in the address table above.
        print("\x1b[31mNúmero de robot incorrecte.\x1b[0m")
def disconnect():
    """Close the Bluetooth socket opened by connect(); prints an error if no
    connection exists (the globals are then undefined -> NameError)."""
    try:
        brick.sock.close()
        print("\x1b[32mRobot %d desconnectat.\x1b[0m" % connected_robot)
    except NameError:
        print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def stop():
    """Brake both drive motors; prints an error if the robot is not connected."""
    try:
        mB.brake()
        mC.brake()
    except NameError:
        print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def forward(speed=100,speed_B=100,speed_C=100):
    """Drive both motors forward; each motor speed is capped by `speed`."""
    b_speed = min(abs(speed), abs(speed_B))
    c_speed = min(abs(speed), abs(speed_C))
    move(speed_B=b_speed, speed_C=c_speed)
def backward(speed=100,speed_B=100,speed_C=100):
    """Drive both motors in reverse; each motor speed is capped by `speed`."""
    b_speed = min(abs(speed), abs(speed_B))
    c_speed = min(abs(speed), abs(speed_C))
    move(speed_B=-b_speed, speed_C=-c_speed)
def left(speed=100):
    """Gentle left turn: motor B stopped, motor C at |speed|."""
    magnitude = abs(speed)
    move(speed_B=0, speed_C=magnitude)
def left_sharp(speed=100):
    """Sharp left turn: motors B and C spin at |speed| in opposite directions."""
    magnitude = abs(speed)
    move(speed_B=-magnitude, speed_C=magnitude)
def right(speed=100):
    """Gentle right turn: motor B at |speed|, motor C stopped."""
    magnitude = abs(speed)
    move(speed_B=magnitude, speed_C=0)
def right_sharp(speed=100):
    """Sharp right turn: motors B and C spin at |speed| in opposite directions."""
    magnitude = abs(speed)
    move(speed_B=magnitude, speed_C=-magnitude)
def move(speed_B=0,speed_C=0):
    """Run the two drive motors at the given signed percentage speeds.

    speed_B, speed_C: values in [-100, 100]; out-of-range values are
    clamped (with a warning printed per offending argument, as before).
    Prints an error instead of raising when the robot is not connected.
    """
    def _clamp(value):
        # Coerce to int and clamp to [-100, 100], warning on each violation.
        value = int(value)
        if value > 100:
            print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
            return 100
        if value < -100:
            print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
            return -100
        return value

    max_speed = 100
    speed_B = _clamp(speed_B)
    speed_C = _clamp(speed_C)
    try:
        # NOTE(review): motor B is driven with inverted sign — presumably a
        # mounting/wiring convention; confirm before changing.
        mB.run(-int(speed_B*max_speed/100))
        mC.run(int(speed_C*max_speed/100))
    except NameError:
        print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def touch():
    """Return True while the touch sensor (port 1, see connect()) is pressed."""
    return s1.is_pressed()
def sound():
    """Return the loudness reading of the sound sensor (port 2)."""
    return s2.get_loudness()
def light():
    """Return the light level of the light sensor (port 3)."""
    return s3.get_lightness()
from nxt.telegram import InvalidOpcodeError, InvalidReplyError
def ultrasonic():
    """Return the distance measured by the ultrasonic sensor (port 4).

    On a corrupted Bluetooth reply the connection is dropped and
    re-established once before retrying the read.
    """
    global s4
    try:
        return s4.get_distance()
    except (InvalidOpcodeError, InvalidReplyError):
        # Transient protocol error: reset the link and retry a single time.
        disconnect()
        print("\x1b[33mError de connexió, reintentant...\x1b[0m")
        time.sleep(1)
        # BUG FIX: connect() takes no arguments (it reads the robot number
        # from robot_config.json); the old call connect(connected_robot)
        # raised TypeError, so the retry never happened.
        connect()
        return s4.get_distance()
def play_sound(s):
    """Play the on-brick sound file named `s` + '.rso'."""
    # NOTE(review): the leading False flag presumably means "do not loop" —
    # confirm against the nxt-python play_sound_file documentation.
    brick.play_sound_file(False, bytes((s+'.rso').encode('ascii')))
def say(s):
    """Alias of play_sound(): 'speak' the named sound file."""
    play_sound(s)
def play_tone(f,t):
    """Play a tone on the brick and wait for it to finish.

    f: tone frequency passed straight to the brick.
    t: duration, scaled by the global `tempo` (int(t*1000*tempo) —
       presumably milliseconds; confirm against the nxt docs).
    Any playback error (e.g. robot not connected) is deliberately ignored
    so a melody keeps going past a bad note.
    """
    try:
        brick.play_tone_and_wait(f, int(t*1000*tempo))
        time.sleep(0.01)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt) can
        # still stop a long melody; everything else stays best-effort.
        pass
from IPython.display import clear_output
def read_and_print(sensor):
    """Continuously print `sensor()` readings (refreshing the notebook cell
    output) until the user interrupts the cell."""
    try:
        while True:
            clear_output(wait=True)
            print(sensor())
    except KeyboardInterrupt:
        # Interrupting the kernel is the intended way to leave the loop.
        pass
def test_sensors():
    """Continuously print all four sensor readings until interrupted."""
    try:
        while True:
            clear_output(wait=True)
            print("     Touch: %d\n     Light: %d\n     Sound: %d\nUltrasonic: %d" % (touch(),light(),sound(), ultrasonic()))
    except KeyboardInterrupt:
        # Interrupting the kernel is the intended way to leave the loop.
        pass
import matplotlib.pyplot as plt
def plot(l):
    """Draw the sequence of samples `l` as a matplotlib line plot."""
    plt.plot(l)
| mit | -1,915,779,626,699,435,000 | 34.634146 | 346 | 0.609309 | false |
leojohnthomas/ahkab | ekv.py | 1 | 25762 | # -*- coding: iso-8859-1 -*-
# ekv.py
# Partial implementation of the EKV 3.0 MOS transistor model
# Copyright 2010 Giuseppe Venturini
#
# The EKV model was developed by Matthias Bucher, Christophe Lallement,
# Christian Enz, Fabien Théodoloz, François Krummenacher at the Electronics
# Laboratories, Swiss Federal Institute of Technology (EPFL),
# Lausanne, Switzerland.
# This implementation is based upon:
# 1. Matthias Bucher, Christian Enz, François Krummenacher, Jean-M. Sallese,
# Christophe Lallement and Alain-S. Porret,
# The EKV 3.0 Compact MOS Transistor Model: Accounting for Deep-Submicron
# Aspects, <http://www.nsti.org/publications/MSM/2002/pdf/346.pdf>
# 2. EKV 2.6 Technical report, <http://legwww.epfl.ch/ekv/pdf/ekv_v262.pdf>.
#
# This file is part of the ahkab simulator.
#
# Ahkab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Ahkab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License v2
# along with ahkab. If not, see <http://www.gnu.org/licenses/>.
"""
The EKV model was developed by Matthias Bucher, Christophe Lallement,
Christian Enz, Fabien Théodoloz, François Krummenacher at the Electronics
Laboratories, Swiss Federal Institute of Technology (EPFL),
Lausanne, Switzerland. The Tecnical Report upon which this implementation
is based is available here:
<http://legwww.epfl.ch/ekv/pdf/ekv_v262.pdf>.
This module defines two classes:
ekv_device
ekv_mos_model
Features:
- EKV model implementation, computation of charges, potentials,
reverse and forward currents, slope factor and normalization factors,
- Calculation of trans-conductances based on the charge approach.
- N/P MOS symmetry
- Rudimentary temperature effects.
The Missing Features:
- Channel length modulation
- Reverse Short Channel Effect (RSCE)
- Complex mobility degradation is missing
- Transcapacitances
- Quasistatic implementation
"""
import constants, options, utilities, printing
import math
# DEFAULT VALUES FOR 500n CH LENGTH
COX_DEFAULT = .7e-3       # oxide capacitance [F/m^2] (unit per print_model)
VTO_DEFAULT = .5          # threshold voltage [V]
GAMMA_DEFAULT = 1         # body effect factor [sqrt(V)]
PHI_DEFAULT = .7          # bulk Fermi potential [V]
KP_DEFAULT = 50e-6        # transconductance parameter [A/V^2]
UCRIT_DEFAULT = 2e6       # critical field (velocity saturation)
LAMBDA_DEFAULT = .5       # channel-length modulation coefficient
XJ_DEFAULT = .1e-6        # junction depth [m]
TCV_DEFAULT = 1e-3        # VTO temperature coefficient [V/K]
BEX_DEFAULT = -1.5        # mobility temperature exponent
ISMALL_GUESS_MIN = 1e-10  # floor for the Newton solver's initial guess
class ekv_device:
    """Circuit element implementing the EKV MOS transistor.

    Wraps a shared ekv_mos_model instance with per-device geometry
    (W, L, M, N) and caches operating-point data in self.opdict.
    NOTE(review): get_op_info() reads self.descr, which is never set in
    __init__ — callers must assign it after construction.
    """
    # Initial guess for the normalized forward/reverse currents (ifn/irn).
    INIT_IFRN_GUESS = 1
    def __init__(self, nd, ng, ns, nb, W, L, model, M=1, N=1):
        """ EKV device
        Parameters:
        nd: drain node
        ng: gate node
        ns: source node
        nb: bulk node
        L: element width [m]
        W: element length [m]
        M: multiplier (n. of shunt devices)
        N: series mult. (n. of series devices)
        model: pass an instance of ekv_mos_model
        Selected methods:
        - get_output_ports() -> (nd, ns)
        - get_drive_ports() -> (nd, nb), (ng, nb), (ns, nb)
        """
        self.ng = ng
        self.nb = nb
        self.n1 = nd
        self.n2 = ns
        self.ports = ((self.n1, self.nb), (self.ng, self.nb), (self.n2, self.nb))
        class dev_class: pass # empty class to hold device parameters
        self.device = dev_class()
        self.device.L = float(L) #channel length -
        self.device.W = float(W) #channel width -
        self.device.M = int(M) #parallel multiple device number
        self.device.N = int(N) #series multiple device number
        self.ekv_model = model
        # opdict caches the last operating point and solver warm starts.
        self.opdict = {}
        self.opdict.update({'state':(float('nan'), float('nan'), float('nan'))})
        self.opdict.update({'ifn':self.INIT_IFRN_GUESS})
        self.opdict.update({'irn':self.INIT_IFRN_GUESS})
        self.opdict.update({'ip_abs_err':self.ekv_model.get_ip_abs_err(self.device)})
        self.letter_id = 'M'
        self.is_nonlinear = True
        self.is_symbolic = True
        # DC initial guess: [VD, VG, VS] scaled by VTO and channel polarity.
        self.dc_guess = [self.ekv_model.VTO*(0.1)*self.ekv_model.NPMOS, self.ekv_model.VTO*(1.1)*self.ekv_model.NPMOS, 0]
        devcheck, reason = self.ekv_model._device_check(self.device)
        if not devcheck:
            raise Exception, reason + " out of boundaries."
    def get_drive_ports(self, op):
        """Returns a tuple of tuples of ports nodes, as:
        (port0, port1, port2...)
        Where each port is in the form:
        port0 = (nplus, nminus)
        """
        return self.ports #d,g,s
    def get_output_ports(self):
        # The device current flows between drain (n1) and source (n2).
        return ((self.n1, self.n2),)
    def __str__(self):
        # Netlist-style one-line description of the device geometry.
        mos_type = self._get_mos_type()
        rep = " " + self.ekv_model.name + " w="+ str(self.device.W) + " l=" + \
        str(self.device.L) + " M="+ str(self.device.M) + " N=" + \
        str(self.device.N)
        return rep
    def _get_mos_type(self):
        """Returns N or P (capitalized)
        """
        mtype = 'N' if self.ekv_model.NPMOS == 1 else 'P'
        return mtype
    def i(self, op_index, ports_v, time=0):
        """Returns the current flowing in the element with the voltages
        applied as specified in the ports_v vector.
        ports_v: [voltage_across_port0, voltage_across_port1, ...]
        time: the simulation time at which the evaluation is performed.
        It has no effect here. Set it to None during DC analysis.
        """
        ret, j1, j2 = self.ekv_model.get_ids(self.device, ports_v, \
            self.opdict)
        return ret
    def update_status_dictionary(self, ports_v):
        # Refresh the cached conductances/current for the given bias point.
        if self.opdict is None:
            self.opdict = {}
        if not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gmd')) or \
            not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gmg')) or \
            not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gms')) or \
            not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('Ids')):
            # NOTE(review): the next line is a comparison, not an assignment
            # ('==' vs '='), so it is a no-op; 'state' is actually refreshed
            # inside ekv_mos_model.get_ids(). Probably intended '='.
            self.opdict['state'] == ports_v[0]
            self.opdict['gmd'] = self.g(0, ports_v[0], 0)
            self.opdict['gmg'] = self.g(0, ports_v[0], 1)
            self.opdict['gms'] = self.g(0, ports_v[0], 2)
            self.opdict['Ids'] = self.i(0, ports_v[0])
        gmd = self.opdict['gmd']
        gmg = self.opdict['gmg']
        gms = self.opdict['gms']
        ids = self.opdict['Ids']
        if ids == 0:
            TEF = float('nan')
        else:
            # Transconductance efficiency factor |gms*Ut/Ids|.
            TEF = abs(gms*constants.Vth()/ids)
        self.opdict['TEF'] = TEF
    def print_op_info(self, ports_v):
        # Convenience wrapper: build and print the operating-point table.
        arr = self.get_op_info(ports_v)
        print arr,
    def get_op_info(self, ports_v):
        """Operating point info, for design/verification. """
        mos_type = self._get_mos_type()
        self.update_status_dictionary(ports_v)
        sat_status = "SATURATION" if self.opdict['SAT'] else "LINEAR"
        if self.opdict["WMSI"] == 0:
            wmsi_status = "WEAK INVERSION"
        if self.opdict["WMSI"] == 1:
            wmsi_status = "MODERATE INVERSION"
        if self.opdict["WMSI"] == 2:
            wmsi_status = "STRONG INVERSION"
        arr = [["M"+self.descr, mos_type.upper()+" ch",wmsi_status, "", "", sat_status, "", "", "", "", "",""],]
        arr.append(["beta", "[A/V^2]:", self.opdict['beta'], "Weff", "[m]:", str(self.opdict['Weff'])+" ("+str(self.device.W)+")", "Leff", "[m]:", str(self.opdict['Leff'])+ " ("+str(self.device.L)+")", "M/N:", "", str(self.device.M)+"/"+str(self.device.N)])
        arr.append(["Vdb", "[V]:", float(ports_v[0][0]), "Vgb", "[V]:", float(ports_v[0][1]), "Vsb", "[V]:", float(ports_v[0][2]), "Vp", "[V]:", self.opdict['Vp'],])
        arr.append([ "VTH", "[V]:", self.opdict['VTH'], "VOD", "[V]:", self.opdict['VOD'], "nq: ", "",self.opdict['nq'], "VA", "[V]:", str(self.opdict['Ids']/self.opdict['gmd'])])
        arr.append(["Ids", "[A]:", self.opdict['Ids'], "nv: ", "",self.opdict['nv'], "Ispec", "[A]:", self.opdict["Ispec"], "TEF:", "", str(self.opdict['TEF']),])
        arr.append(["gmg", "[S]:", self.opdict['gmg'], "gms", "[S]:", self.opdict['gms'], "rob", "[Ohm]:", 1/self.opdict['gmd'], "", "", ""])
        arr.append(["if:", "", self.opdict['ifn'],"ir:", "", self.opdict['irn'], "Qf", "[C/m^2]:", self.opdict["qf"], "Qr", "[C/m^2]:", self.opdict["qr"],])
        #arr.append([ "", "", "", "", "", ""])
        return printing.table_setup(arr)
    def g(self, op_index, ports_v, port_index, time=0):
        """Returns the differential (trans)conductance rs the port specified by port_index
        when the element has the voltages specified in ports_v across its ports,
        at (simulation) time.
        ports_v: a list in the form: [voltage_across_port0, voltage_across_port1, ...]
        port_index: an integer, 0 <= port_index < len(self.get_ports())
        time: the simulation time at which the evaluation is performed. Set it to
        None during DC analysis.
        """
        assert op_index == 0
        assert port_index < 3
        if port_index == 0:
            g = self.ekv_model.get_gmd(self.device, ports_v, self.opdict)
        elif port_index == 1:
            g = self.ekv_model.get_gmg(self.device, ports_v, self.opdict)
        if port_index == 2:
            g = self.ekv_model.get_gms(self.device, ports_v, self.opdict)
        if op_index == 0 and g == 0:
            # Avoid a singular Jacobian: substitute a tiny conductance.
            if port_index == 2:
                sign = -1
            else:
                sign = +1
            g = sign*options.gmin*2
        #print type(g), g
        if op_index == 0 and port_index == 0:
            self.opdict.update({'gmd':g})
        elif op_index == 0 and port_index == 1:
            self.opdict.update({'gmg':g})
        elif op_index == 0 and port_index == 2:
            self.opdict.update({'gms':g})
        return g
    def get_value_function(self, identifier):
        # Return a closure that reads `identifier` from the device's opdict.
        def get_value(self):
            return self.opdict[identifier]
        return get_value
class scaling_holder: pass # holds the EKV scaling factors (Ut, Is, Gs, Qs)
class ekv_mos_model:
    """EKV MOS model card: process parameters plus the numeric routines that
    compute normalized forward/reverse currents, potentials and charges.
    One instance is shared by every ekv_device that references it.
    """
    def __init__(self, name=None, TYPE='n', TNOM=None, COX=None, \
                 GAMMA=None, NSUB=None, PHI=None, VTO=None, KP=None, \
                 XJ=None, LAMBDA=None, \
                 TOX=None, VFB=None, U0=None, TCV=None, BEX=None):
        """Build a model card; missing parameters are derived from the ones
        supplied (e.g. COX from TOX, GAMMA/PHI from NSUB, KP from U0) or
        fall back to the module-level *_DEFAULT constants."""
        self.scaling = scaling_holder()
        self.name = "model_ekv0" if name is None else name
        Vth = constants.Vth()
        self.TNOM = float(TNOM) if TNOM is not None else constants.Tref
        #print "TYPE IS:" + TYPE
        # NPMOS is the channel polarity: +1 for n-channel, -1 for p-channel.
        self.NPMOS = 1 if TYPE == 'n' else -1
        # optional parameters (no defaults)
        self.TOX = float(TOX) if TOX is not None else None
        self.NSUB = float(NSUB) if NSUB is not None else None
        self.VFB = self.NPMOS*float(VFB) if VFB is not None else None
        self.U0 = float(U0) if U0 is not None else None
        # crucial parameters
        if COX is not None:
            self.COX = float(COX)
        elif TOX is not None:
            self.COX = constants.si.eox/TOX
        else:
            self.COX = COX_DEFAULT
        if GAMMA is not None:
            self.GAMMA = float(GAMMA)
        elif NSUB is not None:
            self.GAMMA = math.sqrt(2*constants.e*constants.si.esi*NSUB*10**6/self.COX)
        else:
            self.GAMMA = GAMMA_DEFAULT
        if PHI is not None:
            self.PHI = float(PHI)
        elif NSUB is not None:
            self.PHI = 2*constants.Vth(self.TNOM)*math.log(NSUB*10**6/constants.si.ni(self.TNOM))
        else:
            self.PHI = PHI_DEFAULT
        if VTO is not None:
            self.VTO = self.NPMOS*float(VTO)
            if self.VTO < 0:
                print "(W): model %s has internal negative VTO (%f V)." % (self.name, self.VTO)
        elif VFB is not None:
            self.VTO = VFB + PHI + GAMMA*PHI #inv here??
        else:
            self.VTO = self.NPMOS*VTO_DEFAULT
        if KP is not None:
            self.KP = float(KP)
        elif U0 is not None:
            self.KP = (U0*10**-4)*self.COX
        else:
            self.KP = KP_DEFAULT
        self.LAMBDA = LAMBDA if LAMBDA is not None else LAMBDA_DEFAULT
        self.XJ = XJ if XJ is not None else XJ_DEFAULT
        self.UCRIT = UCRIT_DEFAULT
        # Intrinsic model temperature parameters
        self.TCV = self.NPMOS*float(TCV) if TCV is not None else self.NPMOS*TCV_DEFAULT
        self.BEX = float(BEX) if BEX is not None else BEX_DEFAULT
        self.set_device_temperature(constants.T)
        #Setup switches
        self.SATLIM = math.exp(4)
        self.WMSI_factor = 10
        self.NR_damp_factor = options.nl_voltages_lock_factor
        sc, sc_reason = self._self_check()
        if not sc:
            raise Exception, sc_reason + " out of range"
    def set_device_temperature(self, T):
        """Change the temperature of the device. VTO, KP and PHI get updated.
        """
        self.TEMP = T
        self.VTO = self.VTO - self.TCV*(T-self.TNOM)
        self.KP = self.KP*(T/self.TNOM)**self.BEX
        self.PHI = self.PHI * T/self.TNOM + 3*constants.Vth(self.TNOM)*math.log(T/self.TNOM) \
                   - constants.si.Eg(self.TNOM)*T/self.TNOM + constants.si.Eg(T)
    def get_device_temperature(self):
        """Returns the temperature of the device - in K.
        """
        return self.TEMP
    def print_model(self):
        """All the internal parameters of the model get printed out,
        for visual inspection. Notice some can be set to None
        (ie not available) if they were not provided in the netlist
        or some not provided are calculated from the others.
        """
        arr = []
        TYPE = 'N' if self.NPMOS == 1 else "P"
        arr.append([self.name, "", "", TYPE+" MOS", "EKV MODEL", "", "", "", "", "", "", ""])
        arr.append(["KP", "[A/V^2]", self.KP, "VTO", "[V]:", self.VTO, "TOX", "[m]", self.TOX, "COX", "[F/m^2]:", self.COX])
        arr.append(["PHI", "[V]:", self.PHI, "GAMMA", "sqrt(V)", self.GAMMA, "NSUB", "[cm^-3]", self.NSUB, "VFB", "[V]:", self.VFB])
        arr.append(["U0", "[cm^2/(V*s)]:", self.U0, "TCV", "[V/K]", self.TCV, "BEX", "", self.BEX, "", "", ""])
        arr.append(["INTERNAL", "", "", "SAT LIMIT", "", self.SATLIM, "W/M/S INV FACTOR", "", self.WMSI_factor, "", "", ""])
        printing.table_print(arr)
    def get_voltages(self, vd, vg, vs):
        """Performs the VD <-> VS swap if needed.
        Returns:
        (VD, VG, VS) after the swap
        CS, an integer which equals to:
        +1 if no swap was necessary,
        -1 if VD and VS have been swapped.
        """
        # vd / vs swap
        vd = vd*self.NPMOS
        vg = vg*self.NPMOS
        vs = vs*self.NPMOS
        if vs > vd:
            vd_new = vs
            vs_new = vd
            cs = -1
        else:
            vd_new = vd
            vs_new = vs
            cs = +1
        return ((float(vd_new), float(vg), float(vs_new)), cs)
    def get_ip_abs_err(self, device):
        """Absolute error to be enforced in the calculation of the normalized currents.
        """
        return options.iea / (2*constants.Vth(self.TEMP)**2*self.KP*device.M*device.W/device.L)
    def setup_scaling(self, nq, device):
        """Calculates and stores in self.scaling the following factors:
          Ut, the thermal voltage,
          Is, the specific current,
          Gs, the specific transconductance,
          Qs, the specific charge.
        """
        self.scaling.Ut = constants.Vth()
        self.scaling.Is = 2 * nq * self.scaling.Ut**2 * self.KP * device.W/device.L
        self.scaling.Gs = 2 * nq * self.scaling.Ut * self.KP * device.W/device.L
        self.scaling.Qs = 2 * nq * self.scaling.Ut * self.COX
        return
    def get_vp_nv_nq(self, VG):
        """Calculates and returns:
            VP, the pinch-off voltage,
            nv, the slope factor,
            nq, the charge linearization factor.
        """
        VGeff = VG - self.VTO + self.PHI + self.GAMMA*math.sqrt(self.PHI)
        if VGeff > 0 and VG - self.VTO + (math.sqrt(self.PHI)+self.GAMMA/2)**2 > 0:
            VP = VG - self.VTO - self.GAMMA*(math.sqrt(VG -self.VTO +(math.sqrt(self.PHI)+self.GAMMA/2)**2) -(math.sqrt(self.PHI)+self.GAMMA/2))
            if math.isnan(VP): VP = 0 # the argument of sqrt ^^ went negative
        else:
            VP = -self.PHI
        #print "VG", VG, "VGeff", VGeff, "VP", VP, self.GAMMA, self.PHI, math.sqrt(VG -self.VTO +(math.sqrt(self.PHI)+self.GAMMA/2)**2), VG -self.VTO +(math.sqrt(self.PHI)+self.GAMMA/2)**2
        nq = 1 + .5 * self.GAMMA / math.sqrt(self.PHI + .5*VP)
        nv = 1 + .5 * self.GAMMA / math.sqrt(self.PHI + VP + 1e-12)
        return VP, nv, nq
    def get_ids(self, device, (vd, vg, vs), opdict=None, debug=False):
        """Returns:
        IDS, the drain-to-source current (de-normalized),
        qs, the (scaled) charge at the source,
        qr, the (scaled) charge at the drain.
        """
        if debug: print "=== Current for vd:", vd, "vg:", vg, "vs:", vs
        ip_abs_err = self.get_ip_abs_err(device) if opdict['ip_abs_err'] is None else opdict['ip_abs_err']
        (VD, VG, VS), CS_FACTOR = self.get_voltages(vd, vg, vs)
        #Weff, Leff = self.get_eff_wl(device.W, device.L)
        VP, nv, nq = self.get_vp_nv_nq(VG)
        self.setup_scaling(nq, device)
        # Normalize the potentials by the thermal voltage.
        vp = VP/self.scaling.Ut
        vs = VS/self.scaling.Ut
        vd = VD/self.scaling.Ut
        if debug: print "Scaled voltages: vd:", vd, "vp:", vp, "vs:", vs
        v_ifn = vp - vs
        # Warm-start the Newton solver with the previous ifn/irn values.
        ifn = self.get_ismall(v_ifn, opdict['ip_abs_err'], max(opdict['ifn'], ISMALL_GUESS_MIN), debug=debug)
        if False:
            Leff = device.L
            v_irn = vp - vd
        else:
            Leff, v_irn = self.get_leq_virp(device, (vd, vg, vs), VP, device.L, ifn)
        irn = self.get_ismall(v_irn, opdict['ip_abs_err'], max(opdict['irn'], ISMALL_GUESS_MIN), debug=debug)
        if debug:
            print "vd:", vd, "vg:",VG/self.scaling.Ut, "vs:", vs, "vds:", vd-vs
            print "v_ifn:", v_ifn, "v_irn:",v_irn
            print "ifn:", ifn, "irn:",irn
            print "ip_abs_err:", ip_abs_err
            print "Vth:", self.scaling.Ut
            print "nv", nv, "Is", self.scaling.Is
            print "Weff:", device.W, "Leff:", Leff
            print "NPMOS:", self.NPMOS, "CS_FACTOR", CS_FACTOR
        qf = self.ismall2qsmall(ifn)
        qr = self.ismall2qsmall(irn)
        # De-normalize: Ids = (forward - reverse) specific current.
        Ids = CS_FACTOR*self.NPMOS * device.L/Leff * device.M * self.scaling.Is * (ifn - irn)
        vd_real = vd if CS_FACTOR == 1 else vs
        vs_real = vs if CS_FACTOR == 1 else vd
        # Cache solver state and design quantities for later inspection.
        opdict.update({'state':(vd_real*self.NPMOS, vg*self.NPMOS, vs_real*self.NPMOS)})
        opdict.update({'Ids':Ids, "Weff":device.W, "Leff":Leff, 'Vp':VP})
        opdict.update({'ifn':ifn, "irn":irn, "nv":nv, "nq":nq, 'beta':.5*self.KP*device.W/Leff, 'Ispec':self.scaling.Is})
        opdict.update({'VTH':self.VTO, "VOD":self.NPMOS*nv*(VP-VS), 'SAT':ifn>irn*self.SATLIM})
        opdict.update({'qf':qf*self.scaling.Qs, 'qr':qr*self.scaling.Qs})
        # Classify the inversion region (weak/moderate/strong).
        if max(ifn, irn) > self.WMSI_factor:
            WMSI = 2
        elif max(ifn, irn) < 1/self.WMSI_factor:
            WMSI = 0
        else:
            WMSI = 1
        opdict.update({'WMSI':WMSI})
        if debug: print "current:", Ids
        return Ids, qf, qr
    def get_leq_virp(self, device, (vd, vg, vs), Vp, Leff, ifn):
        # Equivalent channel length (CLM + velocity saturation) and the
        # reverse pinch-off voltage; equation numbers refer to the EKV report.
        #if ifn > 0 and Vp - constants.Vth()*vd > 0:
        assert vd >= vs
        Vc = self.UCRIT * device.N * Leff
        Vdss = Vc * (math.sqrt(.25 + constants.Vth()/Vc*math.sqrt(ifn)) - .5) # eq. 46
        # Drain-to-source saturation voltage for reverse normalized current, eq. 47
        Vdssp = Vc * (math.sqrt(.25 +constants.Vth()/Vc *(math.sqrt(ifn) - .75*math.log(ifn))) - .5) + \
                constants.Vth()*(math.log(.5 * Vc/constants.Vth()) - .6)
        # channel length modulation
        vser_1 = math.sqrt(ifn) - Vdss/constants.Vth()
        #if vser_1 < 0:
        #   vser_1 = 0
        Vds = (vd - vs)*.5*constants.Vth()
        delta_v = 4*constants.Vth()*math.sqrt(self.LAMBDA*vser_1 + 1.0/64) # eq. 48
        Vip = math.sqrt(Vdss**2 + delta_v**2) - math.sqrt((Vds - Vdss)**2 + delta_v**2) #eq 50
        Lc = math.sqrt(constants.si.esi*self.XJ/self.COX) #eq. 51
        delta_l = self.LAMBDA * Lc * math.log(1 + (Vds - Vip)/(Lc*self.UCRIT)) #eq. 52
        # Equivalent channel length including channel-length modulation and velocity saturation
        Lp = device.N*Leff - delta_l + (Vds + Vip)/self.UCRIT #eq. 53
        Lmin = device.N*Leff/10.0 #eq. 54
        Leq = .5*(Lp + math.sqrt(Lp**2 + Lmin**2)) #eq. 55
        assert not math.isnan(Vdssp)
        assert not math.isnan(delta_v)
        v_irp = (Vp - Vds - vs*constants.Vth() - math.sqrt(Vdssp**2 + delta_v**2) + math.sqrt((Vds-Vdssp)**2+delta_v**2))/constants.Vth()
        #else:
        #   v_irp = Vp/constants.Vth() - vd
        #   Leq = Leff
        return Leq, v_irp
    def get_gms(self, device, (vd, vg, vs), opdict=None, debug=False):
        """Returns the source-bulk transconductance or d(IDS)/d(VS-VB)."""
        (j1, j2, j3), CS_FACTOR = self.get_voltages(vd, vg, vs)
        Ids, qf, qr = self.get_ids(device, (vd, vg, vs), opdict, debug)
        if CS_FACTOR == +1:
            gms = -1.0*self.scaling.Gs*qf
        elif CS_FACTOR == -1:
            gms = -self.scaling.Gs*qr
        return gms
    def get_gmd(self, device, (vd, vg, vs), opdict=None, debug=False):
        """Returns the drain-bulk transconductance or d(IDS)/d(VD-VB)."""
        (j1, j2, j3), CS_FACTOR = self.get_voltages(vd, vg, vs)
        Ids, qf, qr = self.get_ids(device, (vd, vg, vs), opdict, debug)
        if CS_FACTOR == +1:
            gmd = self.scaling.Gs*qr
        elif CS_FACTOR == -1:
            gmd = self.scaling.Gs*qf
        return gmd
    def get_gmg(self, device, (vd, vg, vs), opdict=None, debug=False):
        """Returns the gate-bulk transconductance or d(IDS)/d(VG-VB)."""
        VP, nv, nq = self.get_vp_nv_nq(float(vg))
        Ids, qf, qr = self.get_ids(device, (vd, vg, vs), opdict, debug)
        (j1, j2, j3), CS_FACTOR = self.get_voltages(vd, vg, vs)
        gmg = CS_FACTOR*self.scaling.Gs*(qf-qr)/nv
        return gmg
    def get_ismall(self, vsmall, ip_abs_err, iguess=None, debug=False):
        """Solves the problem: given v, find i such that:
            v = ln(q) + 2q
            q = sqrt(.25 + i) - .5
        A damped Newton algorithm is used inside.
        """
        # starting guess for Newton's Method.
        if iguess is None:
            iguess = 1
        # sanity checks
        if math.isnan(vsmall):
            raise Exception, \
                "Attempted to calculate a current corresponding to a NaN voltage."
        if not ip_abs_err > 0:
            raise Exception, \
                "The normalized current absolute error has been set to a negative value."
        #if vsmall < 0:
        #   return 0.0
        check = False
        ismall = iguess
        if debug: iter_c = 0
        while True:
            if debug: iter_c = iter_c + 1
            vsmall_iter, numeric_problem_v = self.get_vsmall(ismall)
            dvdi, numeric_problem_i = self.get_dvsmall_dismall(ismall)
            deltai = (vsmall - vsmall_iter)/dvdi
            numeric_problem = numeric_problem_i or numeric_problem_v
            if debug:
                print "Numeric problem:", numeric_problem
                print "ABS: deltai < ip_abs_err", deltai, "<", ip_abs_err, ":", abs(deltai) < ip_abs_err
                print "REL: deltai < ismall*options.ier", deltai, "<", ismall*options.ier, abs(deltai) < ismall*options.ier
                print deltai, ismall
            # absolute and relative value convergence checks.
            if ((abs(deltai) < ip_abs_err or numeric_problem) and abs(deltai) < ismall*options.ier) or \
                (abs(deltai) < ip_abs_err*1e-6 or numeric_problem):
                # To make the algorithm more robust,
                # the convergence check has to be passed twice in a row
                # to reach convergence.
                if not check:
                    check = True
                else:
                    break
            else:
                check = False
            # convergence was not reached, update ismall
            if math.isnan(ismall):
                print "Ismall is NaN!!"
                exit()
            if ismall == 0:
                # this is a sign we went below the machine resolution
                # it makes no sense to iterate there as quantization errors
                # prevent reaching a meaningful result.
                break
            else:
                # Damped Newton with domain restriction: ismall >= 0.
                ratio = deltai/ismall
                if ratio > self.NR_damp_factor:
                    # Do not allow a change in ismall bigger than self.NR_damp_factor
                    # in a single iteration
                    ismall = self.NR_damp_factor*ismall
                elif ratio <= -1:
                    # this would give a negative ismall
                    ismall = 0.1*ismall
                else:
                    ismall = ismall + deltai
        if debug:
            print str(iter_c) + " iterations."
        return ismall
    def get_vsmall(self, ismall, verbose=3):
        """Returns v according to the equations:
        q = sqrt(.25 + i) - .5
        v = ln(q) + 2q
        """
        if abs(ismall) < utilities.EPS:
            ismall = utilities.EPS # otherwise we get log(0)
            if verbose == 6:
                print "EKV: Machine precision limited the resolution on i. (i<EPS)"
            numeric_problem = True
        else:
            numeric_problem = False
        vsmall = math.log(math.sqrt(.25 + ismall) - 0.5) + 2*math.sqrt(.25 + ismall) - 1.0
        return vsmall, numeric_problem
    def get_dvsmall_dismall(self, ismall, verbose=3):
        """The Newton algorithm in get_ismall(...) requires the evaluation of the
        first derivative of the fixed point function:
        dv/di = 1.0/(sqrt(.25+i)-.5) * .5/sqrt(.25 + i) + 1/sqrt(.25 + i)
        This is provided by this module.
        """
        if abs(ismall) < utilities.EPS:
            ismall = utilities.EPS
            numeric_problem = True
            if verbose == 6:
                print "EKV: Machine precision limited the resolution on dv/di in the NR iteration. (i<EPS)"
        else:
            numeric_problem = False
        dvdi = 1.0/(math.sqrt(.25+ismall)-.5) * .5/math.sqrt(.25 + ismall) + 1.0/math.sqrt(.25 + ismall)
        return dvdi, numeric_problem
    def ismall2qsmall(self, ismall, verbose=0):
        """ i(f,r) -> q(f,r)
        Convert a source/drain scaled current to the corresponding normalized charge."""
        if verbose == 6: #ismall is lower than EPS, errors here are usually not important
            print "EKV: Machine precision limited the resolution on q(s,d). (i<EPS)"
        qsmall = math.sqrt(.25 + ismall) - .5
        return qsmall
    def qsmall2ismall(self, qsmall):
        """ q(f,r) -> i(f,r)
        Convert a source/drain scaled charge to the corresponding normalized current."""
        ismall = qsmall**2 + qsmall
        return ismall
    def _self_check(self):
        """Performs sanity check on the model parameters."""
        ret = True, ""
        if self.NSUB is not None and self.NSUB < 0:
            ret = (False, "NSUB "+str(self.NSUB))
        elif self.U0 is not None and not self.U0 > 0:
            ret = (False, "UO "+str(self.U0))
        elif not self.GAMMA > 0:
            ret = (False, "GAMMA "+str(self.GAMMA))
        elif not self.PHI > 0.1:
            ret = (False, "PHI "+str(self.PHI))
        return ret
    def _device_check(self, adev):
        """Performs sanity check on the device parameters."""
        if not adev.L > 0:
            ret = (False, "L")
        elif not adev.W > 0:
            ret = (False, "W")
        elif not adev.N > 0:
            ret = (False, "N")
        elif not adev.M > 0:
            ret = (False, "M")
        else:
            ret = (True, "")
        return ret
return ret
if __name__ == '__main__':
    # Tests
    import matplotlib.pyplot as plt
    # Build an n-channel model and one device, then inspect its OP.
    ekv_m = ekv_mos_model(TYPE='n', KP=50e-6, VTO=.4)
    ma = ekv_device(1, 2, 3, 4, W=10e-6,L=1e-6, model=ekv_m)
    ma.descr = "1"
    # OP test
    vd = 0
    vg = 1
    vs = 0
    ma.print_op_info(((vd, vg, vs),))
    ekv_m.print_model()
    # gmUt/Ids test
    # Compare against the square-law model in the project's mosq module.
    import mosq
    msq = mosq.mosq(1, 2, 3, kp=50e-6, w=10e-6, l=1e-6, vt=.4, lambd=0, mos_type='n')
    data0 = []
    data1 = []
    data2 = []
    data3 = []
    vd = 2.5
    if True:
        vs = 1
        # Sweep VG from 1 mV to 2.5 V in 1 mV steps.
        for Vhel in range(1,2500):
            print ".",
            vg = Vhel/1000.0
            ma.update_status_dictionary(((vd, vg, 0),))
            data0.append(ma.opdict['Ids'])
            #print "Current for vd", vd, "vg", vg, "vs", vs
            data1.append(ma.opdict['TEF'])
            isq = msq.i((vd, vg, vs),)
            gmsq = msq.g((vd, vg, vs),0)
            if isq > 0:
                data2.append(gmsq/isq*constants.Vth())
            else:
                data2.append(float('nan'))
            data3.append(isq)
    plt.semilogx(data0, data1, data3, data2)
    plt.title('Transconductance efficiency factor')
    plt.legend(['(GM*UT)/ID'])
    plt.show()
| gpl-2.0 | 2,957,746,477,690,178,000 | 33.296937 | 251 | 0.636914 | false |
ceroytres/RBM | binary_RBM.py | 1 | 4503 | from __future__ import print_function
import numpy as np
from numba import jit
class binary_RBM(object):
    """Bernoulli-Bernoulli Restricted Boltzmann Machine trained with
    contrastive divergence (CD-k) plus momentum and L2 weight decay.

    Attributes: W (n_visible x n_hidden weights), hbias/vbias (unit biases).
    """
    def __init__(self,n_visible=None,n_hidden=256,batchSize=256,lr=0.1,alpha=0,
                 mu=.95,epochs=1,k=10):
        # n_visible/n_hidden: layer sizes; lr: learning rate; alpha: L2
        # penalty; mu: momentum (rescheduled inside fit()); k: CD-k steps.
        self.n_hidden=n_hidden
        self.n_visible=n_visible
        self.batchSize=batchSize
        self.k=k
        self.alpha=alpha
        # Uniform init scaled to +/- 4*sqrt(6/(fan_in+fan_out)).
        self.W=np.random.rand(n_visible,n_hidden)
        self.W*=8*np.sqrt(6./(n_hidden + n_visible))
        self.W-=4*np.sqrt(6./(n_hidden + n_visible))
        self.hbias=np.zeros(n_hidden)
        self.vbias=np.zeros(n_visible)
        self.epochs=epochs
        self.lr=lr
        self.mu=mu
    @jit
    def fit(self,x):
        """Train on binary data x (rows = samples) with mini-batch CD-k."""
        # Momentum buffers for weights and biases.
        v_W=np.zeros(self.W.shape)
        v_h=np.zeros(self.hbias.shape)
        v_v=np.zeros(self.vbias.shape)
        cost=self.get_pseudo_likelihood(x)
        print("Epoch %d Pseudo-likelihood cost:%f" % (0,cost))
        for t in range(0,self.epochs):
            N=x.shape[0]
            batches,num_batches=self._batchLists(N)
            num_batches=int(num_batches)
            # Momentum schedule: ramps toward 1 as epochs progress.
            self.mu=(1-(3.0/(5.0+t)))
            for i in range(0,num_batches):
                idx=batches[i]
                data=np.squeeze(x[idx,:])
                B=data.shape[0]
                p_h=self._sigmoid(np.dot(data,self.W)+self.hbias)
                # NOTE(review): h is only (re)initialized on the very first
                # batch of the first epoch — later batches reuse the previous
                # chain state (persistent-CD-like); confirm this is intended.
                if t==0 and i==0:
                    h=p_h>np.random.rand(p_h.shape[0],p_h.shape[1])
                # k steps of alternating Gibbs sampling.
                for k in range(0,self.k):
                    p_v=self._sigmoid(np.dot(h,self.W.T)+self.vbias)
                    v=p_v>np.random.rand(p_v.shape[0],p_v.shape[1])
                    q_h=self._sigmoid(np.dot(v,self.W)+self.hbias)
                    h=q_h>np.random.rand(q_h.shape[0],q_h.shape[1])
                # Positive phase minus negative phase gradients.
                g_W=np.dot(data.T,p_h)-np.dot(v.T,q_h)
                g_W/=B
                g_v=data.mean(axis=0)-v.mean(axis=0)
                g_h=p_h.mean(axis=0)-q_h.mean(axis=0)
                # Momentum update with L2 decay on the weights only.
                v_W=self.mu*v_W*(t/(t+1.0))+self.lr*(g_W-self.alpha*self.W)
                v_h=self.mu*v_h*(t/(t+1.0))+self.lr*g_h
                v_v=self.mu*v_v*(t/(t+1.0))+self.lr*g_v
                self.W+=v_W
                self.hbias+=v_h
                self.vbias+=v_v
            # Decay the learning rate each epoch.
            self.lr/=np.sqrt(t+2)
            cost=self.get_pseudo_likelihood(x)
            print("Epoch %d Pseudo-likelihood cost:%f" % (t+1,cost))
        return None
    def _batchLists(self,N):
        """Randomly partition N sample indices into ceil(N/batchSize) batches;
        returns (list_of_index_arrays, num_batches)."""
        num_batches=np.ceil(N/self.batchSize)
        batch_idx=np.tile(np.arange(0,num_batches)\
                          ,self.batchSize)
        batch_idx=batch_idx[0:N]
        np.random.shuffle(batch_idx)
        batch_list=[]
        for i in range(0,int(num_batches)):
            idx=np.argwhere(batch_idx==i)
            batch_list.append(idx)
        return batch_list,num_batches
    @jit
    def _sigmoid(self,z):
        # Logistic function; applied elementwise to activations.
        return 1/(1+np.exp(-z))
    @jit
    def get_pseudo_likelihood(self,x):
        """Stochastic pseudo-likelihood proxy: flip one random bit per sample
        and compare free energies (as in standard RBM monitoring)."""
        v=x.copy()
        idx = (np.arange(v.shape[0]),
               np.random.randint(0, v.shape[1], v.shape[0]))
        v[idx]=1-v[idx]
        N=self.vbias.shape[0]
        PL=N*np.log(self._sigmoid(self.free_energy(v)-self.free_energy(x)))
        return PL.mean()
    @jit
    def free_energy(self,x):
        # F(x) = -x·vbias - sum_j softplus(x·W_j + hbias_j)
        F=-np.dot(x,self.vbias)-np.sum(np.logaddexp(0,np.dot(x,self.W)+self.hbias),axis=1)
        return F
    @jit
    def gibbs_sample(self,iters):
        """Run `iters` steps of Gibbs sampling from a random visible vector;
        returns (binary sample v, visible probabilities p_v)."""
        v=np.random.rand(self.n_visible)
        for i in range(0,iters):
            p_h=self._sigmoid(np.dot(v,self.W)+self.hbias)
            h=p_h>np.random.rand(p_h.shape[0])
            p_v=self._sigmoid(np.dot(h,self.W.T)+self.vbias)
            v=p_v>np.random.rand(p_v.shape[0])
        return v,p_v
if __name__=="__main__":
import matplotlib.pyplot as plt
x=np.load('trainIm.pkl')/255.0
x=x.reshape((784,60000)).T
rbm=binary_RBM(n_visible=784,n_hidden=50,alpha=1e-6,lr=.1,batchSize=20,epochs=10,mu=1)
rbm.fit(x)
v,p_v=rbm.gibbs_sample(100000)
plt.figure()
plt.imshow(p_v.reshape((28,28)),cmap='gray')
plt.show()
W=rbm.W
plt.figure()
for i in xrange(25):
plt.subplot(5,5,i+1)
plt.imshow(W[:,i].reshape((28,28)),cmap='gray')
| mit | -6,674,141,656,296,916,000 | 28.431373 | 90 | 0.487897 | false |
bloyl/mne-python | mne/channels/tests/test_layout.py | 4 | 14417 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import copy
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
import matplotlib.pyplot as plt
from mne.channels import (make_eeg_layout, make_grid_layout, read_layout,
find_layout, HEAD_SIZE_DEFAULT)
from mne.channels.layout import (_box_size, _find_topomap_coords,
generate_2d_layout)
from mne import pick_types, pick_info
from mne.io import read_raw_kit, _empty_info, read_info
from mne.io.constants import FIFF
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fif_fname = op.join(io_dir, 'tests', 'data', 'test_raw.fif')
lout_path = op.join(io_dir, 'tests', 'data')
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
fname_kit_umd = op.join(io_dir, 'kit', 'tests', 'data', 'test_umd-raw.sqd')
def _get_test_info():
"""Make test info."""
test_info = _empty_info(1000)
loc = np.array([0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.],
dtype=np.float32)
test_info['chs'] = [
{'cal': 1, 'ch_name': 'ICA 001', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 1, 'range': 1.0, 'scanno': 1,
'unit': -1, 'unit_mul': 0},
{'cal': 1, 'ch_name': 'ICA 002', 'coil_type': 0, 'coord_frame': 0,
'kind': 502, 'loc': loc.copy(), 'logno': 2, 'range': 1.0, 'scanno': 2,
'unit': -1, 'unit_mul': 0},
{'cal': 0.002142000012099743, 'ch_name': 'EOG 061', 'coil_type': 1,
'coord_frame': 0, 'kind': 202, 'loc': loc.copy(), 'logno': 61,
'range': 1.0, 'scanno': 376, 'unit': 107, 'unit_mul': 0}]
test_info._update_redundant()
test_info._check_consistency()
return test_info
def test_io_layout_lout(tmpdir):
"""Test IO with .lout files."""
tempdir = str(tmpdir)
layout = read_layout('Vectorview-all', scale=False)
layout.save(op.join(tempdir, 'foobar.lout'))
layout_read = read_layout(op.join(tempdir, 'foobar.lout'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
print(layout) # test repr
def test_io_layout_lay(tmpdir):
"""Test IO with .lay files."""
tempdir = str(tmpdir)
layout = read_layout('CTF151', scale=False)
layout.save(op.join(tempdir, 'foobar.lay'))
layout_read = read_layout(op.join(tempdir, 'foobar.lay'), path='./',
scale=False)
assert_array_almost_equal(layout.pos, layout_read.pos, decimal=2)
assert layout.names == layout_read.names
def test_find_topomap_coords():
"""Test mapping of coordinates in 3D space to 2D."""
info = read_info(fif_fname)
picks = pick_types(info, meg=False, eeg=True, eog=False, stim=False)
# Remove extra digitization point, so EEG digitization points match up
# with the EEG channels
del info['dig'][85]
# Use channel locations
kwargs = dict(ignore_overlap=False, to_sphere=True,
sphere=HEAD_SIZE_DEFAULT)
l0 = _find_topomap_coords(info, picks, **kwargs)
# Remove electrode position information, use digitization points from now
# on.
for ch in info['chs']:
ch['loc'].fill(np.nan)
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1, l0, atol=1e-3)
for z_pt in ((HEAD_SIZE_DEFAULT, 0., 0.),
(0., HEAD_SIZE_DEFAULT, 0.)):
info['dig'][-1]['r'] = z_pt
l1 = _find_topomap_coords(info, picks, **kwargs)
assert_allclose(l1[-1], z_pt[:2], err_msg='Z=0 point moved', atol=1e-6)
# Test plotting mag topomap without channel locations: it should fail
mag_picks = pick_types(info, meg='mag')
with pytest.raises(ValueError, match='Cannot determine location'):
_find_topomap_coords(info, mag_picks, **kwargs)
# Test function with too many EEG digitization points: it should fail
info['dig'].append({'r': [1, 2, 3], 'kind': FIFF.FIFFV_POINT_EEG})
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Test function with too little EEG digitization points: it should fail
info['dig'] = info['dig'][:-2]
with pytest.raises(ValueError, match='Number of EEG digitization points'):
_find_topomap_coords(info, picks, **kwargs)
# Electrode positions must be unique
info['dig'].append(info['dig'][-1])
with pytest.raises(ValueError, match='overlapping positions'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without EEG digitization points: it should fail
info['dig'] = [d for d in info['dig'] if d['kind'] != FIFF.FIFFV_POINT_EEG]
with pytest.raises(RuntimeError, match='Did not find any digitization'):
_find_topomap_coords(info, picks, **kwargs)
# Test function without any digitization points, it should fail
info['dig'] = None
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
info['dig'] = []
with pytest.raises(RuntimeError, match='No digitization points found'):
_find_topomap_coords(info, picks, **kwargs)
def test_make_eeg_layout(tmpdir):
"""Test creation of EEG layout."""
tempdir = str(tmpdir)
tmp_name = 'foo'
lout_name = 'test_raw'
lout_orig = read_layout(kind=lout_name, path=lout_path)
info = read_info(fif_fname)
info['bads'].append(info['ch_names'][360])
layout = make_eeg_layout(info, exclude=[])
assert_array_equal(len(layout.names), len([ch for ch in info['ch_names']
if ch.startswith('EE')]))
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir, scale=False)
assert_array_equal(lout_new.kind, tmp_name)
assert_allclose(layout.pos, lout_new.pos, atol=0.1)
assert_array_equal(lout_orig.names, lout_new.names)
# Test input validation
pytest.raises(ValueError, make_eeg_layout, info, radius=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, radius=0.6)
pytest.raises(ValueError, make_eeg_layout, info, width=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, width=1.1)
pytest.raises(ValueError, make_eeg_layout, info, height=-0.1)
pytest.raises(ValueError, make_eeg_layout, info, height=1.1)
def test_make_grid_layout(tmpdir):
"""Test creation of grid layout."""
tempdir = str(tmpdir)
tmp_name = 'bar'
lout_name = 'test_ica'
lout_orig = read_layout(kind=lout_name, path=lout_path)
layout = make_grid_layout(_get_test_info())
layout.save(op.join(tempdir, tmp_name + '.lout'))
lout_new = read_layout(kind=tmp_name, path=tempdir)
assert_array_equal(lout_new.kind, tmp_name)
assert_array_equal(lout_orig.pos, lout_new.pos)
assert_array_equal(lout_orig.names, lout_new.names)
# Test creating grid layout with specified number of columns
layout = make_grid_layout(_get_test_info(), n_col=2)
# Vertical positions should be equal
assert layout.pos[0, 1] == layout.pos[1, 1]
# Horizontal positions should be unequal
assert layout.pos[0, 0] != layout.pos[1, 0]
# Box sizes should be equal
assert_array_equal(layout.pos[0, 3:], layout.pos[1, 3:])
def test_find_layout():
"""Test finding layout."""
pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')
sample_info = read_info(fif_fname)
grads = pick_types(sample_info, meg='grad')
sample_info2 = pick_info(sample_info, grads)
mags = pick_types(sample_info, meg='mag')
sample_info3 = pick_info(sample_info, mags)
# mock new convention
sample_info4 = copy.deepcopy(sample_info)
for ii, name in enumerate(sample_info4['ch_names']):
new = name.replace(' ', '')
sample_info4['chs'][ii]['ch_name'] = new
eegs = pick_types(sample_info, meg=False, eeg=True)
sample_info5 = pick_info(sample_info, eegs)
lout = find_layout(sample_info, ch_type=None)
assert lout.kind == 'Vectorview-all'
assert all(' ' in k for k in lout.names)
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
# test new vector-view
lout = find_layout(sample_info4, ch_type=None)
assert_equal(lout.kind, 'Vectorview-all')
assert all(' ' not in k for k in lout.names)
lout = find_layout(sample_info, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2)
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='grad')
assert_equal(lout.kind, 'Vectorview-grad')
lout = find_layout(sample_info2, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3)
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='mag')
assert_equal(lout.kind, 'Vectorview-mag')
lout = find_layout(sample_info3, ch_type='meg')
assert_equal(lout.kind, 'Vectorview-all')
lout = find_layout(sample_info, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5)
assert_equal(lout.kind, 'EEG')
lout = find_layout(sample_info5, ch_type='eeg')
assert_equal(lout.kind, 'EEG')
# no common layout, 'meg' option not supported
lout = find_layout(read_info(fname_ctf_raw))
assert_equal(lout.kind, 'CTF-275')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
lout = find_layout(read_info(fname_bti_raw))
assert_equal(lout.kind, 'magnesWH3600')
raw_kit = read_raw_kit(fname_kit_157)
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
raw_kit.info['bads'] = ['MEG 013', 'MEG 014', 'MEG 015', 'MEG 016']
raw_kit.info._check_consistency()
lout = find_layout(raw_kit.info)
assert_equal(lout.kind, 'KIT-157')
# fallback for missing IDs
for val in (35, 52, 54, 1001):
raw_kit.info['kit_system_id'] = val
lout = find_layout(raw_kit.info)
assert lout.kind == 'custom'
raw_umd = read_raw_kit(fname_kit_umd)
lout = find_layout(raw_umd.info)
assert_equal(lout.kind, 'KIT-UMD-3')
# Test plotting
lout.plot()
lout.plot(picks=np.arange(10))
plt.close('all')
def test_box_size():
"""Test calculation of box sizes."""
# No points. Box size should be 1,1.
assert_allclose(_box_size([]), (1.0, 1.0))
# Create one point. Box size should be 1,1.
point = [(0, 0)]
assert_allclose(_box_size(point), (1.0, 1.0))
# Create two points. Box size should be 0.5,1.
points = [(0.25, 0.5), (0.75, 0.5)]
assert_allclose(_box_size(points), (0.5, 1.0))
# Create three points. Box size should be (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points), (0.5, 0.5))
# Create a grid of points. Box size should be (0.1, 0.1).
x, y = np.meshgrid(np.linspace(-0.5, 0.5, 11), np.linspace(-0.5, 0.5, 11))
x, y = x.ravel(), y.ravel()
assert_allclose(_box_size(np.c_[x, y]), (0.1, 0.1))
# Create a random set of points. This should never break the function.
rng = np.random.RandomState(42)
points = rng.rand(100, 2)
width, height = _box_size(points)
assert width is not None
assert height is not None
# Test specifying an existing width.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.4), (0.4, 0.5))
# Test specifying an existing width that has influence on the calculated
# height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, width=0.2), (0.2, 1.0))
# Test specifying an existing height.
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.4), (0.5, 0.4))
# Test specifying an existing height that has influence on the calculated
# width.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_allclose(_box_size(points, height=0.1), (1.0, 0.1))
# Test specifying both width and height. The function should simply return
# these.
points = [(0.25, 0.25), (0.75, 0.45), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=0.1, height=0.1), (0.1, 0.1))
# Test specifying a width that will cause unfixable horizontal overlap and
# essentially breaks the function (height will be 0).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_array_equal(_box_size(points, width=1), (1, 0))
# Test adding some padding.
# Create three points. Box size should be a little less than (0.5, 0.5).
points = [(0.25, 0.25), (0.75, 0.25), (0.5, 0.75)]
assert_allclose(_box_size(points, padding=0.1), (0.9 * 0.5, 0.9 * 0.5))
def test_generate_2d_layout():
"""Test creation of a layout from 2d points."""
snobg = 10
sbg = 15
side = range(snobg)
bg_image = np.random.RandomState(42).randn(sbg, sbg)
w, h = [.2, .5]
# Generate fake data
xy = np.array([(i, j) for i in side for j in side])
lt = generate_2d_layout(xy, w=w, h=h)
# Correct points ordering / minmaxing
comp_1, comp_2 = [(5, 0), (7, 0)]
assert lt.pos[:, :2].max() == 1
assert lt.pos[:, :2].min() == 0
with np.errstate(invalid='ignore'): # divide by zero
assert_allclose(xy[comp_2] / float(xy[comp_1]),
lt.pos[comp_2] / float(lt.pos[comp_1]))
assert_allclose(lt.pos[0, [2, 3]], [w, h])
# Correct number elements
assert lt.pos.shape[1] == 4
assert len(lt.box) == 4
# Make sure background image normalizing is correct
lt_bg = generate_2d_layout(xy, bg_image=bg_image)
assert_allclose(lt_bg.pos[:, :2].max(), xy.max() / float(sbg))
| bsd-3-clause | -6,684,274,647,835,185,000 | 38.283379 | 79 | 0.623569 | false |
mailund/pairwise-IM | IMSystem.py | 1 | 3547 | from numpy import matrix
from scipy.linalg import expm
## Constants used as indices in rate and transition matrices
LINEAGES_IN_SEP_POPS = 0
LINEAGES_IN_POP_1 = 1
LINEAGES_IN_POP_2 = 2
COALESCED = 3
NOT_COALESCED = [0,1,2]
def make_rate_matrix(c1, c2, m12, m21):
'''Create a rate matrix based on coalescence rates c1 and c2 and
migration rates m12 and m21.'''
Q = matrix(
[
# State 1: lineages in different populations
[-(m12+m21), m21, m12, 0],
# State 2: both lineages in population 1
[2*m12, -(2*m12+c1), 0, c1],
# State 3: both lineages in population 2
[2*m21, 0, -(2*m21+c2), c2],
# State 4: coalesced (catches both populations; absorbing)
[0, 0, 0, 0]
])
return Q
class IMSystem(object):
'''Wrapping a two-population isolation-with-migration system.'''
def __init__(self, ts, c1s, c2s, m12s, m21s):
'''Build the system based on end-points of time intervals, ts,
coalescence rates c1s and c2s and migration rates m12s and m21s.
'''
self.ts = ts
self.c1s = c1s
self.c2s = c2s
self.m12s = m12s
self.m21s = m21s
self.no_intervals = len(ts)
assert len(self.c1s) == self.no_intervals
assert len(self.c2s) == self.no_intervals
assert len(self.m12s) == self.no_intervals
assert len(self.m21s) == self.no_intervals
self.Qs = [make_rate_matrix(self.c1s[i],self.c2s[i],self.m12s[i],self.m21s[i])
for i in xrange(self.no_intervals)]
self.Ps = [None] * self.no_intervals
self.Ps[0] = matrix(expm(self.Qs[0] * self.ts[0]))
for i in xrange(1,self.no_intervals):
self.Ps[i] = self.Ps[i-1] * matrix(expm(self.Qs[i] * (self.ts[i]-self.ts[i-1])))
def coalescence_distribution(self):
'''Returns the (discritized) coalescence distribution for the time
intervals. Implicitly the time interval from the last ts till infinity
is assumed to carry the probability mass that gets this to sum to 1.'''
pdm_20 = [0] * (self.no_intervals + 1)
pdm_11 = [0] * (self.no_intervals + 1)
pdm_02 = [0] * (self.no_intervals + 1)
pdm_20[0] = self.Ps[0][LINEAGES_IN_POP_1,COALESCED]
pdm_11[0] = self.Ps[0][LINEAGES_IN_SEP_POPS,COALESCED]
pdm_02[0] = self.Ps[0][LINEAGES_IN_POP_2,COALESCED]
for i in xrange(1,self.no_intervals):
P1 = self.Ps[i-1]
P2 = self.Ps[i]
pdm_20[i] = P2[LINEAGES_IN_POP_1,COALESCED] - P1[LINEAGES_IN_POP_1,COALESCED]
pdm_11[i] = P2[LINEAGES_IN_SEP_POPS,COALESCED] - P1[LINEAGES_IN_SEP_POPS,COALESCED]
pdm_02[i] = P2[LINEAGES_IN_POP_2,COALESCED] - P1[LINEAGES_IN_POP_2,COALESCED]
pdm_20[-1] = 1 - sum(pdm_20)
pdm_11[-1] = 1 - sum(pdm_11)
pdm_02[-1] = 1 - sum(pdm_02)
return (pdm_20,pdm_11,pdm_02)
if __name__ == '__main__':
from scipy import linspace
ts = linspace(0.1,4)
c1s = [1] * len(ts)
c2s = [2] * len(ts)
m12s = [0.0] * len(ts)
m21s = [0.0] * len(ts)
im = IMSystem(ts, c1s, c2s, m12s, m21s)
pdm_20,pdm_11,pdm_02 = im.coalescence_distribution()
from matplotlib import pyplot
pyplot.plot(im.ts,pdm_20[0:-1])
pyplot.plot(im.ts,pdm_11[0:-1])
pyplot.plot(im.ts,pdm_02[0:-1])
pyplot.axis([0, max(ts), 0, max([max(pdm_20),max(pdm_11),max(pdm_02)])])
pyplot.show()
| gpl-3.0 | 5,477,560,428,867,526,000 | 36.336842 | 95 | 0.572597 | false |
ky822/Data_Bootcamp | Code/Python/WB_wdi_all.py | 2 | 2294 | """
Messing around with World Bank data. We start by reading in the whole WDI
from the online csv. Since the online file is part of a zipped collection,
this turned into an exploration of how to handle zip files -- see Section 1.
Section 2 (coming) does slicing and plotting.
Prepared for the NYU Course "Data Bootcamp."
More at https://github.com/DaveBackus/Data_Bootcamp
References
* http://datacatalog.worldbank.org/
* http://stackoverflow.com/questions/19602931/basic-http-file-downloading-and-saving-to-disk-in-python
* https://docs.python.org/3.4/library/urllib.html
Written by Dave Backus @ NYU, September 2014
Created with Python 3.4
"""
import pandas as pd
import urllib
import zipfile
import os
"""
1. Read data from component of online zip file
"""
# locations of file input and output
url = 'http://databank.worldbank.org/data/download/WDI_csv.zip'
file = os.path.basename(url) # cool tool via SBH
# the idea is to dump data in a different directory, kill with data = ''
data = '' # '../Data/'
#%%
# copy file from url to hard drive (big file, takes a minute or two)
urllib.request.urlretrieve(url, data+file)
#%%
# zipfile contains several files, we want WDI_Data.csv
print(['Is zipfile?', zipfile.is_zipfile(file)])
# key step, give us a file object to work with
zf = zipfile.ZipFile(data+file, 'r')
print('List of zipfile contents (two versions)')
[print(file) for file in zf.namelist()]
zf.printdir()
#%%
# copy data file to hard drive's working directory, then read it
csv = zf.extract('WDI_Data.csv')
df1 = pd.read_csv('WDI_Data.csv')
print(df1.columns)
#%%
# alternative: open and read
csv = zf.open('WDI_Data.csv')
df2 = pd.read_csv(csv)
print(df3.columns)
#%%
# same thing in one line
df3 = pd.read_csv(zf.open('WDI_Data.csv'))
print(df3.columns)
# zf.close() #??
# do we want to close zf? do we care?
# seems to be essential with writes, not so much with reads
# if so, can either close or use the "with" construction Sarah used.
# basic open etc:
# https://docs.python.org/3.4/tutorial/inputoutput.html#reading-and-writing-files
# on with (go to bottom): http://effbot.org/zone/python-with-statement.htm
#%%
# could we further consolidate zip read and extract? seems not.
#zf = zipfile.ZipFile(url, 'r')
| mit | 3,442,475,768,866,982,000 | 30.424658 | 102 | 0.709677 | false |
AtsushiSakai/jsk_visualization_packages | jsk_rqt_plugins/src/jsk_rqt_plugins/hist.py | 1 | 7882 | #!/usr/bin/env python
from rqt_gui_py.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, QTimer, qWarning, Slot
from python_qt_binding.QtGui import QAction, QIcon, QMenu, QWidget
from python_qt_binding.QtGui import QWidget, QVBoxLayout, QSizePolicy, QColor
from rqt_py_common.topic_completer import TopicCompleter
from matplotlib.colors import colorConverter
from rqt_py_common.topic_helpers import is_slot_numeric
from rqt_plot.rosplot import ROSData as _ROSData
from rqt_plot.rosplot import RosPlotException
from matplotlib.collections import (PolyCollection,
PathCollection, LineCollection)
import matplotlib
import matplotlib.patches as mpatches
import rospkg
import rospy
from cStringIO import StringIO
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from jsk_recognition_msgs.msg import HistogramWithRange, HistogramWithRangeBin
import os, sys
import argparse
try:
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except ImportError:
# work around bug in dateutil
import sys
import thread
sys.modules['_thread'] = thread
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
class ROSData(_ROSData):
def _get_data(self, msg):
val = msg
try:
if not self.field_evals:
return val
for f in self.field_evals:
val = f(val)
return val
except IndexError:
self.error = RosPlotException("[%s] index error for: %s" % (self.name, str(val).replace('\n', ', ')))
except TypeError:
self.error = RosPlotException("[%s] value was not numeric: %s" % (self.name, val))
class HistogramPlot(Plugin):
def __init__(self, context):
super(HistogramPlot, self).__init__(context)
self.setObjectName('HistogramPlot')
self._args = self._parse_args(context.argv())
self._widget = HistogramPlotWidget(self._args.topics)
context.add_widget(self._widget)
def _parse_args(self, argv):
parser = argparse.ArgumentParser(prog='rqt_histogram_plot', add_help=False)
HistogramPlot.add_arguments(parser)
args = parser.parse_args(argv)
return args
@staticmethod
def add_arguments(parser):
group = parser.add_argument_group('Options for rqt_histogram plugin')
group.add_argument('topics', nargs='?', default=[], help='Topics to plot')
class HistogramPlotWidget(QWidget):
_redraw_interval = 40
def __init__(self, topics):
super(HistogramPlotWidget, self).__init__()
self.setObjectName('HistogramPlotWidget')
rp = rospkg.RosPack()
ui_file = os.path.join(rp.get_path('jsk_rqt_plugins'),
'resource', 'plot_histogram.ui')
loadUi(ui_file, self)
self.cv_bridge = CvBridge()
self.subscribe_topic_button.setIcon(QIcon.fromTheme('add'))
self.pause_button.setIcon(QIcon.fromTheme('media-playback-pause'))
self.clear_button.setIcon(QIcon.fromTheme('edit-clear'))
self.data_plot = MatHistogramPlot(self)
self.data_plot_layout.addWidget(self.data_plot)
self._topic_completer = TopicCompleter(self.topic_edit)
self._topic_completer.update_topics()
self.topic_edit.setCompleter(self._topic_completer)
self.data_plot.dropEvent = self.dropEvent
self.data_plot.dragEnterEvent = self.dragEnterEvent
self._start_time = rospy.get_time()
self._rosdata = None
if len(topics) != 0:
self.subscribe_topic(topics)
self._update_plot_timer = QTimer(self)
self._update_plot_timer.timeout.connect(self.update_plot)
self._update_plot_timer.start(self._redraw_interval)
@Slot('QDropEvent*')
def dropEvent(self, event):
if event.mimeData().hasText():
topic_name = str(event.mimeData().text())
else:
droped_item = event.source().selectedItems()[0]
topic_name = str(droped_item.data(0, Qt.UserRole))
self.subscribe_topic(topic_name)
@Slot()
def on_topic_edit_returnPressed(self):
if self.subscribe_topic_button.isEnabled():
self.subscribe_topic(str(self.topic_edit.text()))
@Slot()
def on_subscribe_topic_button_clicked(self):
self.subscribe_topic(str(self.topic_edit.text()))
def subscribe_topic(self, topic_name):
self.topic_with_field_name = topic_name
self.pub_image = rospy.Publisher(topic_name + "/histogram_image", Image)
if not self._rosdata:
self._rosdata = ROSData(topic_name, self._start_time)
else:
if self._rosdata != topic_name:
self._rosdata.close()
self.data_plot.clear()
self._rosdata = ROSData(topic_name, self._start_time)
else:
rospy.logwarn("%s is already subscribed", topic_name)
def enable_timer(self, enabled=True):
if enabled:
self._update_plot_timer.start(self._redraw_interval)
else:
self._update_plot_timer.stop()
@Slot()
def on_clear_button_clicked(self):
self.data_plot.clear()
@Slot(bool)
def on_pause_button_clicked(self, checked):
self.enable_timer(not checked)
def update_plot(self):
if not self._rosdata:
return
data_x, data_y = self._rosdata.next()
if len(data_y) == 0:
return
axes = self.data_plot._canvas.axes
axes.cla()
if self._rosdata.sub.data_class is HistogramWithRange:
xs = [y.count for y in data_y[-1].bins]
pos = [y.min_value for y in data_y[-1].bins]
widths = [y.max_value - y.min_value for y in data_y[-1].bins]
axes.set_xlim(xmin=pos[0], xmax=pos[-1] + widths[-1])
else:
xs = data_y[-1]
pos = np.arange(len(xs))
widths = [1] * len(xs)
axes.set_xlim(xmin=0, xmax=len(xs))
#axes.xticks(range(5))
for p, x, w in zip(pos, xs, widths):
axes.bar(p, x, color='r', align='center', width=w)
axes.legend([self.topic_with_field_name], prop={'size': '8'})
self.data_plot._canvas.draw()
buffer = StringIO()
self.data_plot._canvas.figure.savefig(buffer, format="png")
buffer.seek(0)
img_array = np.asarray(bytearray(buffer.read()), dtype=np.uint8)
img = cv2.imdecode(img_array, cv2.CV_LOAD_IMAGE_COLOR)
self.pub_image.publish(self.cv_bridge.cv2_to_imgmsg(img, "bgr8"))
class MatHistogramPlot(QWidget):
class Canvas(FigureCanvas):
def __init__(self, parent=None):
super(MatHistogramPlot.Canvas, self).__init__(Figure())
self.axes = self.figure.add_subplot(111)
self.figure.tight_layout()
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.updateGeometry()
def resizeEvent(self, event):
super(MatHistogramPlot.Canvas, self).resizeEvent(event)
self.figure.tight_layout()
def __init__(self, parent=None):
super(MatHistogramPlot, self).__init__(parent)
self._canvas = MatHistogramPlot.Canvas()
self._toolbar = NavigationToolbar(self._canvas, self._canvas)
vbox = QVBoxLayout()
vbox.addWidget(self._toolbar)
vbox.addWidget(self._canvas)
self.setLayout(vbox)
def redraw(self):
pass
def clear(self):
self._canvas.axes.cla()
self._canvas.draw()
| mit | 4,489,442,462,833,596,400 | 39.214286 | 113 | 0.632581 | false |
guziy/basemap | examples/save_background.py | 2 | 1364 | from __future__ import (absolute_import, division, print_function)
import matplotlib, sys
matplotlib.use('Agg')
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# this example shows how to save a map background and
# reuse it in another figure.
# make sure we have all the same properties on all figs
figprops = dict(figsize=(8,6), dpi=100, facecolor='white')
# generate the first figure.
fig1 = plt.figure(1,**figprops)
ax1 = fig1.add_subplot(111)
# create basemap instance, plot coastlines.
map = Basemap(projection='moll',lon_0=0)
map.drawcoastlines()
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
fig1.canvas.draw()
background = fig1.canvas.copy_from_bbox(fig1.bbox)
# save figure 1.
fig1.savefig('figure1.png', dpi=100)
# generate the second figure, re-using the background
# from figure 1.
fig2 = plt.figure(2,frameon=False,**figprops)
# make sure frame is off, or everything in existing background
# will be obliterated.
ax2 = fig2.add_subplot(111,frameon=False)
# restore previous background.
fig2.canvas.restore_region(background)
# draw parallels and meridians on existing background.
map.drawparallels(range(-90,90,30))
map.drawmeridians(range(-180,180,60))
# save figure 2.
fig2.savefig('figure2.png', dpi=100)
sys.stdout.write('images saved in figure1.png and figure2.png\n')
| gpl-2.0 | 4,759,339,645,438,496,000 | 32.268293 | 66 | 0.76173 | false |
vatsan/pandas_via_psql | setup.py | 2 | 4301 | from setuptools import setup, find_packages
from distutils.util import convert_path
import os,sys
from fnmatch import fnmatchcase
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info','plots')
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, '__init__.py'))
and not prefix):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
setup(
name='ppsqlviz',
version='1.0.1',
author='Srivatsan Ramanujam',
author_email='vatsan.cs@utexas.edu',
url='http://vatsan.github.io/pandas_via_psql/',
packages=find_packages(),
package_data=find_package_data(only_in_packages=False,show_ignored=True),
include_package_data=True,
license='LICENSE.txt',
description='A command line visualization utility for SQL using Pandas library in Python.',
long_description=open('README.md').read(),
install_requires=[
"pandas >= 0.13.0"
],
)
| bsd-2-clause | -4,267,674,333,726,588,400 | 40.355769 | 95 | 0.549872 | false |
stephane-caron/pymanoid | examples/contact_stability/zmp_support_area.py | 3 | 5631 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Stephane Caron <stephane.caron@normalesup.org>
#
# This file is part of pymanoid <https://github.com/stephane-caron/pymanoid>.
#
# pymanoid is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
"""
This example computes the multi-contact ZMP support area for a given robot
stance (contacts and CoM position). See [Caron16] for details.
"""
import IPython
from numpy import zeros
import pymanoid
from pymanoid import Stance
from pymanoid.gui import PointMassWrenchDrawer
from pymanoid.gui import draw_polygon
from pymanoid.misc import matplotlib_to_rgb, norm
com_height = 0.9 # [m]
z_polygon = 2.
class SupportAreaDrawer(pymanoid.Process):

    """
    Draw the pendular ZMP area of a contact set.

    Parameters
    ----------
    stance : Stance
        Contacts and COM position of the robot.
    height : scalar, optional
        Height of the ZMP support area in the world frame.
    color : tuple or string, optional
        Area color as an RGBA tuple or a matplotlib color name.
        Defaults to the semi-transparent blue previously hard-coded.
    """

    def __init__(self, stance, height=0., color=None):
        self.stance = stance  # before calling parent constructor
        if color is None:
            # Default matches the blue used by the original hard-coded call.
            color = (0., 0., 0.5, 0.5)
        if type(color) is str:
            color = matplotlib_to_rgb(color) + [0.5]
        super(SupportAreaDrawer, self).__init__()
        self.color = color
        self.contact_poses = {}
        self.handle = None
        self.height = height
        self.last_com = stance.com.p
        # (fix: the original assigned self.stance a second time here)
        self.update_contact_poses()
        self.update_polygon()

    def clear(self):
        """Drop the drawing handle so the area is redrawn on the next tick."""
        self.handle = None

    def update_contact_poses(self):
        """Cache the pose of every contact so pose changes can be detected."""
        for contact in self.stance.contacts:
            self.contact_poses[contact.name] = contact.pose

    def update_height(self, height):
        """Set the drawing height [m] in the world frame and redraw."""
        self.height = height
        self.update_polygon()

    def update_polygon(self):
        """Recompute the ZMP support area and redraw its polygon."""
        self.handle = None
        try:
            vertices = self.stance.compute_zmp_support_area(self.height)
            # Fix: honor the color passed to the constructor; the original
            # ignored self.color and always drew the polygon in blue.
            self.handle = draw_polygon(
                [(x[0], x[1], self.height) for x in vertices],
                normal=[0, 0, 1], color=self.color)
        except Exception as e:
            print("SupportAreaDrawer: {}".format(e))

    def on_tick(self, sim):
        """Redraw when a contact pose or the COM position has moved."""
        if self.handle is None:
            self.update_polygon()
        for contact in self.stance.contacts:
            if norm(contact.pose - self.contact_poses[contact.name]) > 1e-10:
                self.update_contact_poses()
                self.update_polygon()
                break
        if norm(self.stance.com.p - self.last_com) > 1e-10:
            self.update_contact_poses()
            self.update_polygon()
            self.last_com = self.stance.com.p
class StaticWrenchDrawer(PointMassWrenchDrawer):

    """
    Draw contact wrenches applied to a robot in static-equilibrium.

    Parameters
    ----------
    stance : Stance
        Contacts and COM position of the robot.
    """

    def __init__(self, stance):
        super(StaticWrenchDrawer, self).__init__(stance.com, stance)
        # Static equilibrium: force the COM acceleration to zero.
        stance.com.set_accel(zeros((3,)))
        self.stance = stance

    def find_supporting_wrenches(self, sim):
        # Delegate to the stance: solve for contact wrenches supporting
        # the robot statically.
        return self.stance.find_static_supporting_wrenches()
class COMSync(pymanoid.Process):

    """Copy the (x, y) position of a GUI handle onto the stance COM target.

    Lets the user drag the handle box in the viewer to steer the robot's
    center of mass; the z coordinate of the COM target is left unchanged.
    """

    def __init__(self, stance, com_above):
        super(COMSync, self).__init__()
        self.com_above = com_above
        self.stance = stance

    def on_tick(self, sim):
        # Track only the horizontal coordinates of the handle.
        self.stance.com.set_x(self.com_above.x)
        self.stance.com.set_y(self.com_above.y)
if __name__ == "__main__":
    sim = pymanoid.Simulation(dt=0.03)
    robot = pymanoid.robots.JVRC1('JVRC-1.dae', download_if_needed=True)
    sim.set_viewer()
    # Fixed camera pose (4x4 homogeneous transform) for the example scene.
    sim.viewer.SetCamera([
        [0.60587192, -0.36596244, 0.70639274, -2.4904027],
        [-0.79126787, -0.36933163, 0.48732874, -1.6965636],
        [0.08254916, -0.85420468, -0.51334199, 2.79584694],
        [0., 0., 0., 1.]])
    robot.set_transparency(0.25)
    # Draggable handle used by COMSync to steer the COM horizontally.
    com_above = pymanoid.Cube(0.02, [0.05, 0.04, z_polygon], color='b')
    stance = Stance.from_json('stances/double.json')
    stance.bind(robot)
    robot.ik.solve()
    com_sync = COMSync(stance, com_above)
    support_area_drawer = SupportAreaDrawer(stance, z_polygon)
    wrench_drawer = StaticWrenchDrawer(stance)
    # IK runs in the main simulation loop; drawers and COM sync are extras.
    sim.schedule(robot.ik)
    sim.schedule_extra(com_sync)
    sim.schedule_extra(support_area_drawer)
    sim.schedule_extra(wrench_drawer)
    sim.start()
    print("""
ZMP support area
================
Ready to go! The GUI displays the ZMP pendular support area in blue. You can
move the blue box (in the plane above the robot) around to make the robot move
its center of mass. Contact wrenches are displayed at each contact (green dot
is COP location, arrow is resultant force). When the COM exists the
static-equilibrium polygon, you should see the background turn red as no
feasible contact wrenches can be found.
Enjoy :)
""")
    # Drop into an interactive shell unless already running inside IPython.
    if IPython.get_ipython() is None:
        IPython.embed()
| gpl-3.0 | -3,208,270,963,893,853,000 | 30.110497 | 79 | 0.640206 | false |
rema-git/lichtmalen | image_to_tpm2.py | 1 | 3589 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 27 00:33:17 2014
@author: Reinhardt A.W. Maier <rema@zaehlwerk.net>
"""
import os
import argparse
import binascii
import numpy as np
import Image as pil
#import textwrap
#import matplotlib.pyplot as plt
def tpm2(image, lastFrameBlack=False):
    """
    generate TPM2 file format:
    * image as numpy array with dim(height, width, color)
    * returns tpm2 as string

    Each image row becomes one TPM2 frame: a 'C9DA' start marker, a 16-bit
    big-endian payload length (3 bytes per LED), one hex byte pair per color
    channel, and a '36' end-of-frame byte.  With lastFrameBlack=True an
    all-zero frame is appended so the LED strip goes dark after playback.
    """
    dim = tuple((np.shape(image)[0], np.shape(image)[1]))  # (height, width)
    frameheader = 'C9DA{:04X}'.format(dim[1]*3)  # payload size in bytes
    output = []
    for frame in range(dim[0]): # loop over lines = height
        output += frameheader
        for led in range(dim[1]): # loop over columns = width
            output += '{:02X}{:02X}{:02X}'.format(*image[frame][led])
        output += '36' # end-of-frame
    if lastFrameBlack:
        output += frameheader + '0'*6*dim[1] + '36' # black frame
        print 'Added black frame to EOF'
    return ''.join(output)
def imageFilter(image):
    """Example filter hook: rotate the image by -90 degrees.

    Replace or extend this function to apply custom transformations
    before the image is converted to TPM2 frames.
    """
    return image.rotate(-90)
def imageFit2LEDs(image, n_LEDs=121):
    """Scale the image so its width equals the LED strip length.

    The height is scaled by the same factor (aspect ratio preserved,
    truncated to an integer number of rows).
    """
    width = image.size[0]
    height = image.size[1]
    scale_factor = n_LEDs / float(width)
    new_height = int(float(height) * float(scale_factor))
    return image.resize((n_LEDs, new_height))
def rgb2grb(image):
    """Swap the color order of a (height, width, 3) array: RGB -> GRB.

    Returns a new array; the input is left untouched.
    """
    # Fancy indexing on the last axis reorders the channels in one step.
    return image[..., [1, 0, 2]]
def main(imageFilename, tpm2Filename, *opts):
"""
open image, apply filter function and save as TPM2 binary file
"""
# open image file
try:
image = pil.open(imageFilename)
print 'Image read from', imageFilename
except:
print 'ERROR: cannot read input image file!'
# filter image
if image.mode != 'RGB':
print 'Convert image to RGB'
image = image.convert('RGB')
image = imageFilter(image)
image = imageFit2LEDs(image)
# convert to numpy array with dim(height, width, color)
image = np.array(image)
# swap colors
image = rgb2grb(image)
# display image
#plt.imshow(image, interpolation='none')
#plt.show()
# convert image to tpm2
tpm2string = tpm2(image, *opts)
print 'Image successfully converted'
# show result to screen
#print textwrap.fill('\n' + tpm2string + '\n')
# write result to file
with open(tpm2Filename, 'wb') as binFile:
tpm2binary = binascii.a2b_hex(tpm2string)
binFile.write(tpm2binary)
print 'TPM2 file written to', tpm2Filename
if __name__ == "__main__":
    # if this module is being run directly use command line arguments
    parser = argparse.ArgumentParser(description='convert an image file to tpm2 format')
    parser.add_argument('--noloop',
                        action='store_true', dest='lastFrameBlack',
                        help='add a black frame to stop with')
    parser.add_argument('infile',
                        type=argparse.FileType('r'),
                        help="image file to be converted. Supported are all common image formats, e.g. .jpg, .png, .gif, .bmp")
    parser.add_argument('outfile',
                        type=argparse.FileType('w'), default=None, nargs='?',
                        help="tpm2 file to be created (default: infile.tp2)")
    args = parser.parse_args()
    # set output filename, if not given use input filename with extension .tp2
    # NOTE(review): 'args.outfile is None' would be the idiomatic comparison.
    if args.outfile == None:
        outfile = os.path.splitext(args.infile.name)[0] + '.tp2'
    else:
        outfile = args.outfile.name
    main(args.infile.name, outfile, args.lastFrameBlack)
| gpl-3.0 | -8,740,749,091,339,044,000 | 28.178862 | 111 | 0.629145 | false |
activitynet/ActivityNet | Evaluation/get_ava_active_speaker_performance.py | 1 | 8421 | r"""Compute active speaker detection performance for the AVA dataset.
Please send any questions about this code to the Google Group ava-dataset-users:
https://groups.google.com/forum/#!forum/ava-dataset-users
Example usage:
python -O get_ava_active_speaker_performance.py \
-g testdata/eval.csv \
-p testdata/predictions.csv \
-v
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import time
import numpy as np
import pandas as pd
def compute_average_precision(precision, recall):
  """Compute Average Precision according to the definition in VOCdevkit.

  Precision is modified to ensure that it does not decrease as recall
  decrease.

  Args:
    precision: A float [N, 1] numpy array of precisions
    recall: A float [N, 1] numpy array of recalls

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    average_precison: The area under the precision recall curve. NaN if
      precision and recall are None.
  """
  if precision is None:
    if recall is not None:
      raise ValueError("If precision is None, recall must also be None")
    # Fix: np.NAN was removed in NumPy 2.0; np.nan is the canonical name.
    return np.nan

  if not isinstance(precision, np.ndarray) or not isinstance(
      recall, np.ndarray):
    raise ValueError("precision and recall must be numpy array")
  # Fix: np.float (alias of builtin float, i.e. float64) was removed in
  # NumPy 1.24; compare against np.float64 explicitly (same behavior).
  if precision.dtype != np.float64 or recall.dtype != np.float64:
    raise ValueError("input must be float numpy array.")
  if len(precision) != len(recall):
    raise ValueError("precision and recall must be of the same size.")
  if not precision.size:
    return 0.0
  if np.amin(precision) < 0 or np.amax(precision) > 1:
    raise ValueError("Precision must be in the range of [0, 1].")
  if np.amin(recall) < 0 or np.amax(recall) > 1:
    raise ValueError("recall must be in the range of [0, 1].")
  if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
    raise ValueError("recall must be a non-decreasing array")

  # Pad the curve so it spans recall 0..1 with precision 0 at both ends.
  recall = np.concatenate([[0], recall, [1]])
  precision = np.concatenate([[0], precision, [0]])

  # Smooth precision to be monotonically decreasing.
  for i in range(len(precision) - 2, -1, -1):
    precision[i] = np.maximum(precision[i], precision[i + 1])

  # Integrate the step function over the recall intervals where it changes.
  indices = np.where(recall[1:] != recall[:-1])[0] + 1
  average_precision = np.sum(
      (recall[indices] - recall[indices - 1]) * precision[indices])
  return average_precision
def load_csv(filename, column_names):
  """Loads CSV from the filename using given column names.

  Adds uid column.

  Args:
    filename: Path to the CSV file to load.
    column_names: A list of column names for the data.

  Returns:
    df: A Pandas DataFrame containing the data.
  """
  frame = pd.read_csv(filename, header=None, names=column_names)
  # uid uniquely identifies a (timestamp, entity) pair within a video.
  frame["uid"] = frame["frame_timestamp"].map(str) + ":" + frame["entity_id"]
  return frame
def eq(a, b, tolerance=1e-09):
  """Returns true if a and b differ by at most the given tolerance."""
  difference = a - b
  return -tolerance <= difference <= tolerance
def merge_groundtruth_and_predictions(df_groundtruth, df_predictions):
  """Merges groundtruth and prediction DataFrames.

  The returned DataFrame is merged on uid field and sorted in descending order
  by score field. Bounding boxes are checked to make sure they match between
  groundtruth and predictions.

  Args:
    df_groundtruth: A DataFrame with groundtruth data.
    df_predictions: A DataFrame with predictions data.

  Returns:
    df_merged: A merged DataFrame, with rows matched on uid column.

  Raises:
    ValueError: if the row counts differ, predictions carry labels other
      than SPEAKING_AUDIBLE, scores are missing, or bounding boxes disagree
      between groundtruth and predictions for the same uid.
  """
  if df_groundtruth["uid"].count() != df_predictions["uid"].count():
    raise ValueError(
        "Groundtruth and predictions CSV must have the same number of "
        "unique rows.")
  if df_predictions["label"].unique() != ["SPEAKING_AUDIBLE"]:
    raise ValueError(
        "Predictions CSV must contain only SPEAKING_AUDIBLE label.")
  if df_predictions["score"].count() < df_predictions["uid"].count():
    raise ValueError("Predictions CSV must contain score value for every row.")
  # Merges groundtruth and predictions on uid, validates that uid is unique
  # in both frames, and sorts the resulting frame by the predictions score.
  df_merged = df_groundtruth.merge(
      df_predictions,
      on="uid",
      suffixes=("_groundtruth", "_prediction"),
      validate="1:1").sort_values(
          by=["score"], ascending=False).reset_index()
  # Validates that bounding boxes in ground truth and predictions match for the
  # same uids.
  df_merged["bounding_box_correct"] = np.where(
      eq(df_merged["entity_box_x1_groundtruth"],
         df_merged["entity_box_x1_prediction"])
      & eq(df_merged["entity_box_x2_groundtruth"],
           df_merged["entity_box_x2_prediction"])
      & eq(df_merged["entity_box_y1_groundtruth"],
           df_merged["entity_box_y1_prediction"])
      & eq(df_merged["entity_box_y2_groundtruth"],
           df_merged["entity_box_y2_prediction"]), True, False)

  if (~df_merged["bounding_box_correct"]).sum() > 0:
    raise ValueError(
        "Mismatch between groundtruth and predictions bounding boxes found at "
        + str(list(df_merged[~df_merged["bounding_box_correct"]]["uid"])))

  return df_merged
def get_all_positives(df_merged):
  """Counts all positive examples in the groundtruth dataset."""
  speaking = df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE"
  return df_merged.loc[speaking, "uid"].count()
def calculate_precision_recall(df_merged):
  """Calculates precision and recall arrays going through df_merged row-wise.

  NOTE: mutates df_merged in place, adding is_tp/tp/precision/recall columns.
  Assumes rows are already sorted by descending score with a fresh 0-based
  index, as produced by merge_groundtruth_and_predictions().
  """
  all_positives = get_all_positives(df_merged)

  # Populates each row with 1 if this row is a true positive
  # (at its score level).
  df_merged["is_tp"] = np.where(
      (df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE") &
      (df_merged["label_prediction"] == "SPEAKING_AUDIBLE"), 1, 0)

  # Counts true positives up to and including that row.
  df_merged["tp"] = df_merged["is_tp"].cumsum()

  # Calculates precision for every row counting true positives up to
  # and including that row over the index (1-based) of that row.
  df_merged["precision"] = df_merged["tp"] / (df_merged.index + 1)

  # Calculates recall for every row counting true positives up to
  # and including that row over all positives in the groundtruth dataset.
  df_merged["recall"] = df_merged["tp"] / all_positives

  logging.info(
      "\n%s\n",
      df_merged.head(10)[[
          "uid", "score", "label_groundtruth", "is_tp", "tp", "precision",
          "recall"
      ]])

  return np.array(df_merged["precision"]), np.array(df_merged["recall"])
def run_evaluation(groundtruth, predictions):
  """Runs AVA Active Speaker evaluation, printing average precision result."""
  # Column layouts follow the AVA active speaker CSV specification; the
  # predictions file carries one extra trailing "score" column.
  df_groundtruth = load_csv(
      groundtruth,
      column_names=[
          "video_id", "frame_timestamp", "entity_box_x1", "entity_box_y1",
          "entity_box_x2", "entity_box_y2", "label", "entity_id"
      ])
  df_predictions = load_csv(
      predictions,
      column_names=[
          "video_id", "frame_timestamp", "entity_box_x1", "entity_box_y1",
          "entity_box_x2", "entity_box_y2", "label", "entity_id", "score"
      ])
  df_merged = merge_groundtruth_and_predictions(df_groundtruth, df_predictions)
  precision, recall = calculate_precision_recall(df_merged)

  print("average precision: ", compute_average_precision(precision, recall))
def parse_arguments():
  """Parses command-line flags.

  Returns:
    args: an argparse namespace containing two file objects
      (args.groundtruth and args.predictions) and the boolean args.verbose.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      "-g",
      "--groundtruth",
      help="CSV file containing ground truth.",
      type=argparse.FileType("r"),
      required=True)
  parser.add_argument(
      "-p",
      "--predictions",
      help="CSV file containing active speaker predictions.",
      type=argparse.FileType("r"),
      required=True)
  parser.add_argument(
      "-v", "--verbose", help="Increase output verbosity.", action="store_true")
  return parser.parse_args()
def main():
  """Entry point: parse CLI flags, run the evaluation, log elapsed time."""
  start = time.time()
  args = parse_arguments()
  if args.verbose:
    logging.basicConfig(level=logging.DEBUG)
  # run_evaluation() only accepts groundtruth/predictions, so drop verbose
  # before splatting the namespace into keyword arguments.
  del args.verbose
  run_evaluation(**vars(args))
  logging.info("Computed in %s seconds", time.time() - start)


if __name__ == "__main__":
  main()
| mit | -3,192,975,491,083,719,000 | 33.093117 | 80 | 0.676879 | false |
CCS-Lab/hBayesDM | Python/hbayesdm/models/_pst_Q.py | 1 | 10143 | from typing import Sequence, Union, Any
from collections import OrderedDict
from numpy import Inf, exp
import pandas as pd
from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import pst_preprocess_func
__all__ = ['pst_Q']
class PstQ(TaskModel):
    """Q-learning model ('Q') for the Probabilistic Selection Task ('pst').

    Thin configuration subclass: all fitting behavior lives in
    hbayesdm.base.TaskModel.  This class only declares the task/model
    names, the required data columns, the parameter priors as
    (lower bound, plausible value, upper bound) triples, and the
    preprocessing function.
    """

    def __init__(self, **kwargs):
        super().__init__(
            task_name='pst',
            model_name='Q',
            model_type='',
            data_columns=(
                'subjID',
                'type',
                'choice',
                'reward',
            ),
            parameters=OrderedDict([
                ('alpha', (0, 0.5, 1)),
                ('beta', (0, 1, 10)),
            ]),
            regressors=OrderedDict([
            ]),
            postpreds=['y_pred'],
            parameters_desc=OrderedDict([
                ('alpha', 'learning rate'),
                ('beta', 'inverse temperature'),
            ]),
            additional_args_desc=OrderedDict([
            ]),
            **kwargs,
        )

    # Converts raw user data into the dict of Stan inputs.
    _preprocess_func = pst_preprocess_func
def pst_Q(
        data: Union[pd.DataFrame, str, None] = None,
        niter: int = 4000,
        nwarmup: int = 1000,
        nchain: int = 4,
        ncore: int = 1,
        nthin: int = 1,
        inits: Union[str, Sequence[float]] = 'vb',
        ind_pars: str = 'mean',
        model_regressor: bool = False,
        vb: bool = False,
        inc_postpred: bool = False,
        adapt_delta: float = 0.95,
        stepsize: float = 1,
        max_treedepth: int = 10,
        **additional_args: Any) -> TaskModel:
    """Probabilistic Selection Task - Q Learning Model

    Hierarchical Bayesian modeling of the Probabilistic Selection Task
    using the Q Learning Model (Frank et al., 2007, PNAS 104(41)) with
    parameters "alpha" (learning rate) and "beta" (inverse temperature).

    Parameters
    ----------
    data
        Pandas DataFrame, path to a tab-delimited data file, or
        ``"example"`` for example data.  Required columns: "subjID"
        (subject identifier), "type" (two-digit stimulus-pair code),
        "choice" (1 if option1 chosen, else 0), "reward" (amount earned).
        Extra columns are ignored.
    niter, nwarmup, nchain, ncore, nthin
        MCMC controls: total iterations including warm-up, warm-up
        (burn-in) iterations, number of chains, CPU cores, and thinning
        interval.
    inits
        ``'fixed'``, ``'random'``, ``'vb'``, or explicit initial values.
    ind_pars
        Summary statistic for individual parameters: ``'mean'``,
        ``'median'``, or ``'mode'``.
    model_regressor
        Model-based regressors; not available for this model.
    vb
        Use variational inference instead of full MCMC sampling.
    inc_postpred
        Include trial-level posterior predictive simulations (may greatly
        increase output size).
    adapt_delta, stepsize, max_treedepth
        Advanced HMC/NUTS sampler controls (see Hoffman & Gelman, 2014,
        and the Stan User's Guide); change only if you know the sampler.
    **additional_args
        Not used for this model.

    Returns
    -------
    TaskModel
        A fitted ``hbayesdm.TaskModel`` instance exposing ``model``,
        ``all_ind_pars``, ``par_vals``, ``fit`` and ``raw_data``.
    """
    sampler_config = dict(
        data=data,
        niter=niter,
        nwarmup=nwarmup,
        nchain=nchain,
        ncore=ncore,
        nthin=nthin,
        inits=inits,
        ind_pars=ind_pars,
        model_regressor=model_regressor,
        vb=vb,
        inc_postpred=inc_postpred,
        adapt_delta=adapt_delta,
        stepsize=stepsize,
        max_treedepth=max_treedepth,
    )
    return PstQ(**sampler_config, **additional_args)
| gpl-3.0 | -2,116,428,267,287,271,000 | 42.161702 | 566 | 0.643104 | false |
linebp/pandas | pandas/tests/dtypes/test_inference.py | 1 | 35947 | # -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import tslib, lib
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical)
from pandas.compat import u, PY2, PY3, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
_ensure_int32,
_ensure_categorical)
from pandas.core.dtypes.missing import isnull
from pandas.util import testing as tm
def test_is_sequence():
    # Tuples and lists are sequences; strings, NumPy scalar types, and
    # objects exposing only __getitem__ are not.
    is_seq = inference.is_sequence
    assert (is_seq((1, 2)))
    assert (is_seq([1, 2]))
    assert (not is_seq("abcd"))
    assert (not is_seq(u("abcd")))
    assert (not is_seq(np.int64))

    class A(object):

        def __getitem__(self):
            return 1

    assert (not is_seq(A()))


def test_is_list_like():
    # Containers, sets, Series and .str accessors are list-like;
    # scalars, strings, and arbitrary objects are not.
    passes = ([], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
              Series([]), Series(['a']).str)
    fails = (1, '2', object())

    for p in passes:
        assert inference.is_list_like(p)

    for f in fails:
        assert not inference.is_list_like(f)


@pytest.mark.parametrize('inner', [
    [], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),
    Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
    list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
    # A list-like of list-likes should be recognized as nested list-like.
    result = outer([inner for _ in range(5)])
    # NOTE(review): this likely intended inference.is_nested_list_like;
    # as written it only checks the weaker is_list_like property.
    assert inference.is_list_like(result)


@pytest.mark.parametrize('obj', [
    'abc', [], [1], (1,), ['a'], 'a', {'a'},
    [1, 2, 3], Series([1]), DataFrame({"A": [1]}),
    ([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
    # Flat containers, strings, scalars and generators all fail.
    assert not inference.is_nested_list_like(obj)


def test_is_dict_like():
    # Mappings and Series (key-indexed) are dict-like; plain sequences
    # and Index objects are not.
    passes = [{}, {'A': 1}, Series([1])]
    fails = ['1', 1, [1, 2], (1, 2), range(2), Index([1])]

    for p in passes:
        assert inference.is_dict_like(p)

    for f in fails:
        assert not inference.is_dict_like(f)
def test_is_file_like():
    # File-likes must have a read or write method *and* be iterable;
    # plain objects and bare iterables fail.
    class MockFile(object):
        pass

    is_file = inference.is_file_like

    data = StringIO("data")
    assert is_file(data)

    # No read / write attributes
    # No iterator attributes
    m = MockFile()
    assert not is_file(m)

    MockFile.write = lambda self: 0

    # Write attribute but not an iterator
    m = MockFile()
    assert not is_file(m)

    # gh-16530: Valid iterator just means we have the
    # __iter__ attribute for our purposes.
    MockFile.__iter__ = lambda self: self

    # Valid write-only file
    m = MockFile()
    assert is_file(m)

    del MockFile.write
    MockFile.read = lambda self: 0

    # Valid read-only file
    m = MockFile()
    assert is_file(m)

    # Iterator but no read / write attributes
    data = [1, 2, 3]
    assert not is_file(data)

    if PY3:
        from unittest import mock
        assert not is_file(mock.Mock())
def test_is_named_tuple():
    # Only instances of collections.namedtuple subclasses qualify;
    # plain tuples, strings, and Series do not.
    Triple = collections.namedtuple('Test', list('abc'))
    matches = (Triple(1, 2, 3),)
    non_matches = ((1, 2, 3), 'a', Series({'pi': 3.14}))

    for candidate in matches:
        assert inference.is_named_tuple(candidate)

    for candidate in non_matches:
        assert not inference.is_named_tuple(candidate)
def test_is_hashable():
    # is_hashable() must actually try hash(), since some classes advertise
    # collections.Hashable but raise at hash time (and vice versa).

    # all new-style classes are hashable by default
    class HashableClass(object):
        pass

    class UnhashableClass1(object):
        __hash__ = None

    class UnhashableClass2(object):

        def __hash__(self):
            raise TypeError("Not hashable")

    hashable = (1,
                3.14,
                np.float64(3.14),
                'a',
                tuple(),
                (1, ),
                HashableClass(), )
    not_hashable = ([], UnhashableClass1(), )
    abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )

    for i in hashable:
        assert inference.is_hashable(i)
    for i in not_hashable:
        assert not inference.is_hashable(i)
    for i in abc_hashable_not_really_hashable:
        assert not inference.is_hashable(i)

    # numpy.array is no longer collections.Hashable as of
    # https://github.com/numpy/numpy/pull/5326, just test
    # is_hashable()
    assert not inference.is_hashable(np.array([]))

    # old-style classes in Python 2 don't appear hashable to
    # collections.Hashable but also seem to support hash() by default
    if PY2:

        class OldStyleClass():
            pass

        c = OldStyleClass()
        assert not isinstance(c, collections.Hashable)
        assert inference.is_hashable(c)
        hash(c)  # this will not raise
def test_is_re():
    # Only compiled regex pattern objects count as "re"; strings,
    # numbers, and arbitrary objects do not.
    matches = [re.compile('ad')]
    non_matches = ['x', 2, 3, object()]

    for candidate in matches:
        assert inference.is_re(candidate)

    for candidate in non_matches:
        assert not inference.is_re(candidate)
def test_is_recompilable():
    # Strings and already-compiled patterns can be passed to re.compile;
    # numbers, lists, and plain objects cannot.
    passes = (r'a', u('x'), r'asdf', re.compile('adsf'), u(r'\u2233\s*'),
              re.compile(r''))
    fails = 1, [], object()

    for p in passes:
        assert inference.is_re_compilable(p)

    for f in fails:
        assert not inference.is_re_compilable(f)
class TestInference(object):
    def test_infer_dtype_bytes(self):
        # Byte-string arrays infer as 'string' on PY2 and 'bytes' on PY3,
        # whether stored as fixed-width 'S1' or as objects.
        compare = 'string' if PY2 else 'bytes'

        # string array of bytes
        arr = np.array(list('abc'), dtype='S1')
        assert lib.infer_dtype(arr) == compare

        # object array of bytes
        arr = arr.astype(object)
        assert lib.infer_dtype(arr) == compare

    def test_isinf_scalar(self):
        # GH 11352: scalar +/-inf detection must distinguish sign and
        # reject non-float inputs such as ints and strings.
        assert lib.isposinf_scalar(float('inf'))
        assert lib.isposinf_scalar(np.inf)
        assert not lib.isposinf_scalar(-np.inf)
        assert not lib.isposinf_scalar(1)
        assert not lib.isposinf_scalar('a')

        assert lib.isneginf_scalar(float('-inf'))
        assert lib.isneginf_scalar(-np.inf)
        assert not lib.isneginf_scalar(np.inf)
        assert not lib.isneginf_scalar(1)
        assert not lib.isneginf_scalar('a')
    def test_maybe_convert_numeric_infinities(self):
        # see gh-13274
        # All casings of 'inf', with optional +/- signs, must parse to
        # +/-np.inf; junk prefixes must raise.
        infinities = ['inf', 'inF', 'iNf', 'Inf',
                      'iNF', 'InF', 'INf', 'INF']
        na_values = set(['', 'NULL', 'nan'])

        pos = np.array(['inf'], dtype=np.float64)
        neg = np.array(['-inf'], dtype=np.float64)

        msg = "Unable to parse string"

        for infinity in infinities:
            for maybe_int in (True, False):
                out = lib.maybe_convert_numeric(
                    np.array([infinity], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, pos)

                out = lib.maybe_convert_numeric(
                    np.array(['-' + infinity], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, neg)

                out = lib.maybe_convert_numeric(
                    np.array([u(infinity)], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, pos)

                out = lib.maybe_convert_numeric(
                    np.array(['+' + infinity], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, pos)

                # too many characters
                with tm.assert_raises_regex(ValueError, msg):
                    lib.maybe_convert_numeric(
                        np.array(['foo_' + infinity], dtype=object),
                        na_values, maybe_int)

    def test_maybe_convert_numeric_post_floatify_nan(self):
        # see gh-13314
        # Values matching nan_values must become NaN even after the array
        # has been floatified.
        data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
        expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
        nan_values = set([-999, -999.0])

        for coerce_type in (True, False):
            out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
            tm.assert_numpy_array_equal(out, expected)

    def test_convert_infs(self):
        # String 'inf'/'-inf' object arrays convert to float64.
        arr = np.array(['inf', 'inf', 'inf'], dtype='O')
        result = lib.maybe_convert_numeric(arr, set(), False)
        assert result.dtype == np.float64

        arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
        result = lib.maybe_convert_numeric(arr, set(), False)
        assert result.dtype == np.float64

    def test_scientific_no_exponent(self):
        # See PR 12215
        # Scientific notation lacking an exponent ('42E') coerces to NaN.
        arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
        result = lib.maybe_convert_numeric(arr, set(), False, True)
        assert np.all(np.isnan(result))

    def test_convert_non_hashable(self):
        # GH13324
        # make sure that we are handing non-hashables
        arr = np.array([[10.0, 2], 1.0, 'apple'])
        result = lib.maybe_convert_numeric(arr, set(), False, True)
        tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
    def test_convert_numeric_uint64(self):
        # Values above int64 range (2**63) must convert to uint64, whether
        # given as Python ints, strings, or np.uint64 scalars.
        arr = np.array([2**63], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)

        arr = np.array([str(2**63)], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)

        arr = np.array([np.uint64(2**63)], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)

    def test_convert_numeric_uint64_nan(self):
        # uint64 cannot represent NaN, so mixing uint64-range values with
        # NaN (or na_values hits) must raise when coercing, and pass the
        # array through unchanged otherwise.
        msg = 'uint64 array detected'
        cases = [(np.array([2**63, np.nan], dtype=object), set()),
                 (np.array([str(2**63), np.nan], dtype=object), set()),
                 (np.array([np.nan, 2**63], dtype=object), set()),
                 (np.array([np.nan, str(2**63)], dtype=object), set()),
                 (np.array([2**63, 2**63 + 1], dtype=object), set([2**63])),
                 (np.array([str(2**63), str(2**63 + 1)],
                           dtype=object), set([2**63]))]

        for coerce in (True, False):
            for arr, na_values in cases:
                if coerce:
                    with tm.assert_raises_regex(ValueError, msg):
                        lib.maybe_convert_numeric(arr, na_values,
                                                  coerce_numeric=coerce)
                else:
                    tm.assert_numpy_array_equal(lib.maybe_convert_numeric(
                        arr, na_values), arr)

    def test_convert_numeric_int64_uint64(self):
        # No common dtype holds both uint64-range and negative values, so
        # such mixes must raise when coercing and pass through otherwise.
        msg = 'uint64 and negative values detected'
        cases = [np.array([2**63, -1], dtype=object),
                 np.array([str(2**63), -1], dtype=object),
                 np.array([str(2**63), str(-1)], dtype=object),
                 np.array([-1, 2**63], dtype=object),
                 np.array([-1, str(2**63)], dtype=object),
                 np.array([str(-1), str(2**63)], dtype=object)]

        for coerce in (True, False):
            for case in cases:
                if coerce:
                    with tm.assert_raises_regex(ValueError, msg):
                        lib.maybe_convert_numeric(case, set(),
                                                  coerce_numeric=coerce)
                else:
                    tm.assert_numpy_array_equal(lib.maybe_convert_numeric(
                        case, set()), case)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_array(arr)
assert lib.is_timedelta64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_array(arr)
assert lib.is_timedelta64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_array(arr)
assert not lib.is_timedelta64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr))
assert result == 'categorical'
class TestNumberScalar(object):
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class Testisscalar(object):
def test_isscalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(u('efoobar'))
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_isscalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_isscalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_(u('foobar')))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
def test_isscalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix('1; 2'))
def test_isscalar_pandas_scalars(self):
assert is_scalar(Timestamp('2014-01-01'))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period('2014-01-01'))
def test_lisscalar_pandas_containers(self):
assert not is_scalar(Series())
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
with catch_warnings(record=True):
assert not is_scalar(Panel())
assert not is_scalar(Panel([[[1]]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
def test_datetimeindex_from_empty_datetime64_array():
for unit in ['ms', 'us', 'ns']:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert (len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A': np.asarray(
lrange(10), dtype='float64'),
'B': Timestamp('20010101')
}))
df.iloc[3:6, :] = np.nan
result = df.loc[4, 'B'].value
assert (result == tslib.iNaT)
s = df['B'].copy()
s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)
assert (isnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert (s[8].value == np.datetime64('NaT').astype(np.int64))
def test_is_scipy_sparse(spmatrix): # noqa: F811
tm._skip_if_no_scipy()
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = _ensure_int32(values)
assert (result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = _ensure_int32(values)
assert (result.dtype == np.int32)
def test_ensure_categorical():
values = np.arange(10, dtype=np.int32)
result = _ensure_categorical(values)
assert (result.dtype == 'category')
values = Categorical(values)
result = _ensure_categorical(values)
tm.assert_categorical_equal(result, values)
| bsd-3-clause | 3,442,768,526,486,067,000 | 33.300573 | 79 | 0.567919 | false |
Ernestyj/PyStudy | finance/DaysTest/MICAnalysis.py | 1 | 4363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from minepy import MINE
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", context="talk")
from sklearn import preprocessing
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
pd.set_option('precision', 7)
pd.options.display.float_format = '{:,.3f}'.format
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
'''
读入一支股票指定年份的ohlcv数据
输入:baseDir,stockCode为字符, startYear,yearNum为整数,
输出:dataframe
'''
def readWSDFile(baseDir, stockCode, startYear, yearNum=1):
# 解析日期
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep='\t', usecols=[0,2,3,4,5,6,7,9,10,12,15], header=None,
skiprows=1, names=['Date','Open','High','Low','Close','Volume','Amount',
'Chg','Chg Pct','Avg','Turn'],
parse_dates=True, date_parser=dateparse)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
usecols = [0, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 36, 37]
# usecols = [0, 6, 16, 17, 24, 31]
usecols = [0, 2,11,24,26,29,30]
# usecols = [0, 5,7,11,19,24,26,28]
def readWSDIndexFile(baseDir, stockCode, startYear, yearNum=1):
# 解析日期
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+'I'+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep=',', parse_dates=True, date_parser=dateparse
# , usecols=usecols
)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
baseDir = '/Users/eugene/Downloads/data/'
stockCodes = ['000300.SH', '000016.SH', '000905.SH']
i = 0
startYear = 2014
number = 2
df = readWSDFile(baseDir, stockCodes[i], startYear, number)
R = df['Close'].pct_change()
R[0] = R[1]
upOrDowns = []
for v in R.values:
if v>0: upOrDowns.append(1)
else: upOrDowns.append(-1)
# print upOrDowns
print 'Day count:', len(df)
# print df.head(5)
# df['R'] = R
dfi = readWSDIndexFile(baseDir, stockCodes[i], startYear, number)
dfi['R'] = R
print np.shape(df), np.shape(dfi)
allDF = pd.concat([df, dfi], axis=1)
scaler = preprocessing.MinMaxScaler()
X_Standard = scaler.fit_transform(df)
X_Standard_T = np.transpose(X_Standard)
Xi_Standard = scaler.fit_transform(dfi)
Xi_Standard_T = np.transpose(Xi_Standard)
X_ALL_Standard = scaler.fit_transform(allDF)
X_ALL_Standard_T = np.transpose(X_ALL_Standard)
print np.shape(X_ALL_Standard_T)
mine = MINE(alpha=0.6, c=15, est="mic_approx")
mics = []
# mine.compute_score(df['Close'].values, df['R'].values); print mine.mic()
# # for i in range(0,10):
# # mine.compute_score(X_Standard_T[i], X_Standard_T[10])
# # mics.append(mine.mic())
# # print i, mine.mic()
# for i in [7,9]:
# mine.compute_score(X_Standard_T[i], X_Standard_T[10])
# mics.append(mine.mic())
# print i, mine.mic()
# # for i in range(0,38):
# # mine.compute_score(Xi_Standard_T[i], Xi_Standard_T[38])
# # mics.append(mine.mic())
# # print i, mine.mic()
# for i in range(0,7):
# mine.compute_score(Xi_Standard_T[i], Xi_Standard_T[7])
# mics.append(mine.mic())
# print i, mine.mic()
#
for i in range(48):
mine.compute_score(X_ALL_Standard_T[i], X_ALL_Standard_T[48])
mics.append(mine.mic())
names = []
for c in allDF.columns.values: names.append(c)
map = {}
for i in range(48):
map[names[i]] = mics[i]
import operator
sorted_tuple = sorted(map.items(), key=operator.itemgetter(1))
vs = []
ks = []
for k,v in sorted_tuple:
ks.append(k); vs.append(v)
ks = ks[::-1]
vs = vs[::-1]
def plotMICHist():
f, ax = plt.subplots(figsize=(12, 6))
sns.barplot(ks, vs, palette="BuGn_d", ax=ax)
ax.set_ylabel("MIC")
plt.xticks(rotation=90)
f.subplots_adjust(bottom=0.2)
plt.show() | apache-2.0 | -3,788,239,183,814,603,000 | 29.06993 | 104 | 0.604327 | false |
LouisPlisso/analysis_tools | complements.py | 1 | 51874 | #!/usr/bin/env python
"""Module to provide missing stats for streaming analysis
"""
from __future__ import division, print_function
from operator import concat, itemgetter
from collections import defaultdict
from itertools import islice, cycle
from random import random
from tempfile import NamedTemporaryFile
import os
import numpy as np
# in case of non-interactive usage
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#import matplotlib.ticker as ticker
import INDEX_VALUES
import streaming_tools
#import aggregate
import flow2session
#from filter_streaming import generate_remaining_download_cnx_stream
# for 3D plot
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
from matplotlib.colors import colorConverter
#from matplotlib.ticker import NullLocator
# from sage colors
ORANGE = (1.0, 0.6470588235294118, 0.0)
from INDEX_VALUES import UNKNOWN_ID, EPSILON
WIDTH_IDX = 1
NBFLOW_IDX = 0
VOL_IDX = 0
def construct_bar_data(data_raw, vol_th, percent_functions,
as_list=('DAILYMOTION', 'ALL_YOUTUBE', 'GOOGLE')):
"Return a list of tuple (AS, dict of values) for plotting bars"
# warning hardcoded match of percent and as values
return dict([(trace,
dict([(as_name, dict([(percent_type,
len(filter(comp_func, data_as)))
for percent_type, comp_func
in percent_functions.items()]))
for as_name in as_list
for data_as in ([x[1] for x in data if x[2]
in INDEX_VALUES.__getattribute__('AS_' + as_name)],)]))
for trace in data_raw
for data in ([y for y in data_raw[trace] if y[0] > vol_th],)])
def load_stream_qual(data_dir='flows/stream_quality/links/AS'):
"Wrapper to load all streaming quality stats in a dict"
return dict([(f.split('GVB_', 1)[1].split('_GVB_STR_AS.npy')[0],
np.load(os.sep.join((data_dir, f))))
for f in os.listdir(data_dir)])
def generate_remaining_download(cnx_stream):
"Filter flows and generate the data of remainig download volume in percent"
return dict([(k, zip(v['Content-Length'],
100 * v['Session-Bytes'] / v['Content-Length'],
v['asBGP'], v['valid']))
for k, data in cnx_stream.iteritems()
for v in (data.compress(data['Content-Length'] != 0),)])
# for tmp in (data,) #.compress(data['valid'] == 'OK'),)
# for v in (tmp.compress(tmp['Session-Bytes'] <=
# tmp['Content-Length']),)])
def load_files_to_validate():
"Return a dict of remaining download stats for validation"
cnx_stream = {}
active_stats_dir = 'traces/active_captures/captures_streaming_full'
stats_file = 'streaming_stats_txt_AS_txt.npy'
for entry in os.listdir(active_stats_dir):
dir_entry = os.sep.join((active_stats_dir, entry))
if (os.path.isdir(dir_entry) and 'deezer' not in entry and
stats_file in os.listdir(dir_entry)):
cnx_stream[entry] = np.load(os.sep.join((dir_entry, stats_file)))
return cnx_stream
def plot_data(data, ax, color, as_name, val_max=120, err=False, hatch='+'):
"Plot data and its linear interpolation"
if len(data) > 0:
x, y = zip(*[(a, b) for a, b in data if b < val_max])
ax.plot(x, y , color + hatch,
label=': '.join((short(as_name)
+ (' err' if err else ''), str(len(data)))))
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
xis = sorted(x)
ax.plot(xis, map(p, xis), color + (':' if err else ''), lw=2)
def plot_filtered(filtered, ax, as_name, color, hatch='+'):
"Plot the filtered data on axes"
# instead of itemgetter: lambda (vol, per, bgp, valid): (vol, per)
# data_ok, data_err = [[(vol, per) for vol, per, bgp, valid in filtered
# if valid == valids]
# for valids in ('OK', 'ERROR')]
# need to hardcode in order to cope with multiple format: bad!
data_ok, data_err = [[(x[0], x[1]) for x in filtered
if x[3] == valids]
for valids in ('OK', 'ERROR')]
plot_data(data_err, ax, color, as_name, err=True, hatch=hatch)
plot_data(data_ok, ax, color, as_name, hatch=hatch)
def short(as_name):
"""Formats the as name in short form
"""
as_list = ('DAILYMOTION', 'GOOGLE', 'ALL_YOUTUBE', 'OTHER', 'ALL_GOOGLE')
short_list = ('DM', 'GOO', 'YT', 'OTH', 'GOO+YT')
try:
return dict(zip(as_list, short_list))[as_name]
except KeyError:
return as_name
def plot_remaining_download(data_remaining,
as_list=('ALL_YOUTUBE', 'GOOGLE', 'DAILYMOTION'),
#as_list=('DAILYMOTION', 'ALL_GOOGLE'),
plot_excluded=True, prefix='remaining_time',
out_dir='rapport/complements',
use_host_referer=False, good_indic=None,
loglog=True, logx=True, th=None, rescale=True):
"""Plot cdf for each value in dict
USE WITH
tools.filter_streaming.generate_remaining_cnx_stream
OR
DEPRECATED generate_remaining_download
"""
# formatter = dict(zip(as_list + ('OTHER',), ('bx', 'r*', 'g+')))
colors = dict(zip(('OTHER',) + as_list, ('g', 'b', 'r', 'c')))
hatches = cycle('xo+')
as_excluded = reduce(concat,
[INDEX_VALUES.__getattribute__('AS_' + as_name)
for as_name in as_list])
if good_indic:
all_streams = []
for k, v in data_remaining.iteritems():
if good_indic:
all_streams.extend(v)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.105, 0.2, 0.8, 0.7])
if th:
v = [x for x in v if x[0] > th]
# to have other plotted behind
# filtered = filter(lambda (vol, per, bgp, valid):
# bgp not in as_excluded, v)
if plot_excluded:
filtered = filter(lambda x: itemgetter(2)(x) not in as_excluded, v)
if len(filtered) != 0:
plot_filtered(filtered, ax, 'OTHER', colors['OTHER'],
hatch=hatches.next())
for as_name in as_list:
# filtered = filter(lambda (vol, per, bgp, valid): bgp in
# INDEX_VALUES.__getattribute__('AS_' + as_name), v)
#assert False
if not use_host_referer:
filtered = filter(lambda x: itemgetter(2)(x) in
INDEX_VALUES.__getattribute__('AS_' + as_name), v)
else:
if 'YOUTUBE' in as_name:
host_referer = 'youtube'
elif 'DAILYMOTION' in as_name:
host_referer = 'dailymotion'
else:
print('Warning: Assign youtube host to AS:', as_name)
host_referer = 'youtube'
filtered = filter(lambda x: host_referer in x[4], v)
corrected = filter(lambda x: x[1] >= 0 and x[1] <= 110, filtered)
if len(corrected) != 0:
plot_filtered(corrected, ax, as_name, colors[as_name],
hatch=hatches.next())
ax.set_title('Remaining Volume for ' + streaming_tools.format_title(k))
ax.set_ylabel('Percentage of Dowloaded Volume')
ax.set_xlabel('Content Length in Bytes' +
((' filtered on flows > %g' % th) if th else ''))
ax.grid(True)
# ax.legend(loc=(1.03,0.2), prop={'size': 10})
ax.legend(bbox_to_anchor=(0., -.22, 1., .102), loc=4,
ncol=len(as_list) + 1, mode="expand", borderaxespad=0.)
save_file = os.sep.join((out_dir, '_'.join((prefix, k))))
if th:
save_file += '_%g' % th
if logx:
ax.semilogx()
if rescale:
ax.set_ylim(0, 110)
# ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%g'))
ax.grid(True)
fig.savefig(save_file + '_logx.pdf', format='pdf')
if loglog:
ax.loglog()
ax.grid(True)
fig.savefig(save_file + '_loglog.pdf', format='pdf')
del(fig)
if good_indic:
good_streams = [(x[0], x[1]) for x in all_streams if x[5]]
bad_streams = [(x[0], x[1]) for x in all_streams if not x[5]]
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.105, 0.2, 0.8, 0.7])
plot_data(good_streams, ax, 'r', 'Good', hatch='+')
plot_data(bad_streams, ax, 'b', 'Bad', hatch='*')
ax.set_title('Remaining Volume for Streams on All Traces '
+ str(good_indic))
ax.set_ylabel('Percentage of Dowloaded Volume')
ax.set_xlabel('Content Length in Bytes' +
((' filtered on flows > %g' % th) if th else ''))
ax.grid(True)
# ax.legend(loc=(1.03,0.2), prop={'size': 10})
ax.legend(bbox_to_anchor=(0., -.22, 1., .102), loc=4,
ncol=len(as_list) + 1, mode="expand", borderaxespad=0.)
save_file = os.sep.join((out_dir,
'_'.join((prefix, 'all',
str(good_indic).replace(' ', '_')))))
if th:
save_file += '_%g' % th
if logx:
ax.semilogx()
if rescale:
ax.set_ylim(0, 110)
# ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%g'))
ax.grid(True)
fig.savefig(save_file + '_logx.pdf', format='pdf')
if loglog:
ax.loglog()
ax.grid(True)
fig.savefig(save_file + '_loglog.pdf', format='pdf')
del(fig)
def get_hhs(cnx_stream, as_field, agg_field, nb_hh=100):
    """Return a dict of all heavy hitters for all ASes.

    Aggregates the connections per AS (on ``as_field``/``agg_field``),
    then keeps only the top ``nb_hh`` entries per trace, excluding the
    unknown client identifiers.
    """
    aggregated = streaming_tools.connections_per_as(
        cnx_stream,
        as_field=as_field,
        key_ext='',
        agg_field=agg_field,
        already_filtered=True,
        dir_check=False,
    )
    return streaming_tools.get_top_n(
        aggregated, nb_top=nb_hh, exclude_list=UNKNOWN_ID)
def _hh_stats_table(first_label, nb_flows, nb_clients, vol, adsl, ftth,
                    as_list):
    """Build one LaTeX stats table for a pair of (ADSL, FTTH) traces.

    first_label: text of the top-left cell of the table.
    nb_flows, nb_clients, vol: dicts keyed by trace + AS name (+ '1MB').
    Returns the 13 rows joined by hline separators, with a trailing one.
    This helper replaces the two nearly identical inline table builders
    (the original code carried a '# duplicate code :(' comment).
    """
    def row(label, cell):
        "One LaTeX row: label, then one cell per (trace, AS name) pair"
        return (' & '.join([label]
                           + concat(*[[cell(k, a) for a in as_list]
                                      for k in (adsl, ftth)])) + r' \\')

    def ratio(num, den):
        "Format num / den with 3 significant digits, or NA if den is zero"
        return 'NA' if den == 0 else '%.3g' % (num / den)

    return '\n\hline\n'.join((
        row(first_label,
            lambda k, a: (k.split('_', 3)[-1].replace('_', ' ')
                          + ' ' + short(a))),
        row('Date', lambda k, a: '/'.join(k.split('_', 3)[0:3])),
        row('nb flows down in Nb', lambda k, a: str(nb_flows[k + a])),
        row('nb flows 1mb down in Nb',
            lambda k, a: str(nb_flows[k + a + '1MB'])),
        row('ratio nb flows 1MB/all',
            lambda k, a: ratio(nb_flows[k + a + '1MB'], nb_flows[k + a])),
        row('nb clients down in Nb', lambda k, a: str(nb_clients[k + a])),
        row('nb clients 1mb down in Nb',
            lambda k, a: str(nb_clients[k + a + '1MB'])),
        row('vol down in Bytes', lambda k, a: '%.3g' % vol[k + a]),
        row('vol down 1mb in Bytes',
            lambda k, a: '%.3g' % vol[k + a + '1MB']),
        row('avg nb flows down in Nb',
            lambda k, a: ratio(nb_flows[k + a], nb_clients[k + a])),
        row('avg nb flows down 1mb in Nb',
            lambda k, a: ratio(nb_flows[k + a + '1MB'],
                               nb_clients[k + a + '1MB'])),
        row('avg vol down in Bytes',
            lambda k, a: ratio(vol[k + a], nb_clients[k + a])),
        row('avg vol down 1mb in Bytes',
            lambda k, a: ratio(vol[k + a + '1MB'],
                               nb_clients[k + a + '1MB'])),
    )) + '\n\hline\n'


def stats_for_hh(flows, flows_1mb, nb_hh=20,
                 as_list=('DAILYMOTION', 'GOOGLE', 'ALL_YOUTUBE')):
    """Print stats for heavy hitters
    Use with data from h5 then separate_flows_dscp
    datas = tools.load_hdf5_data.load_h5_file('flows/hdf5/traces_lzf.h5')
    flows, flows_1mb = tools.streaming_tools.separate_flows_dscp(datas)
    stats, stats_hh = tools.complements.stats_for_hh(flows, flows_1mb)
    print(stats, open('rapport/table_stats_compl.tex', 'w'))
    print(stats_hh, open('rapport/table_stats_compl_hh.tex', 'w'))

    Returns a pair of LaTeX table bodies: one over all clients, one
    restricted to the top nb_hh heavy hitters (ranked by volume).
    """
    # one extra heavy hitter because the last entry is dropped downstream
    hh = get_hhs(flows, 'client_id', 'l3Bytes', nb_hh=(nb_hh + 1))
    nb_flows_hh = {}
    nb_clients_hh = {}
    vol_hh = {}
    nb_flows = {}
    nb_clients = {}
    vol = {}
    # both dicts must describe the same traces
    assert flows.keys() == flows_1mb.keys()
    for k in sorted(flows):
        # Materialize the heavy-hitter ids once per trace: the original
        # tested membership against a fresh map() object, which is a list
        # on Python 2 but a one-shot iterator on Python 3 (every test but
        # the first would silently be False).  A set also makes the
        # membership tests O(1).
        hh_clients = set(map(itemgetter(0), hh[k]))
        for is_1mb in (False, True):
            if is_1mb:
                flow_type = flows_1mb
                flow_size = '1MB'
            else:
                flow_type = flows
                flow_size = ''
            # only downstream matters
            data = flow_type[k].compress(flow_type[k]['direction'] ==
                                         INDEX_VALUES.DOWN)
            data_hh = data.compress([x['client_id'] in hh_clients
                                     for x in data])
            for as_name in as_list:
                as_numbers = INDEX_VALUES.__getattribute__('AS_' + as_name)
                data_as = data.compress([x['asBGP'] in as_numbers
                                         for x in data])
                nb_flows[k + as_name + flow_size] = len(data_as)
                nb_clients[k + as_name + flow_size] = len(np.unique(
                    data_as['client_id']))
                vol[k + as_name + flow_size] = sum(data_as['l3Bytes'])
                data_hh_as = data_hh.compress([x['asBGP'] in as_numbers
                                               for x in data_hh])
                nb_flows_hh[k + as_name + flow_size] = len(data_hh_as)
                nb_clients_hh[k + as_name + flow_size] = len(np.unique(
                    data_hh_as['client_id']))
                vol_hh[k + as_name + flow_size] = sum(data_hh_as['l3Bytes'])
    stats = []
    stats_hh = []
    # traces pair up as (ADSL, FTTH) once sorted -- TODO confirm ordering
    # assumption against the trace naming convention
    for adsl, ftth in zip(islice(sorted(flows), 0, None, 2),
                          islice(sorted(flows), 1, None, 2)):
        stats.append(_hh_stats_table(' ', nb_flows, nb_clients, vol,
                                     adsl, ftth, as_list))
        stats_hh.append(_hh_stats_table('%d Heavy Hitters' % nb_hh,
                                        nb_flows_hh, nb_clients_hh, vol_hh,
                                        adsl, ftth, as_list))
    return '\hline\n'.join(stats), '\hline\n'.join(stats_hh)
def extract_sessions_hh_as(data_hh_as, tmp_dir, gap=600,
                           client_field='client_id'):
    """Return a np array of streaming sessions for data_hh_as.

    The sessions are materialised through a temporary ';'-separated text
    file written by flow2session, then parsed back with the
    streaming-session dtype.  The temporary file lives in tmp_dir and is
    removed when the context manager exits.
    """
    with NamedTemporaryFile(prefix='tmp_session_file', suffix='.txt',
                            dir=tmp_dir) as session_dump:
        flow2session.process_cnx_sessions(
            data_hh_as, session_dump.name, gap=gap, reset_errors=True,
            client_field=client_field)
        return np.loadtxt(
            session_dump.name, delimiter=';',
            dtype=INDEX_VALUES.dtype_cnx_streaming_session)
def aggregate_sessions_hh_as(hh, sessions_hh_as):
    """Return a list of tuple of stats resume on heavy hitters.

    For every heavy hitter in hh (the trailing entry is skipped), keeps
    the sessions with a strictly positive duration and summarises them as
    (total duration, total bytes, mean bit-rate in kb/s).  Clients with
    no positive-duration session produce no entry.
    """
    summaries = []
    for client in hh[:-1]:
        per_client = sessions_hh_as.compress(
            sessions_hh_as['Name'] == str(client))
        positive = per_client.compress(per_client['duration'] > 0)
        if len(positive) > 0:
            summaries.append((sum(positive['duration']),
                              sum(positive['tot_bytes']),
                              np.mean(8e-3 * positive['tot_bytes']
                                      / positive['duration'])))
    return summaries
def full_sessions_hh_as(data_hh_as):
    """Return a list of tuple of bytes and average bit-rate for all flows.

    Flows with a zero (or negative) downstream duration are dropped; each
    remaining flow yields (duration, bytes, bit-rate in kb/s).
    """
    valid = data_hh_as.compress(data_hh_as['DurationDn'] > 0)
    rates = 8e-3 * valid['ByteDn'] / valid['DurationDn']
    return zip(valid['DurationDn'], valid['ByteDn'], rates)
def extract_sessions_single_hh_as(sessions_hh_as):
    """Return a list of tuple of bytes and average bit-rate for all sessions.

    Sessions with a zero (or negative) duration are dropped; each
    remaining session yields (duration, bytes, bit-rate in kb/s).
    """
    valid = sessions_hh_as.compress(sessions_hh_as['duration'] > 0)
    rates = 8e-3 * valid['tot_bytes'] / valid['duration']
    return zip(valid['duration'], valid['tot_bytes'], rates)
def compute_active_time_per_as(nb_hh=100, flows_dir='flows/links/cnx_stream',
                               cnx_stream=None,
                               # as_list=('ALL_YOUTUBE', 'GOOGLE', 'DAILYMOTION'),
                               url_list=('.youtube.', '.dailymotion.'),
                               file_name_start='cnx_stream_', extra_data=False,
                               gap=600):
    """Graph for heavy hitters separating by AS
    USE with plot_active_time_as

    Loads per-trace flow arrays (from flows_dir unless cnx_stream is
    given), selects the top nb_hh heavy hitters per trace, and splits
    their flows by provider using substring matches of url_list against
    the Host_Referer field.  Each (trace, provider) group is turned into
    sessions (see extract_sessions_hh_as, gap seconds between sessions)
    and aggregated per client.  With extra_data=True, per-session tuples
    are also stored under '<trace>_<PROVIDER>_full' keys.

    NOTE: this function relies on Python 2 semantics (dict.iteritems and
    map() returning a list: `hh` is sliced and reused below).
    """
    if not cnx_stream:
        cnx_stream = dict([(f.split(file_name_start)[1].split('.npy')[0],
                            np.load(os.sep.join((flows_dir, f))))
                           for f in os.listdir(flows_dir)
                           if f.startswith(file_name_start)])
    hhs = get_hhs(cnx_stream, 'client_id', 'ByteDn', nb_hh=nb_hh)
    out_dict = {}
    # as_excluded = map(lambda x: 'AS' + str(x),
    # [v for as_name in as_list
    # for v in INDEX_VALUES.__getattribute__('AS_' + as_name)])
    # session files are written under <cwd>/active_time
    tmp_dir = os.sep.join((os.getcwd(), 'active_time'))
    for trace, data in cnx_stream.iteritems():
        data = cnx_stream[trace]
        print('data: ', len(data))
        hh = map(itemgetter(0), hhs[trace])
        # for client_id in UNKNOWN_ID:
        # # removing unknown user id
        # try:
        # hh.remove(client_id)
        # print('trace: %s; removed: %s' % (trace, client_id))
        # except ValueError:
        # pass
        # hh[:-1]: the last (aggregated) entry is not a real client
        data_hh = data.compress([x['client_id'] in hh[:-1] for x in data])
        print('data_hh: ', len(data_hh))
        for as_url in url_list:
            # '.youtube.' -> 'YOUTUBE'
            as_name = as_url.strip('.').upper()
            data_hh_as = data_hh.compress(
                [as_url in x['Host_Referer'] for x in data_hh])
            print('data_hh_as: ', len(data_hh_as))
            sessions_hh_as = extract_sessions_hh_as(data_hh_as, tmp_dir,
                                                    gap=gap)
            print('sessions_hh_as: ', len(sessions_hh_as))
            out_dict['_'.join((trace, as_name))] = aggregate_sessions_hh_as(hh,
                                                            sessions_hh_as)
            if extra_data:
                out_dict['_'.join((trace, as_name, 'full'))] = \
                        extract_sessions_single_hh_as(sessions_hh_as)
                # full_sessions_hh_as(data_hh_as)
        # for/else: there is no break in the loop above, so this else
        # clause always runs once after the url loop, computing the
        # 'OTHERS' bucket (flows matching none of the url_list providers)
        else:
            data_hh_as = data_hh.compress(
                [not(any([url in x['Host_Referer'] for url in url_list]))
                 for x in data_hh])
            sessions_hh_as = extract_sessions_hh_as(data_hh_as, tmp_dir,
                                                    gap=gap)
            out_dict['_'.join((trace, 'OTHERS'))] = aggregate_sessions_hh_as(hh,
                                                            sessions_hh_as)
            if extra_data:
                out_dict['_'.join((trace, 'OTHERS', 'full'))] = \
                        extract_sessions_single_hh_as(sessions_hh_as)
                # full_sessions_hh_as(data_hh_as)
    return out_dict
def plot_full_active_time_as(act, out_dir='active_time', nb_hh=100, prefix=None,
                             as_list=('DAILYMOTION', 'YOUTUBE'),
                             semilog=False, gap=600, large_th=None):
    """Plots the active time graphs
    Use as
    top100_600 = tools.complements.compute_active_time_per_as(extra_data=True)
    tools.complements.plot_full_active_time_as(top100_600,
    out_dir='active_time_new')
    tools.complements.plot_full_active_time_as(top100_600,
    large_th=5e6, out_dir='active_time_new')
    top100_60 = tools.complements.compute_active_time_per_as(extra_data=True)
    tools.complements.plot_full_active_time_as(top100_60,
    out_dir='active_time_new', gap=60)
    tools.complements.plot_full_active_time_as(top100_60,
    large_th=1e6, out_dir='active_time_new', gap=60)

    One scatter plot (plus linear fit) per trace and per indicator
    (volume, average bit-rate) against session duration, written to
    out_dir as PDF.  Only the '<trace>_<AS>_full' entries of act are
    used.  large_th, if set, drops sessions whose volume (x[1]) is below
    the threshold.

    NOTE: uses the Python-2-only iterator API (colors.next()) and passes
    a map() object to ax.plot (a list on Python 2).
    """
    if not prefix:
        prefix = 'top%d_per_as' % nb_hh
    # traces are recovered from the '<trace>_OTHERS_full' keys
    for trace in [k.split('_OTHERS_full')[0] for k in act
                  if k.endswith('_OTHERS_full')]:
        for (indic, field, unit) in (('Volume', 1, 'Bytes'),
                                     ('Average bit-rate', 2, 'kb/s')):
            fig = plt.figure(figsize=(8, 8))
            ax = fig.add_axes([0.12, 0.18, 0.8, 0.75])
            markers = cycle("x+*o")
            colors = cycle("bgrcmyk")
            for as_name in ('OTHERS',) + as_list:
                data = act['_'.join((trace, as_name, 'full'))]
                if large_th:
                    # filter on large sessions (not on flows!)
                    # hard coded index
                    data = [x for x in data if x[1] > large_th]
                if len(data) == 0:
                    print('PROBLEM with %s on %s' % (trace, as_name))
                    continue
                x, y = zip(*(map(itemgetter(0, field), data)))
                cur_color = colors.next()
                ax.plot(x, y, color=cur_color, linestyle='',
                        marker=markers.next(),
                        label='%s: %d' % (as_name.replace('_', ' '), len(data)))
                # first-degree least-squares fit of indic vs. duration
                z = np.polyfit(x, y, 1)
                p = np.poly1d(z)
                if p(max(x)) < 0:
                    # negative fit at the right edge: clip the fitted line
                    # where it would go below zero
                    x_max = (1 - z[1]) / z[0]
                    print(trace, as_name, 'switch lin x', x_max, p(x_max))
                else:
                    x_max = max(x)
                xis = sorted([xi for xi in x if xi <= x_max])
                ax.plot(xis, map(p, xis),
                        color=cur_color, lw=2)
            ax.legend(bbox_to_anchor=(0., -.22, 1., .102), loc=4,
                      ncol=2, mode="expand", borderaxespad=0.)
            ax.set_title(indic + ' vs. Session Duration for %d Heavy Hitters\n'
                         % nb_hh + trace.replace('_', ' '))
            ax.set_ylabel('Dowloaded %s in %s' % (indic, unit))
            ax.set_xlabel(
                '\n'.join(('Session Duration in Seconds (session: gap %d sec'
                           % gap + ((', threshold: %g B)' % large_th)
                                    if large_th else ')'),
                           '1 point per session '
                           + '(constructed on per provider flows)')))
            ax.grid(True)
            save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                  'act_time_full', trace, str(gap),
                                  ('th_%g' % large_th) if large_th else 'all'))
            # fig.savefig(os.sep.join((out_dir, save_file + '_lin.pdf')))
            ax.loglog()
            fig.savefig(os.sep.join((out_dir, save_file + '_loglog.pdf')))
            if semilog:
                ax.semilogx()
                fig.savefig(os.sep.join((out_dir, save_file + '_logx.pdf')),
                            format='pdf')
                ax.semilogy()
                fig.savefig(os.sep.join((out_dir, save_file + '_logy.pdf')),
                            format='pdf')
def plot_active_time_as(act, out_dir='active_time', nb_hh=100, prefix=None,
                        as_list=('ALL_YOUTUBE', 'DAILYMOTION'), semilog=False):
    """Plots the active time graphs
    Use as
    top100 = tools.complements.compute_active_time_per_as()
    tools.complements.plot_active_time_as(top100)

    One scatter plot (plus linear fit) per trace and per indicator
    (volume, average bit-rate) against total active time, one colour per
    provider, written as PDF in out_dir (lin and loglog axes; also
    semilog variants when semilog=True).

    NOTE: uses the Python-2-only iterator API (colors.next()) and passes
    a map() object to ax.plot (a list on Python 2).
    """
    if not prefix:
        prefix = 'top%d_per_as' % nb_hh
    # traces are recovered from the '<trace>_OTHERS' keys
    for trace in [k.split('_OTHERS')[0] for k in act if k.endswith('_OTHERS')]:
        markers = cycle("x+*o")
        colors = cycle("bgrcmyk")
        for (indic, field, unit) in (('Volume', 1, 'Bytes'),
                                     ('Average bit-rate', 2, 'kb/s')):
            fig = plt.figure(figsize=(8, 8))
            ax = fig.add_axes([0.12, 0.18, 0.8, 0.75])
            for as_name in as_list + ('OTHERS',):
                data = act['_'.join((trace, as_name))]
                x, y = zip(*(map(itemgetter(0, field), data)))
                color = colors.next()
                ax.plot(x, y, color=color, linestyle='',
                        marker=markers.next(),
                        label='%s: %d' % (as_name.replace('_', ' '), len(data)))
                # first-degree least-squares fit of indic vs. active time
                z = np.polyfit(x, y, 1)
                p = np.poly1d(z)
                if p(max(x)) < 0:
                    # negative fit at the right edge: clip the fitted line
                    # where it would go below zero
                    x_max = (1 - z[1]) / z[0]
                    print(trace, as_name, 'switch lin x', x_max, p(x_max))
                else:
                    x_max = max(x)
                xis = sorted([xi for xi in x if xi <= x_max])
                ax.plot(xis, map(p, xis),
                        color=color)
            ax.legend(bbox_to_anchor=(0., -.22, 1., .102), loc=4,
                      ncol=2, mode="expand", borderaxespad=0.)
            ax.set_title(indic + ' vs. Active Time for %d Heavy Hitters '
                         % nb_hh + streaming_tools.format_title(trace))
            ax.set_ylabel('Dowloaded %s in %s' % (indic, unit))
            ax.set_xlabel('Active Time (sum of all sessions) in Seconds')
            ax.grid(True)
            save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                  'act_time', 'lin', trace))
            fig.savefig(os.sep.join((out_dir, save_file + '.pdf')))
            ax.loglog()
            save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                  'act_time', 'loglog', trace))
            fig.savefig(os.sep.join((out_dir, save_file + '.pdf')))
            if semilog:
                ax.semilogx()
                save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                      'act_time', 'logx', trace))
                fig.savefig(os.sep.join((out_dir, save_file + '.pdf')),
                            format='pdf')
                ax.semilogy()
                save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                      'act_time', 'logy', trace))
                fig.savefig(os.sep.join((out_dir, save_file + '.pdf')),
                            format='pdf')
def compute_active_time(nb_hh=20, sessions_dir='flows/links/cnx_stream',
                        file_name_start='sessions_cnx_stream_'):
    """Compute some nice graphs for Heavy Hitters
    use file_name_start='sessions_10min_cnx_stream_' for other sessions
    see plot_active_time

    Returns a dict mapping each trace to a list of
    (active_time, total_bytes, mean_bit_rate_kbps) tuples: one entry per
    heavy hitter, plus a final aggregated entry for all other clients.
    """
    # BUG FIX: the loadtxt path used to hard-code the default directory
    # ('flows/links/cnx_stream/' + f) and silently ignored sessions_dir.
    cnx_stream = dict(
        [(f.split(file_name_start)[1].split('.txt')[0],
          np.loadtxt(os.sep.join((sessions_dir, f)), delimiter=';',
                     dtype=INDEX_VALUES.dtype_cnx_streaming_session))
         for f in os.listdir(sessions_dir)
         if f.startswith(file_name_start)])
    hh = get_hhs(cnx_stream, 'client_id', 'tot_bytes', nb_hh=nb_hh)

    def summarize(data):
        "Total active time, total volume and mean bit-rate (kb/s) of data"
        data_ok = data.compress(data['duration'] > 0)
        return (sum(data_ok['duration']), sum(data_ok['tot_bytes']),
                np.mean(8e-3 * data_ok['tot_bytes'] / data_ok['duration']))

    out = {}
    for k, v in cnx_stream.items():
        # hh[k][:-1]: the last (aggregated) entry is not a real client
        top_clients = list(map(itemgetter(0), hh[k][:-1]))
        per_client = [summarize(v.compress(v['client_id'] == client))
                      for client in top_clients]
        # one trailing entry aggregating every non-heavy-hitter client
        top_set = set(top_clients)
        others = v.compress([x['client_id'] not in top_set for x in v])
        out[k] = per_client + [summarize(others)]
    return out
def plot_active_time(act, out_dir='active_time', prefix='top20', semilog=False):
    """Plots the active time graphs
    Use as
    top20 = tools.complements.compute_active_time()
    tools.complements.plot_active_time(top20)
    top100 = tools.complements.compute_active_time(nb_hh=100)
    tools.complements.plot_active_time(top100, prefix='top100')

    act maps each trace to (active_time, volume, bit-rate) tuples where
    the last entry aggregates all non-heavy-hitter clients (hence the
    x[:-1]/y[:-1] split below).  Writes one loglog PDF per trace and
    indicator (plus semilog variants when semilog=True) into out_dir.

    NOTE: relies on Python 2 semantics (dict.iteritems; map() passed to
    ax.plot is a list on Python 2).
    """
    for k, v in act.iteritems():
        for (indic, field, unit) in (('Volume', 1, 'Bytes'),
                                     ('Average bit-rate', 2, 'kb/s')):
            fig = plt.figure(figsize=(8, 8))
            ax = fig.add_axes([0.12, 0.18, 0.8, 0.75])
            x, y = zip(*(map(itemgetter(0, field), v)))
            # heavy hitters as '+' markers, aggregated remainder as '*'
            ax.plot(x[:-1], y[:-1], '+', label='Heavy Hitters')
            ax.plot(x[-1], y[-1], '*', label='All Others')
            # first-degree fit over the heavy hitters only
            z = np.polyfit(x[:-1], y[:-1], 1)
            p = np.poly1d(z)
            if p(max(x[:-1])) < 0:
                # negative fit at the right edge: clip the fitted line
                # where it would go below zero
                x_max = (1 - z[1]) / z[0]
                print(k, 'switch lin x', x_max, p(x_max))
            else:
                x_max = max(x[:-1])
            xis = sorted([xi for xi in x[:-1] if xi <= x_max])
            ax.plot(xis, map(p, xis))
            ax.legend(bbox_to_anchor=(0., -.22, 1., .102), loc=4,
                      ncol=3, mode="expand", borderaxespad=0.)
            ax.set_title(indic + ' vs. Active Time for %d Heavy Hitters '
                         % (len(v) - 1) + streaming_tools.format_title(k))
            ax.set_ylabel('Dowloaded %s in %s' % (indic, unit))
            ax.set_xlabel('Active Time (sum of all sessions) in Seconds')
            ax.grid(True)
            ax.loglog()
            # plt.setp(ax.get_xticklabels() + ax.get_yticklabels())
            save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                  'act_time', 'loglog', k))
            fig.savefig(os.sep.join((out_dir, save_file + '.pdf')))
            if semilog:
                ax.semilogx()
                save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                      'act_time', 'logx', k))
                fig.savefig(os.sep.join((out_dir, save_file + '.pdf')))
                ax.semilogy()
                save_file = '_'.join((prefix, indic.replace(' ', '_').lower(),
                                      'act_time', 'logy', k))
                fig.savefig(os.sep.join((out_dir, save_file + '.pdf')))
            del(fig)
def nb_flows_per_client_color(flows_stats, heavy_hitters, bin_size=30,
                              duration_indic='Session-Duration',
                              start_indic='StartDn'):
    """Return the data representing the number of flows per bin for the heavy
    hitters

    For every heavy hitter, counts the flows overlapping each time bin
    and spreads each flow's volume evenly over its bins.  Returns the
    pair (nb_flows, nb_vols) of dicts of per-bin defaultdicts.
    """
    allowed_dtypes = (INDEX_VALUES.dtype_cnx_stream,
                      INDEX_VALUES.dtype_cnx_stream_loss,
                      INDEX_VALUES.dtype_all_stream_indics_final_tstat)
    assert flows_stats.dtype in allowed_dtypes, "Incorrect dtype"
    nb_flows = {}
    nb_vols = {}
    for client in heavy_hitters:
        flow_count = defaultdict(int)
        vol_count = defaultdict(int)
        client_flows = flows_stats.compress(
            flows_stats['client_id'] == client)
        for flow in client_flows:
            first_bin = int(flow[start_indic]) // bin_size
            last_bin = int(flow[start_indic] + flow[duration_indic]) // bin_size
            flow_bins = range(first_bin, 1 + last_bin)
            # if vol is zero, the accumulation process will remove it and
            # mismatch volume bins with nb_flows bins, so I trick it
            # random because of bad luck of 2 flows having exactly the same
            # total volume than the next flow in the bin (incredible but true)
            vol_per_bin = (random() + max(1, flow['ByteDn'])) / len(flow_bins)
            for b in flow_bins:
                flow_count[b] += 1
                vol_count[b] += vol_per_bin
        nb_flows[client] = flow_count
        nb_vols[client] = vol_count
    return nb_flows, nb_vols
def plot_all_nb_flows(cnx_stream, nb_hh=20, out_dir='nb_flows_plots',
                      postfix='', duration=86400, bin_size=30, hour_graph=False,
                      color=False, thresholds=(1e6, 10e6),
                      start_indic='StartDn'):
    """Wrapper for plot_nb_flows_per_client_acc on a flows dict
    Use as:
    cnx_stream = tools.streaming_tools.load_cnx_stream()
    for b in (60, 30, 10, 1):
        th = (1e6 * b / 30, 1e7 * b / 30)
        tools.complements.plot_all_nb_flows(cnx_stream, bin_size=b,
            color=True, hour_graph=True, thresholds=th,
            out_dir='nb_flows_plots/all_flows_color')
    Filter on large flows with:
    cnx_stream_1mb = dict([(k, v.compress(v['nByte'] > 1e6))
                           for k, v in cnx_stream.items()])
    for b in (60, 30, 10, 1):
        th = (1e6 * b / 30, 1e7 * b / 30)
        tools.complements.plot_all_nb_flows(cnx_stream_1mb, bin_size=b,
            color=True, thresholds=th,
            postfix='_1mb', out_dir='nb_flows_plots/large_flows_color')

    For each trace, writes one accumulated-rectangles plot and one 3D
    line plot of parallel flows per heavy hitter into out_dir.
    """
    # client_id instead of Name
    clients_per_vol = streaming_tools.connections_per_as(cnx_stream,
                          as_field='client_id', key_ext='',
                          agg_field='ByteDn', already_filtered=True,
                          dir_check=False)
    hh = streaming_tools.get_top_n(clients_per_vol, nb_top=nb_hh, rank='nb',
                                   exclude_list=UNKNOWN_ID)
    for trace, data in cnx_stream.items():
        print(' '.join(("Processing trace:", trace)))
        # Materialize as a list: under Python 3 a bare map() would be
        # exhausted by the first membership test below.
        heavy_hitters = list(map(itemgetter(0), hh[trace]))
        if color:
            all_nb_flows, all_nb_vol = nb_flows_per_client_color(
                data.compress([x['client_id'] in heavy_hitters for x in data]),
                heavy_hitters, bin_size=bin_size, start_indic=start_indic)
            accumulated_vol = accumulate_flows_nb(all_nb_vol,
                                                  duration=duration,
                                                  bin_size=bin_size)
            tmp_accumulated_nb = accumulate_flows_nb(all_nb_flows,
                                                     duration=duration,
                                                     bin_size=bin_size)
            # re-split the flow-count runs so they line up bin-for-bin
            # with the (more fragmented) volume runs
            accumulated_nb = split_nb_flows_according_vols(tmp_accumulated_nb,
                                                           accumulated_vol)
            del(tmp_accumulated_nb)
            del(all_nb_flows)
            del(all_nb_vol)
        else:
            all_nb_flows = nb_flows_per_client(
                data.compress([x['client_id'] in heavy_hitters for x in data]),
                heavy_hitters, bin_size=bin_size, start_indic=start_indic)
            accumulated_nb = accumulate_flows_nb(all_nb_flows,
                                                 duration=duration,
                                                 bin_size=bin_size)
            del(all_nb_flows)
        if hour_graph:
            # derive the x range from the accumulated data itself
            x_min = min(map(itemgetter(1), map(itemgetter(0),
                                               accumulated_nb.values())))
            x_max = max(sum(map(itemgetter(1), xs)) for xs
                        in [ys[:-1] for ys in accumulated_nb.values()])
        else:
            x_min = 0
        fig = plt.figure()
        if color:
            ax = fig.add_axes([0.125, 0.16, 0.81, 0.7])
        else:
            ax = fig.add_subplot(111)
        plot_nb_flows_per_client_acc((accumulated_nb, heavy_hitters), ax,
                                     bin_size=bin_size,
                                     duration=((x_max - x_min) if hour_graph
                                               else duration),
                                     trace_name=trace, color=color,
                                     x_min=x_min, thresholds=thresholds,
                                     all_nb_vols=accumulated_vol if color else None,
                                     title='Number of %s Flows for %d Heavy Hitters'
                                     % (postfix.replace('_', ' ').upper(), nb_hh))
        fig.savefig(os.sep.join((out_dir, 'nb_flows_%d_HH_%s_%s%s.pdf'
                                 % (nb_hh, trace, bin_size, postfix))))
        fig.clf()
        del(ax)
        del(fig)
        # BUG FIX: was plt.figure(8, 8), i.e. num=8 with an invalid
        # figsize=8; every other plot in this module uses figsize=(8, 8).
        fig = plt.figure(figsize=(8, 8))
        plot_nb_flows_per_client_line((accumulated_nb, heavy_hitters), fig,
                                      bin_size=bin_size,
                                      duration=((x_max - x_min) if hour_graph
                                                else duration),
                                      x_min=x_min, trace_name=trace,
                                      title='Number of %s Flows for %d Heavy Hitters'
                                      % (postfix.replace('_', ' ').upper(), nb_hh))
        fig.savefig(os.sep.join((out_dir, 'nb_flows_line_3d_%d_HH_%s_%s_%s.pdf'
                                 % (nb_hh, trace, postfix, bin_size))))
        fig.clf()
        del(fig)
def split_nb_flows_according_vols(clients_nb_flows, clients_nb_vols):
    """Return corrected version of nb_flows, because vol accumulation is more
    restrictive than nb_flows one

    For every client, each (count, width) run of the flow accumulation is
    re-cut along the (generally finer) widths of the volume accumulation,
    so both sequences end up with matching bin boundaries.
    """
    new_clients_nb_flows = {}
    for client, nb_vols in clients_nb_vols.items():
        nb_flows = clients_nb_flows[client]
        assert len(nb_vols) >= len(nb_flows), \
            "nb_flows more restrictive than vols"
        split_flows = []
        vol_index = 0
        for nb_flow, n_width in nb_flows:
            v_width = nb_vols[vol_index][WIDTH_IDX]
            if v_width == n_width:
                # widths already agree: keep the run as-is
                split_flows.append((nb_flow, n_width))
                vol_index += 1
                continue
            assert n_width > v_width, \
                "Problem in width for client %s" % client
            # the flow run spans several volume runs: duplicate the count
            # over each of them until the whole width is covered
            added_width = 0
            # hack due to float representation
            while n_width - added_width > EPSILON:
                v_width = nb_vols[vol_index][WIDTH_IDX]
                split_flows.append((nb_flow, v_width))
                added_width += v_width
                vol_index += 1
        assert vol_index == len(nb_vols), "Not all volumes processed"
        new_clients_nb_flows[client] = split_flows
    return new_clients_nb_flows
def nb_flows_per_client(flows_stats, heavy_hitters, bin_size=30,
                        start_indic='StartDn', duration_indic='DurationDn'):
    """Return the data representing the number of flows per bin for the heavy
    hitters

    Maps each heavy hitter to a defaultdict counting, for every time bin
    of bin_size seconds, how many of the client's flows overlap it.
    """
    assert flows_stats.dtype in (INDEX_VALUES.dtype_cnx_stream,
                                 INDEX_VALUES.dtype_cnx_stream_loss), \
        "Incorrect dtype"
    per_client = {}
    for client in heavy_hitters:
        bins = defaultdict(int)
        client_flows = flows_stats.compress(
            flows_stats['client_id'] == client)
        for flow in client_flows:
            first_bin = flow[start_indic] // bin_size
            last_bin = (flow[start_indic] + flow[duration_indic]) // bin_size
            for i in range(first_bin, 1 + last_bin):
                bins[i] += 1
        per_client[client] = bins
    return per_client
def accumulate_flows_nb(client_nb_flows, duration=86400, bin_size=30):
    """Return a new dict of nb_flows to aggregate the bins with same values.

    client_nb_flows maps each client to a per-bin flow count (typically a
    defaultdict from nb_flows_per_client*).  Consecutive bins holding the
    same count are merged into (count, width) runs, with widths expressed
    in minutes (bin_size / 60 -- note this truncates to 0 under Python 2
    integer division unless true division is in effect).

    FIX: the Python-2-only iteritems()/xrange() calls crashed under
    Python 3; .items()/range() iterate identically under Python 2.
    """
    new_nb_flows = {}
    for client, nb_flows in client_nb_flows.items():
        # seed with a zero-width run so the first comparison always works
        nb_flows_widths = [(0, 0)]
        for i in range(duration // bin_size):
            if nb_flows[i] == nb_flows_widths[-1][0]:
                # same count as the current run: just widen it
                nb, width = nb_flows_widths[-1]
                nb_flows_widths[-1] = (nb, width + bin_size / 60)
            else:
                nb_flows_widths.append((nb_flows[i], bin_size / 60))
        new_nb_flows[client] = nb_flows_widths
    return new_nb_flows
def plot_nb_flows_per_client_acc(acc_and_heavy_hitters, ax,
                                 color=False, all_nb_vols=None,
                                 x_min=0, thresholds=(1e6, 10e6),
                                 duration=86400, bin_size=30, trace_name=None,
                                 title='Number of flows per client'):
    """Plot a graph representing the number of sessions out of flows stats
    Compress data with accumulate_flows_nb before

    acc_and_heavy_hitters is a (clients_nb_flows_widths, heavy_hitters)
    pair.  FIX: it used to be unpacked directly in the signature, which
    is Python-2-only syntax (removed by PEP 3113 and a SyntaxError under
    Python 3); it is now unpacked in the body.  Callers still pass the
    same single tuple argument.
    """
    clients_nb_flows_widths, heavy_hitters = acc_and_heavy_hitters
    y_offset = 0
    height = 1
    if color:
        assert len(thresholds) == 2, "Incorrect thresholds list"
        (min_vol, max_vol) = thresholds
    for client in heavy_hitters:
        nb_flows_width = clients_nb_flows_widths[client]
        if color:
            vols_width = all_nb_vols[client]
            assert len(nb_flows_width) == len(vols_width), \
                "Mismatch between volumes and flows"
        max_nb_flows = max(map(itemgetter(0), nb_flows_width))
        if max_nb_flows == 0:
            print('Problem with client: ', client)
            continue
        cur_height = height * max_nb_flows
        bin_start = 0
        for index, (nb, width) in enumerate(nb_flows_width):
            if color:
                vol, width_v = vols_width[index]
                assert width == width_v, "Mismatch between vol and nb widths"
                # colour encodes the volume downloaded in the bin
                if vol < min_vol:
                    flow_color = 'g'
                elif vol < max_vol:
                    flow_color = ORANGE
                else:
                    flow_color = 'r'
            else:
                flow_color = 'k'
            # alpha encodes the share of the client's peak parallel flows
            rect = mpatches.Rectangle((bin_start, y_offset),
                                      width, cur_height,
                                      linewidth=0, color=flow_color,
                                      alpha=nb / max_nb_flows)
            ax.add_patch(rect)
            bin_start += width
        del rect
        y_offset += cur_height + height
    ax.set_xlim([x_min / 60, (x_min + duration) / 60])
    ax.set_ylim([0, y_offset])
    ax.set_ylabel('''Client (largest volume at bottom)
height: maximum number of parallel flows in a bin''')
    ax.set_xlabel('\n'.join((
        'Time in Minutes (bin size of %d seconds)' % bin_size,
        'Bin color: green ($x < %g$), orange ($%g < x < %g$), red ($x > %g$)'
        % (min_vol, min_vol, max_vol, max_vol) if color else '')))
    if trace_name:
        ax.set_title(': '.join((title, streaming_tools.format_title(
            trace_name.split('sessions_GVB_')[1]
            if 'sessions_GVB_' in trace_name else trace_name))))
def plot_nb_flows_per_client_line(acc_and_heavy_hitters, fig,
                                  duration=86400, bin_size=30, trace_name=None,
                                  x_min=0, title='Number of flows per client'):
    """Plot a graph representing the number of sessions out of flows stats
    here we plot a line for each heavy hitter with a diffrent color
    Compress data with accumulate_flows_nb before

    acc_and_heavy_hitters is a (clients_nb_flows_width, heavy_hitters)
    pair.  FIXES: the pair used to be unpacked directly in the signature
    (Python-2-only syntax, PEP 3113) and the colour cycle used the
    Python-2-only colors.next() call; both now use forms valid on either
    interpreter.  Unused locals (markers, x_lim_min/x_lim_max) removed.
    """
    clients_nb_flows_width, heavy_hitters = acc_and_heavy_hitters
    ax = Axes3D(fig, azim=-55, elev=35)
    colors = cycle("bgrcmy")
    cc = lambda arg: colorConverter.to_rgba(arg, alpha=0.6)
    y_offset = 0
    zs = []
    verts = []
    face_colors = []
    for i, client in enumerate(heavy_hitters):
        # one polygon per client, stacked along the y (depth) axis
        zs.append(2 * i)
        x = []
        y = []
        nb_flows_width = clients_nb_flows_width[client]
        cur_time = 0
        for (nb, width) in nb_flows_width:
            # emit step edges: repeat the level at both ends of each run
            if cur_time != 0:
                x.append(cur_time - x_min)
                y.append(nb)
            cur_time += width
            x.append(cur_time - x_min)
            y.append(nb)
        if y[-1] == 0:
            # drop a trailing zero so the polygon closes cleanly
            y.pop()
            x.pop()
        # list(): PolyCollection needs a sequence, and zip() is a one-shot
        # iterator under Python 3
        verts.append(list(zip(x, y)))
        face_colors.append(cc(next(colors)))
        y_offset = max(max(map(itemgetter(0), nb_flows_width)), y_offset)
    poly = PolyCollection(verts, facecolors=face_colors, linewidth=1)
    poly.set_alpha(0.6)
    ax.add_collection3d(poly, zs=zs, zdir='y')
    ax.set_xlim3d(0, duration)
    ax.set_zlim3d(0, y_offset + 1)
    ax.set_ylim3d(-1, 2 * len(heavy_hitters) + 1)
    ax.set_zlabel('Number of parallel flows in a bin')
    ax.set_xlabel('Time in Minutes\n(bin size of %d seconds)' % bin_size)
    ax.set_ylabel('Clients')
    # make ticklabels and ticklines invisible
    for a in ax.w_yaxis.get_ticklabels():
        a.set_visible(False)
    if trace_name:
        fig.suptitle(': '.join((title, streaming_tools.format_title(
            trace_name.split('sessions_GVB_')[1]
            if 'sessions_GVB_' in trace_name else trace_name))))
    del(ax)
| gpl-3.0 | 7,636,610,822,924,463,000 | 48.640191 | 81 | 0.471758 | false |
elsehow/moneybot | moneybot/fund.py | 1 | 6641 | # -*- coding: utf-8 -*-
from datetime import datetime
from logging import getLogger
from time import sleep
from time import time
from typing import Generator
from copy import deepcopy
import pandas as pd
from pyloniex.errors import PoloniexServerError
from moneybot.market.adapters import MarketAdapter
from moneybot.strategy import Strategy
logger = getLogger(__name__)
class Fund:
'''
Funds are the MoneyBot's highest level abstraction.
Funds have a Strategy, which proposes trades to
their MarketAdapter.
There are two ways for a Fund to run: live, or in a backtest.
my_fund.run_live()
or
my_fund.begin_backtest(start, end)
In both cases, the fund executes its private method `step(time)`
repeatedly. Strategies decide their own trading interval; this
dictates the temporal spacing between a fund's steps.
'''
def __init__(self, strategy: Strategy, adapter: MarketAdapter) -> None:
self.strategy = strategy
# MarketAdapter executes trades, fetches balances
self.market_adapter = adapter
# MarketHistory stores historical market data
self.market_history = adapter.market_history
# A boolean the caller can set
# to force a rebalance on the next trading step
# (after that rebalance, this will be reset to `False`)
self.force_rebalance_next_step = False
def step(
self,
time: datetime,
force_rebalance: bool = False,
) -> float:
self.market_adapter.update_market_state(time)
# Copy MarketState to prevent mutation by the Strategy (even
# accidentally). The Strategy's sole means of communication with the
# MarketAdapter and Fund is the list of ProposedTrades it creates.
# TODO: Make this unnecessary, either by making MarketState immutable
# or by other means.
market_state = deepcopy(self.market_adapter.market_state)
if force_rebalance is True:
proposed_trades = self.strategy.propose_trades_for_total_rebalancing(market_state)
else:
# Generally, the Strategy decides when to rebalance. If you're
# writing your own, this is the method you'll implement!
proposed_trades = self.strategy.propose_trades(
market_state,
self.market_history,
)
if proposed_trades:
# We "reify" (n. make (something abstract) more concrete or real)
# our proposed AbstractTrades to produce Orders that our
# MarketAdapter actually knows how to execute.
orders = self.market_adapter.reify_trades(
proposed_trades,
market_state,
)
logger.debug(
f'Attempting to execute {len(orders)} orders based on '
f'{len(proposed_trades)} proposed trades'
)
successful_order_ids = []
for order in orders:
# Each concrete subclass of MarketAdapter decides what it means
# to execute an order. For example, PoloniexMarketAdapter
# actually sends requests to Poloniex's trading API, but
# BacktestMarketAdapter just mutates some of its own internal
# state.
#
# In general we don't want this to be side-effect-y, so the way
# BacktestMarketAdapter is a little gross. We should try to fix
# that.
#
# MarketAdapter::execute_order returns an Optional[int] that we
# currently ignore: an order identifier if the execution was
# "successful" (whatever that means for the adapter subclass),
# or None otherwise.
order_id = self.market_adapter.execute_order(order)
if order_id is not None:
successful_order_ids.append(order_id)
logger.info(
f'{len(successful_order_ids)} of {len(orders)} orders '
'executed successfully'
)
# After the dust has settled, we update our view of the market state.
self.market_adapter.update_market_state(time)
# Finally, return the aggregate USD value of our fund.
return self.market_adapter.market_state.estimate_total_value_usd(
self.market_adapter.market_state.balances,
)
    def run_live(self):
        """
        Trade forever against the live exchange.

        Every ``self.strategy.trade_interval`` seconds: scrape the latest
        market data, run one trading step, then sleep until the next interval
        boundary. Never returns.
        """
        period = self.strategy.trade_interval
        logger.info(f'Live trading with {period} seconds between steps')
        while True:
            step_start = time()
            cur_dt = datetime.now()
            try:
                # Before anything, get freshest data from Poloniex
                self.market_history.scrape_latest()
                logger.info(f'Fund::step({cur_dt})')
                # The caller can "queue up" a force rebalance for the next
                # trading step.
                usd_val = self.step(
                    cur_dt,
                    force_rebalance=self.force_rebalance_next_step,
                )
                # In either case, we disable this rebalance for next time
                self.force_rebalance_next_step = False
                logger.info(f'Est. USD value: {usd_val:.2f}')
            except PoloniexServerError:
                # Transient exchange failure: skip this step and retry on the
                # next interval rather than crashing the fund.
                logger.exception(
                    'Received server error from Poloniex; sleeping until next step'
                )
            # Wait until our next time to run, accounting for the time taken by
            # this step to run
            step_time = time() - step_start
            # The modulo keeps the sleep non-negative even when a step overran
            # one (or more) whole periods.
            sleep_time = (period - step_time) % period
            logger.debug(f'Trading step took {step_time} seconds')
            logger.debug(f'Sleeping {sleep_time} seconds until next step')
            sleep(sleep_time)
def run_backtest(
self,
start_time: str,
end_time: str,
) -> Generator[float, None, None]:
'''
Takes a start time and end time (as parse-able date strings).
Returns a generator over a list of USD values for each point (trade
interval) between start and end.
'''
# MarketAdapter executes trades
# Set up the historical coinstore
# A series of trade-times to run each of our strategies through.
dates = pd.date_range(
pd.Timestamp(start_time),
pd.Timestamp(end_time),
freq=f'{self.strategy.trade_interval}S',
)
for date in dates:
val = self.step(date)
yield val
| bsd-3-clause | -663,076,200,772,912,300 | 38.064706 | 94 | 0.596597 | false |
jdhp-sap/sap-cta-data-pipeline | utils/simtel_to_fits_nectarcam.py | 2 | 15361 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
... TODO
"""
__all__ = ['extract_images']
import argparse
from astropy.io import fits
import datetime
import numpy as np
import os
import sys
import ctapipe
from ctapipe.io.hessio import hessio_event_source
import pyhessio
from datapipe.io import images
from datapipe import __version__ as VERSION
print(ctapipe.__version__)
print(pyhessio.__version__)
import ctapipe.image.geometry_converter as ctapipe_geom_converter
from ctapipe.instrument import CameraGeometry
# calibrator
from ctapipe.calib import CameraCalibrator
# Telescope IDs processed when the user does not pass --telescope on the CLI.
DEFAULT_TEL_FILTER = list(range(43, 77)) + list(range(116, 121)) # WARNING: THESE TEL_IDs ARE ONLY VALID FOR PROD3b LAPALMA (NORTH SITE) !!!
# ADC level used below to pick between the camera's two gain channels per pixel.
NECTAR_CAM_CHANNEL_THRESHOLD = 190 # cf. "calib_find_channel_selection_threshold" notebook
def extract_images(simtel_file_path,
                   tel_id_filter_list=None,
                   event_id_filter_list=None,
                   output_directory=None):
    """
    Extract NectarCam images from a simtel file and write one FITS file per
    (telescope, event) pair.

    Parameters
    ----------
    simtel_file_path : str
        Path of the simtel Monte-Carlo file to read.
    tel_id_filter_list : list of int
        Telescope IDs to process. NOTE(review): passing None would raise a
        TypeError at the ``tel_id in tel_id_filter_list`` test below; callers
        always supply a list.
    event_id_filter_list : list of int, optional
        Event IDs to keep; if None, every event is processed.
    output_directory : str, optional
        Directory where the FITS files are written; if None, files are
        written next to the input file.

    Raises
    ------
    ValueError
        If a processed telescope is not a hexagonal NectarCam camera.
    """
    # EXTRACT IMAGES ##########################################################
    # hessio_event_source returns a Python generator that streams data from an
    # EventIO/HESSIO MC data file (e.g. a standard CTA data file).
    # This generator contains ctapipe.core.Container instances ("event").
    #
    # Parameters:
    # - max_events: maximum number of events to read
    # - allowed_tels: select only a subset of telescope, if None, all are read.
    source = hessio_event_source(simtel_file_path, allowed_tels=tel_id_filter_list)
    # ITERATE OVER EVENTS #####################################################
    calib = CameraCalibrator(None, None)
    for event in source:
        calib.calibrate(event)  # calibrate the event (fills event.dl1)
        event_id = int(event.dl0.event_id)
        if (event_id_filter_list is None) or (event_id in event_id_filter_list):
            #print("event", event_id)
            # ITERATE OVER IMAGES #############################################
            for tel_id in event.trig.tels_with_trigger:
                tel_id = int(tel_id)
                if tel_id in tel_id_filter_list:
                    #print("telescope", tel_id)
                    # CHECK THE IMAGE GEOMETRY ################################
                    #print("checking geometry")
                    x, y = event.inst.pixel_pos[tel_id]
                    foclen = event.inst.optical_foclen[tel_id]
                    geom = CameraGeometry.guess(x, y, foclen)
                    if (geom.pix_type != "hexagonal") or (geom.cam_id != "NectarCam"):
                        raise ValueError("Telescope {}: error (the input image is not a valide NectarCam telescope image) -> {} ({})".format(tel_id, geom.pix_type, geom.cam_id))
                    # GET IMAGES ##############################################
                    pe_image = event.mc.tel[tel_id].photo_electron_image   # 1D np array
                    #uncalibrated_image = event.dl0.tel[tel_id].adc_sums  # ctapipe 0.3.0
                    uncalibrated_image = event.r0.tel[tel_id].adc_sums    # ctapipe 0.4.0
                    pedestal = event.mc.tel[tel_id].pedestal
                    gain = event.mc.tel[tel_id].dc_to_pe
                    pixel_pos = event.inst.pixel_pos[tel_id]
                    calibrated_image = event.dl1.tel[tel_id].image
                    # Per-pixel channel selection: keep the high-gain channel
                    # (row 0) below the threshold, the low-gain channel (row 1)
                    # above it, then collapse the two channels into one image.
                    calibrated_image[1, calibrated_image[0,:] <= NECTAR_CAM_CHANNEL_THRESHOLD] = 0
                    calibrated_image[0, calibrated_image[0,:] > NECTAR_CAM_CHANNEL_THRESHOLD] = 0
                    calibrated_image = calibrated_image.sum(axis=0)
                    #print(pe_image.shape)
                    #print(calibrated_image.shape)
                    #print(uncalibrated_image.shape)
                    #print(pedestal.shape)
                    #print(gain.shape)
                    #print(pixel_pos.shape)
                    #print(pixel_pos[0])
                    #print(pixel_pos[1])
                    # CONVERTING GEOMETRY (1D TO 2D) ##########################
                    buffer_id_str = geom.cam_id + "0"
                    geom2d, pe_image_2d = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, pe_image, buffer_id_str, add_rot=0)
                    geom2d, calibrated_image_2d = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, calibrated_image, buffer_id_str, add_rot=0)
                    geom2d, uncalibrated_image_2d_ch0 = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, uncalibrated_image[0], buffer_id_str, add_rot=0)
                    geom2d, uncalibrated_image_2d_ch1 = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, uncalibrated_image[1], buffer_id_str, add_rot=0)
                    geom2d, pedestal_2d_ch0 = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, pedestal[0], buffer_id_str, add_rot=0)
                    geom2d, pedestal_2d_ch1 = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, pedestal[1], buffer_id_str, add_rot=0)
                    geom2d, gains_2d_ch0 = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, gain[0], buffer_id_str, add_rot=0)
                    geom2d, gains_2d_ch1 = ctapipe_geom_converter.convert_geometry_1d_to_2d(geom, gain[1], buffer_id_str, add_rot=0)
                    # Make a mock pixel position array...
                    # (a regular grid spanning the true pixel coordinate range,
                    # not the converter's actual mapping)
                    pixel_pos_2d = np.array(np.meshgrid(np.linspace(pixel_pos[0].min(), pixel_pos[0].max(), pe_image_2d.shape[0]),
                                                        np.linspace(pixel_pos[1].min(), pixel_pos[1].max(), pe_image_2d.shape[1])))
                    ###########################################################
                    # The ctapipe geometry converter operate on one channel
                    # only and then takes and return a 2D array but datapipe
                    # fits files keep all channels and thus takes 3D arrays...
                    uncalibrated_image_2d = np.array([uncalibrated_image_2d_ch0, uncalibrated_image_2d_ch1])
                    pedestal_2d = np.array([pedestal_2d_ch0, pedestal_2d_ch1 ])
                    gains_2d = np.array([gains_2d_ch0, gains_2d_ch1])
                    # PUT NAN IN BLANK PIXELS #################################
                    # geom2d.mask is True only for real camera pixels; the rest
                    # of the rectangular buffer is padding.
                    calibrated_image_2d[np.logical_not(geom2d.mask)] = np.nan
                    pe_image_2d[np.logical_not(geom2d.mask)] = np.nan
                    uncalibrated_image_2d[0, np.logical_not(geom2d.mask)] = np.nan
                    uncalibrated_image_2d[1, np.logical_not(geom2d.mask)] = np.nan
                    pedestal_2d[0, np.logical_not(geom2d.mask)] = np.nan
                    pedestal_2d[1, np.logical_not(geom2d.mask)] = np.nan
                    gains_2d[0, np.logical_not(geom2d.mask)] = np.nan
                    gains_2d[1, np.logical_not(geom2d.mask)] = np.nan
                    pixel_pos_2d[0, np.logical_not(geom2d.mask)] = np.nan
                    pixel_pos_2d[1, np.logical_not(geom2d.mask)] = np.nan
                    ###########################################################
                    #print(pe_image_2d.shape)
                    #print(calibrated_image_2d.shape)
                    #print(uncalibrated_image_2d.shape)
                    #print(pedestal_2d.shape)
                    #print(gains_2d.shape)
                    #img = pixel_pos_2d
                    #print(img[1])
                    #import matplotlib.pyplot as plt
                    #im = plt.imshow(img[1])
                    #plt.colorbar(im)
                    #plt.show()
                    #sys.exit(0)
                    # GET PIXEL MASK ##########################################
                    pixel_mask = geom2d.mask.astype(int)  # 1 for pixels with actual data, 0 for virtual (blank) pixels
                    # MAKE METADATA ###########################################
                    metadata = {}
                    metadata['version'] = 1    # Version of the datapipe fits format
                    metadata['cam_id'] = "NectarCam"
                    metadata['tel_id'] = tel_id
                    metadata['event_id'] = event_id
                    metadata['simtel'] = simtel_file_path
                    metadata['tel_trig'] = len(event.trig.tels_with_trigger)
                    # quantity_to_tuple stores (value, FITS unit string) pairs
                    # so units survive the trip into FITS header keywords.
                    metadata['energy'] = quantity_to_tuple(event.mc.energy, 'TeV')
                    metadata['mc_az'] = quantity_to_tuple(event.mc.az, 'rad')
                    metadata['mc_alt'] = quantity_to_tuple(event.mc.alt, 'rad')
                    metadata['mc_corex'] = quantity_to_tuple(event.mc.core_x, 'm')
                    metadata['mc_corey'] = quantity_to_tuple(event.mc.core_y, 'm')
                    metadata['mc_hfi'] = quantity_to_tuple(event.mc.h_first_int, 'm')
                    metadata['count'] = int(event.count)
                    metadata['run_id'] = int(event.dl0.run_id)
                    metadata['tel_data'] = len(event.dl0.tels_with_data)
                    metadata['foclen'] = quantity_to_tuple(event.inst.optical_foclen[tel_id], 'm')
                    metadata['tel_posx'] = quantity_to_tuple(event.inst.tel_pos[tel_id][0], 'm')
                    metadata['tel_posy'] = quantity_to_tuple(event.inst.tel_pos[tel_id][1], 'm')
                    metadata['tel_posz'] = quantity_to_tuple(event.inst.tel_pos[tel_id][2], 'm')
                    # TODO: Astropy fails to store the following data in FITS files
                    #metadata['uid'] = os.getuid()
                    #metadata['datetime'] = str(datetime.datetime.now())
                    #metadata['version'] = VERSION
                    #metadata['argv'] = " ".join(sys.argv).encode('ascii', errors='ignore').decode('ascii')
                    #metadata['python'] = " ".join(sys.version.splitlines()).encode('ascii', errors='ignore').decode('ascii')
                    #metadata['system'] = " ".join(os.uname())
                    # SAVE THE IMAGE ##########################################
                    output_file_path_template = "{}_TEL{:03d}_EV{:05d}.fits"
                    if output_directory is not None:
                        simtel_basename = os.path.basename(simtel_file_path)
                        prefix = os.path.join(output_directory, simtel_basename)
                    else:
                        prefix = simtel_file_path
                    output_file_path = output_file_path_template.format(prefix,
                                                                        tel_id,
                                                                        event_id)
                    print("saving", output_file_path)
                    images.save_benchmark_images(img = calibrated_image_2d,
                                                 pe_img = pe_image_2d,
                                                 adc_sums_img = uncalibrated_image_2d,
                                                 pedestal_img = pedestal_2d,
                                                 gains_img = gains_2d,
                                                 pixel_pos = pixel_pos_2d,
                                                 pixel_mask = pixel_mask,
                                                 metadata = metadata,
                                                 output_file_path = output_file_path)
def quantity_to_tuple(quantity, unit_str):
"""
Splits a quantity into a tuple of (value,unit) where unit is FITS complient.
Useful to write FITS header keywords with units in a comment.
Parameters
----------
quantity : astropy quantity
The Astropy quantity to split.
unit_str: str
Unit string representation readable by astropy.units (e.g. 'm', 'TeV', ...)
Returns
-------
tuple
A tuple containing the value and the quantity.
"""
return quantity.to(unit_str).value, quantity.to(unit_str).unit.to_string(format='FITS')
def main():
    """Command-line entry point: parse options, then process each simtel file."""
    # PARSE OPTIONS ###########################################################
    desc = "Generate FITS files compliant for cleaning benchmark (from simtel files)."
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("--telescope", "-t",
                        metavar="INTEGER LIST",
                        help="The telescopes to query (telescopes number separated by a comma)")
    parser.add_argument("--event", "-e",
                        metavar="INTEGER LIST",
                        help="The events to extract (events ID separated by a comma)")
    parser.add_argument("--output", "-o",
                        metavar="DIRECTORY",
                        help="The output directory")
    parser.add_argument("fileargs", nargs="+", metavar="FILE",
                        help="The simtel files to process")
    args = parser.parse_args()
    # Telescope selection: fall back to the module default list.
    if args.telescope is None:
        tel_id_filter_list = DEFAULT_TEL_FILTER
    else:
        tel_id_filter_list = [int(item) for item in args.telescope.split(",")]
    # Event selection: None means "keep every event".
    event_id_filter_list = (None if args.event is None
                            else [int(item) for item in args.event.split(",")])
    print("Telescopes:", tel_id_filter_list)
    print("Events:", event_id_filter_list)
    output_directory = args.output
    # Fail fast if the requested output directory is unusable.
    if output_directory is not None:
        if not (os.path.exists(output_directory) and os.path.isdir(output_directory)):
            raise Exception("{} does not exist or is not a directory.".format(output_directory))
    # ITERATE OVER SIMTEL FILES ###############################################
    for simtel_file_path in args.fileargs:
        print("Processing", simtel_file_path)
        # EXTRACT, CROP AND SAVE THE IMAGES ###################################
        extract_images(simtel_file_path, tel_id_filter_list, event_id_filter_list, output_directory)
if __name__ == "__main__":
main()
| mit | 4,052,761,697,814,277,600 | 43.909357 | 177 | 0.528094 | false |
X-DataInitiative/tick | examples/plot_prox_example.py | 2 | 1432 | """
==============================
Examples of proximal operators
==============================
Plot examples of proximal operators available in tick
"""
import numpy as np
import matplotlib.pyplot as plt
from tick.prox import ProxL1, ProxElasticNet, ProxL2Sq, \
ProxPositive, ProxSlope, ProxTV, ProxZero, ProxBinarsity, ProxGroupL1, \
ProxEquality, ProxL1w
# Fixed seed so the example figure is reproducible run-to-run.
np.random.seed(12)
x = np.random.randn(50)
# Common y-axis limits with a small margin around the data range.
a, b = x.min() - 1e-1, x.max() + 1e-1
# Regularization strength shared by most of the proximal operators below.
s = 0.4
proxs = [
    ProxZero(),
    ProxPositive(),
    ProxL2Sq(strength=s),
    ProxL1(strength=s),
    ProxElasticNet(strength=s, ratio=0.5),
    ProxSlope(strength=s),
    ProxTV(strength=s),
    ProxEquality(range=(25, 40)),
    ProxL1w(strength=s, weights=0.1 * np.arange(50, dtype=np.double)),
    ProxGroupL1(strength=2 * s, blocks_start=np.arange(0, 50, 10),
                blocks_length=10 * np.ones((5,))),
    ProxBinarsity(strength=s, blocks_start=np.arange(0, 50, 10),
                  blocks_length=10 * np.ones((5,)))
]
# 3x4 grid: first panel shows the raw vector, remaining panels show each
# prox applied to it (11 operators + 1 original = 12 panels).
fig, _ = plt.subplots(3, 4, figsize=(16, 12), sharey=True, sharex=True)
fig.axes[0].stem(x)
fig.axes[0].set_title("original vector", fontsize=16)
fig.axes[0].set_xlim((-1, 51))
fig.axes[0].set_ylim((a, b))
for i, prox in enumerate(proxs):
    fig.axes[i + 1].stem(prox.call(x))
    fig.axes[i + 1].set_title(prox.name, fontsize=16)
    fig.axes[i + 1].set_xlim((-1, 51))
    fig.axes[i + 1].set_ylim((a, b))
plt.tight_layout()
plt.show()
| bsd-3-clause | 2,166,794,253,541,222,400 | 28.833333 | 76 | 0.617318 | false |
byuflowlab/vawt-wake-model | wake_model/validation/tescione_val.py | 2 | 34805 | import numpy as np
import matplotlib.pyplot as plt
import csv
from VAWT_Wake_Model import velocity_field
from scipy.io import loadmat
from numpy import fabs
from os import path
from matplotlib import rcParams
rcParams['font.family'] = 'Times New Roman'
# Manual toggles: repeated assignments below are deliberate — the active value
# is the LAST uncommented assignment of each name (here: errortype='rms',
# direction='vert', load_mat=False).
rom = True
# rom = False
r = 0.5 # radius
v = 1.0 # velocity
veltype = 'x'
errortype = 'abs'
errortype = 'rel'
epsilon = 1e-3
errortype = 'rms'
direction = 'horz'
direction = 'vert'
load_mat = True # load the original data from dataH_VAWT.mat if available
load_mat = False # use already imported values
# Import Star-CCM+ simulation data (from validation study)
basepath = path.join(path.dirname(path.realpath('__file__')))
fdata = basepath + path.sep + 'tes_cfd.csv'
f = open(fdata)
csv_f = csv.reader(f)
# Create empty arrays pos1..pos6 / vel1..vel6 via exec (one pair of arrays per
# downstream measurement station).
for i in range(6):
    name = str(i+1)
    exec('pos'+name+' = np.array([])')
    exec('vel'+name+' = np.array([])')
i = 0
for row in csv_f:
    # Columns come in (position, velocity) pairs per station; 'null' marks a
    # missing cell and the first row (i == 0) is the header.
    if row[0] != 'null' and i != 0:
        pos1 = np.append(pos1,float(row[0]))
    if row[2] != 'null' and i != 0:
        pos2 = np.append(pos2,float(row[2]))
    if row[4] != 'null' and i != 0:
        pos3 = np.append(pos3,float(row[4]))
    if row[6] != 'null' and i != 0:
        pos4 = np.append(pos4,float(row[6]))
    if row[8] != 'null' and i != 0:
        pos5 = np.append(pos5,float(row[8]))
    if row[10] != 'null' and i != 0:
        pos6 = np.append(pos6,float(row[10]))
    if row[1] != 'null' and i != 0:
        vel1 = np.append(vel1,float(row[1]))
    if row[3] != 'null' and i != 0:
        vel2 = np.append(vel2,float(row[3]))
    if row[5] != 'null' and i != 0:
        vel3 = np.append(vel3,float(row[5]))
    if row[7] != 'null' and i != 0:
        vel4 = np.append(vel4,float(row[7]))
    if row[9] != 'null' and i != 0:
        vel5 = np.append(vel5,float(row[9]))
    if row[11] != 'null' and i != 0:
        vel6 = np.append(vel6,float(row[11]))
    i += 1
f.close()
# Normalize positions by turbine diameter (2r) and velocities by freestream v,
# then sort by position and drop duplicated positions (the STAR-CCM+ export
# contained repeated values).
for i in range(6):
    name = str(i+1)
    exec('pos'+name+' = (1./(r*2.))*pos'+name+'')
    exec('vel'+name+' = (1./v)*vel'+name+'')
    #Ordering the data numerically by the position
    exec('pos'+name+', vel'+name+' = (list(t) for t in zip(*sorted(zip(pos'+name+', vel'+name+'))))')
    #STAR-CCM+ data contained repeated values; this creates new sets of data with repeats eliminated
    exec('pos'+name+'_0 = np.array([])\nvel'+name+'_0 = np.array([])\nfor i in range(np.size(pos'+name+')):\n\tif pos'+name+'[i] not in pos'+name+'_0:\n\t\tpos'+name+'_0 = np.append(pos'+name+'_0,pos'+name+'[i])\n\t\tvel'+name+'_0 = np.append(vel'+name+'_0,vel'+name+'[i])\npos'+name+' = np.copy(pos'+name+'_0)\nvel'+name+' = np.copy(vel'+name+'_0)')
if load_mat == True:
    # Sample the PIV measurement matrices directly from the .mat file.
    # xNN = lateral position (mm -> m via /1000); yNN = streamwise velocity
    # normalized by ~9.3 m/s (presumably the freestream speed — the model
    # section below uses velf = 9.308; TODO confirm).
    # NOTE(review): columns 1706..2956 appear to correspond to the stations
    # x/D = 0.75..2.0 plotted below — confirm against dataH_VAWT.mat layout.
    tesdata = loadmat(basepath + path.sep + 'dataH_VAWT.mat')
    x15 = np.zeros(33)
    x20 = np.zeros(33)
    x25 = np.zeros(33)
    x30 = np.zeros(33)
    x35 = np.zeros(33)
    x40 = np.zeros(26)
    y15 = np.zeros(33)
    y20 = np.zeros(33)
    y25 = np.zeros(33)
    y30 = np.zeros(33)
    y35 = np.zeros(33)
    y40 = np.zeros(26)
    # Row stride through the PIV grid: one sample every ~52.45 rows.
    space = 52.4545
    for i in range(33):
        x15[i] = (tesdata['y'][int(i*space),1706])/1000.
        x20[i] = (tesdata['y'][int(i*space),1956])/1000.
        x25[i] = (tesdata['y'][int(i*space),2206])/1000.
        x30[i] = (tesdata['y'][int(i*space),2456])/1000.
        x35[i] = (tesdata['y'][int(i*space),2706])/1000.
        y15[i] = (tesdata['u_x'][int(i*space),1706])/9.3
        y20[i] = (tesdata['u_x'][int(i*space),1956])/9.3
        y25[i] = (tesdata['u_x'][int(i*space),2206])/9.3
        y30[i] = (tesdata['u_x'][int(i*space),2456])/9.3
        y35[i] = (tesdata['u_x'][int(i*space),2706])/9.3
    # The x/D = 2.0 station only has 26 valid samples, offset by 7 strides.
    for i in range(26):
        x40[i] = (tesdata['y'][int(i*space+7.*space),2956])/1000.
        y40[i] = (tesdata['u_x'][int(i*space+7.*space),2956])/9.3
elif load_mat == False:
    # Hard-coded copies of the same PIV samples (already normalized) so the
    # script runs without dataH_VAWT.mat being present.
    x15 = np.array( [-0.867, -0.815, -0.763, -0.71, -0.658, -0.605, -0.553, -0.5, -0.448, -0.395, -0.343, -0.291, -0.238, -0.186, -0.133, -0.081, -0.028, 0.024, 0.077, 0.129, 0.182, 0.234, 0.286, 0.339, 0.391, 0.444, 0.496, 0.549, 0.601, 0.654, 0.706, 0.759, 0.811] )
    x20 = np.array( [-0.867, -0.815, -0.763, -0.71, -0.658, -0.605, -0.553, -0.5, -0.448, -0.395, -0.343, -0.291, -0.238, -0.186, -0.133, -0.081, -0.028, 0.024, 0.077, 0.129, 0.182, 0.234, 0.286, 0.339, 0.391, 0.444, 0.496, 0.549, 0.601, 0.654, 0.706, 0.759, 0.811] )
    x25 = np.array( [-0.867, -0.815, -0.763, -0.71, -0.658, -0.605, -0.553, -0.5, -0.448, -0.395, -0.343, -0.291, -0.238, -0.186, -0.133, -0.081, -0.028, 0.024, 0.077, 0.129, 0.182, 0.234, 0.286, 0.339, 0.391, 0.444, 0.496, 0.549, 0.601, 0.654, 0.706, 0.759, 0.811] )
    x30 = np.array( [-0.867, -0.815, -0.763, -0.71, -0.658, -0.605, -0.553, -0.5, -0.448, -0.395, -0.343, -0.291, -0.238, -0.186, -0.133, -0.081, -0.028, 0.024, 0.077, 0.129, 0.182, 0.234, 0.286, 0.339, 0.391, 0.444, 0.496, 0.549, 0.601, 0.654, 0.706, 0.759, 0.811] )
    x35 = np.array( [-0.867, -0.815, -0.763, -0.71, -0.658, -0.605, -0.553, -0.5, -0.448, -0.395, -0.343, -0.291, -0.238, -0.186, -0.133, -0.081, -0.028, 0.024, 0.077, 0.129, 0.182, 0.234, 0.286, 0.339, 0.391, 0.444, 0.496, 0.549, 0.601, 0.654, 0.706, 0.759, 0.811] )
    x40 = np.array( [-0.5, -0.448, -0.395, -0.343, -0.291, -0.238, -0.186, -0.133, -0.081, -0.028, 0.024, 0.077, 0.129, 0.182, 0.234, 0.286, 0.339, 0.391, 0.444, 0.496, 0.549, 0.601, 0.654, 0.706, 0.759, 0.811] )
    y15 = np.array( [0.994109059733553, 0.9963677091332376, 1.0005562115755295, 1.0129863984422722, 1.0507201982582595, 0.9041429426683261, 0.6716433705834841, 0.5201309340176287, 0.508144821028986, 0.45366206008875143, 0.4120816289764176, 0.3841500969704306, 0.36762989356268116, 0.34878204855733175, 0.3239136841769655, 0.30087738601863206, 0.2879935890797586, 0.26790770257593155, 0.2625797365850182, 0.292431451988392, 0.32394314025359344, 0.3491215866639411, 0.3443858871235991, 0.3482695942305222, 0.3129449223429124, 0.36582747813800515, 0.4154704750687794, 0.49039503797192485, 0.6038500532436828, 0.7927363063069286, 0.9055264980137487, 0.9492042298526092, 0.9678480377419288] )
    y20 = np.array( [0.9920370542708112, 0.9937027721069257, 0.9957821547724749, 1.0042398848773346, 0.9749571783229305, 0.8545201774180555, 0.565802548086458, 0.5143332054399019, 0.489198302575, 0.44408180130876845, 0.3816702901351002, 0.35005844980465, 0.33679345047454295, 0.3230305737612392, 0.3146901353633469, 0.29915244503218225, 0.2790206166599524, 0.2464443364444221, 0.2475139147846546, 0.25357920856345817, 0.27126299099141044, 0.2987673093397647, 0.3182385501649433, 0.3243813328675722, 0.30742297967502097, 0.32253464736566645, 0.3693305499571722, 0.4191276334361715, 0.5015171898966418, 0.6228502433753057, 0.8230607176338183, 0.9264600739810046, 0.9616530515736079] )
    y25 = np.array( [0.9879572983671737, 0.9905509911272896, 0.9933676654604374, 0.9923430478507566, 0.9160865587232668, 0.727774179726256, 0.5833627736796199, 0.4815735966162955, 0.4258818988364446, 0.40697818686203785, 0.3645090556659908, 0.33624432950148214, 0.3254855613810157, 0.3103304514855841, 0.3045151031176352, 0.28401646740896264, 0.2593430020697244, 0.23659060256721978, 0.22300420317944888, 0.2244438460371643, 0.23642978330838485, 0.2568650503421204, 0.28114157843083193, 0.294601202395863, 0.3006303134268269, 0.3118773622351477, 0.3203024532857655, 0.3747931924965308, 0.41075281837916877, 0.5033971635645369, 0.6381178175981282, 0.8832861499445961, 0.9780827152012185] )
    y30 = np.array( [0.9807919873645041, 0.9799943204500705, 0.9756659438025117, 0.9597111733105987, 0.8640379300795783, 0.6756090098761603, 0.5574514345456549, 0.48935854692854497, 0.428523583438216, 0.3833992822748339, 0.3480531699708427, 0.32487761318471114, 0.3153752965437699, 0.2971902045364618, 0.2830498661729626, 0.2701094817124857, 0.2525140339109516, 0.22990689461698513, 0.21388156547631884, 0.2009225725260476, 0.2109170460152375, 0.22197598259760387, 0.24007485599064649, 0.2629099716848817, 0.28306559237296497, 0.29465097651166405, 0.29791611270965696, 0.320843380159032, 0.365988216767817, 0.435994478045424, 0.5459295715799363, 0.7790196684279612, 0.9232640197764616] )
    y35 = np.array( [0.9771079214279068, 0.9739267876288416, 0.9659755072544549, 0.9409586395095753, 0.8372410989082885, 0.6761226132078453, 0.5567979451297009, 0.46883935558555223, 0.41825105219857445, 0.3643627166731387, 0.33608496528240905, 0.31852020274820736, 0.3061917562496233, 0.28608694443477783, 0.273103042317234, 0.26439124635620204, 0.2478225127262987, 0.22900647970420313, 0.20929176925815257, 0.19435346927934738, 0.19534832002282343, 0.19777809452729966, 0.21620168910917673, 0.23300547991382745, 0.25288024387549046, 0.27049877766131897, 0.2804228982454348, 0.2916868458391746, 0.3235522394271216, 0.3723078207006349, 0.4706069281766252, 0.6090953502184843, 0.8713603615558797] )
    y40 = np.array( [0.45465337175396475, 0.3980109024658175, 0.35904507111616846, 0.32774805291916986, 0.31088720860935865, 0.29832097587839296, 0.2821950936769575, 0.26788022954331897, 0.25719920421744913, 0.24586751730014048, 0.2286008521345077, 0.20763836919642048, 0.1912065524478209, 0.1952723101174681, 0.1863567864754898, 0.19527809328409215, 0.21004969265451465, 0.22887599974659367, 0.2507107104057936, 0.26844672767918015, 0.27999051414332765, 0.2936850402917538, 0.33560687204407424, 0.41822453322242903, 0.5404777347489278, 0.7985895165740945] )
# For each PIV lateral position, find the index of the CLOSEST CFD sample at
# the same station, so PIV and CFD can be compared point-by-point.
index15 = np.zeros_like(x15)
index20 = np.zeros_like(x20)
index25 = np.zeros_like(x25)
index30 = np.zeros_like(x30)
index35 = np.zeros_like(x35)
index40 = np.zeros_like(x40)
for i in range(33):
    indext15 = np.fabs(pos1-x15[i])
    index15[i] = np.argmin(indext15)
    indext20 = np.fabs(pos2-x20[i])
    index20[i] = np.argmin(indext20)
    indext25 = np.fabs(pos3-x25[i])
    index25[i] = np.argmin(indext25)
    indext30 = np.fabs(pos4-x30[i])
    index30[i] = np.argmin(indext30)
    indext35 = np.fabs(pos5-x35[i])
    index35[i] = np.argmin(indext35)
for i in range(26):
    indext40 = np.fabs(pos6-x40[i])
    index40[i] = np.argmin(indext40)
# Pointwise CFD-vs-PIV error in velocity DEFICIT (1 - u/U) at each station.
# errortype selects the metric: 'abs' = absolute difference, 'rel' = relative
# difference (falling back to absolute when the PIV deficit is below epsilon
# to avoid dividing by ~0), 'rms' = squared difference (rooted after
# averaging below).
cfd15t = np.zeros(33)
cfd20t = np.zeros(33)
cfd25t = np.zeros(33)
cfd30t = np.zeros(33)
cfd35t = np.zeros(33)
cfd40t = np.zeros(26)
for i in range(33):
    if errortype == 'abs':
        cfd15t[i] = fabs((1. - vel1[int(index15[i])])-(1. - y15[i]))
    elif errortype == 'rel':
        if fabs(1. - y15[i]) >= epsilon:
            cfd15t[i] = fabs(((1. - vel1[int(index15[i])])-(1. - y15[i]))/(1. - y15[i]))
        else:
            cfd15t[i] = fabs((1. - vel1[int(index15[i])])-(1. - y15[i]))
    elif errortype == 'rms':
        cfd15t[i] = ((1. - vel1[int(index15[i])])-(1. - y15[i]))**2
    if errortype == 'abs':
        cfd20t[i] = fabs((1. - vel2[int(index20[i])])-(1. - y20[i]))
    elif errortype == 'rel':
        if fabs(1. - y20[i]) >= epsilon:
            cfd20t[i] = fabs(((1. - vel2[int(index20[i])])-(1. - y20[i]))/(1. - y20[i]))
        else:
            cfd20t[i] = fabs((1. - vel2[int(index20[i])])-(1. - y20[i]))
    elif errortype == 'rms':
        cfd20t[i] = ((1. - vel2[int(index20[i])])-(1. - y20[i]))**2
    if errortype == 'abs':
        cfd25t[i] = fabs((1. - vel3[int(index25[i])])-(1. - y25[i]))
    elif errortype == 'rel':
        if fabs(1. - y25[i]) >= epsilon:
            cfd25t[i] = fabs(((1. - vel3[int(index25[i])])-(1. - y25[i]))/(1. - y25[i]))
        else:
            cfd25t[i] = fabs((1. - vel3[int(index25[i])])-(1. - y25[i]))
    elif errortype == 'rms':
        cfd25t[i] = ((1. - vel3[int(index25[i])])-(1. - y25[i]))**2
    if errortype == 'abs':
        cfd30t[i] = fabs((1. - vel4[int(index30[i])])-(1. - y30[i]))
    elif errortype == 'rel':
        if fabs(1. - y30[i]) >= epsilon:
            cfd30t[i] = fabs(((1. - vel4[int(index30[i])])-(1. - y30[i]))/(1. - y30[i]))
        else:
            cfd30t[i] = fabs((1. - vel4[int(index30[i])])-(1. - y30[i]))
    elif errortype == 'rms':
        cfd30t[i] = ((1. - vel4[int(index30[i])])-(1. - y30[i]))**2
    if errortype == 'abs':
        cfd35t[i] = fabs((1. - vel5[int(index35[i])])-(1. - y35[i]))
    elif errortype == 'rel':
        if fabs(1. - y35[i]) >= epsilon:
            cfd35t[i] = fabs(((1. - vel5[int(index35[i])])-(1. - y35[i]))/(1. - y35[i]))
        else:
            cfd35t[i] = fabs((1. - vel5[int(index35[i])])-(1. - y35[i]))
    elif errortype == 'rms':
        cfd35t[i] = ((1. - vel5[int(index35[i])])-(1. - y35[i]))**2
for i in range(26):
    if errortype == 'abs':
        cfd40t[i] = fabs((1. - vel6[int(index40[i])])-(1. - y40[i]))
    elif errortype == 'rel':
        if fabs(1. - y40[i]) >= epsilon:
            cfd40t[i] = fabs(((1. - vel6[int(index40[i])])-(1. - y40[i]))/(1. - y40[i]))
        else:
            cfd40t[i] = fabs((1. - vel6[int(index40[i])])-(1. - y40[i]))
    elif errortype == 'rms':
        cfd40t[i] = ((1. - vel6[int(index40[i])])-(1. - y40[i]))**2
# Aggregate per-station statistics. For 'rms' the std is set to the
# placeholder 1. (no meaningful spread for a root-mean-square value).
if errortype == 'abs' or errortype == 'rel':
    cfd15error = np.average(cfd15t)
    cfd15errorstd = np.std(cfd15t)
    cfd20error = np.average(cfd20t)
    cfd20errorstd = np.std(cfd20t)
    cfd25error = np.average(cfd25t)
    cfd25errorstd = np.std(cfd25t)
    cfd30error = np.average(cfd30t)
    cfd30errorstd = np.std(cfd30t)
    cfd35error = np.average(cfd35t)
    cfd35errorstd = np.std(cfd35t)
    cfd40error = np.average(cfd40t)
    cfd40errorstd = np.std(cfd40t)
elif errortype == 'rms':
    cfd15error = np.sqrt(np.average(cfd15t))
    cfd15errorstd = 1.
    cfd20error = np.sqrt(np.average(cfd20t))
    cfd20errorstd = 1.
    cfd25error = np.sqrt(np.average(cfd25t))
    cfd25errorstd = 1.
    cfd30error = np.sqrt(np.average(cfd30t))
    cfd30errorstd = 1.
    cfd35error = np.sqrt(np.average(cfd35t))
    cfd35errorstd = 1.
    cfd40error = np.sqrt(np.average(cfd40t))
    cfd40errorstd = 1.
# Overall error = plain mean over the six stations.
cfdoaerror = (cfd15error+cfd20error+cfd25error+cfd30error+cfd35error+cfd40error)/6.
cfdoaerrorstd = (cfd15errorstd+cfd20errorstd+cfd25errorstd+cfd30errorstd+cfd35errorstd+cfd40errorstd)/6.
## Plot CFD
# NOTE(review): the bare `print '...'` statements below are Python 2 syntax —
# this script targets Python 2.
fs = 15 # journal
# fs = 20 # thesis
# Figure layout depends on whether profiles are drawn horizontally (velocity
# on x-axis) or vertically (velocity on y-axis).
if direction == 'horz':
    fig1 = plt.figure(1,figsize=(12.5,6.5))
    fig1.subplots_adjust(left=.08,bottom=.12,right=.84,wspace=.45,hspace=.5)
elif direction == 'vert':
    fig1 = plt.figure(1,figsize=(12.5,6))
    fig1.subplots_adjust(left=.07,bottom=.12,right=.84,wspace=.36,hspace=.5)
# Station x/D = 0.75 (PIV dots vs CFD dashed line)
plt.subplot(2,3,1)
if direction == 'horz':
    plt.plot(y15,x15,'g.')
    plt.plot(vel1,pos1,'b--',linewidth=2)
    plt.ylim(-1,1)
    plt.xlim(0.1,1.2)
    plt.ylabel('$y/D$',fontsize=fs)
    plt.xlabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(0.55,0.,r'$x/D$ = 0.75',fontsize=fs)
elif direction == 'vert':
    plt.plot(x15,y15,'g.')
    plt.plot(pos1,vel1,'b--',linewidth=2)
    plt.xlim(-1,1)
    plt.ylim(0.1,1.2)
    plt.xlabel('$y/D$',fontsize=fs)
    plt.ylabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(-0.38,1.05,r'$x/D$ = 0.75',fontsize=fs)
    # plt.text(-0.45,1.05,r'$x/D$ = 0.75',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks(fontsize=fs)
print '1.5 cfd',cfd15error,cfd15errorstd
# Station x/D = 1.0
plt.subplot(2,3,2)
if direction == 'horz':
    plt.plot(y20,x20,'g.')
    plt.plot(vel2,pos2,'b--',linewidth=2)
    plt.ylim(-1,1)
    plt.xlim(0.1,1.2)
    plt.ylabel('$y/D$',fontsize=fs)
    plt.xlabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(0.55,0.,r'$x/D$ = 1.0',fontsize=fs)
elif direction == 'vert':
    plt.plot(x20,y20,'g.')
    plt.plot(pos2,vel2,'b--',linewidth=2)
    plt.xlim(-1,1)
    plt.ylim(0.1,1.2)
    plt.xlabel('$y/D$',fontsize=fs)
    plt.ylabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(-0.38,1.05,r'$x/D$ = 1.0',fontsize=fs)
    # plt.text(-0.45,1.05,r'$x/D$ = 1.0',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks(fontsize=fs)
print '2.0 cfd',cfd20error,cfd20errorstd
# Station x/D = 1.25 (this panel also carries the figure legend)
plt.subplot(2,3,3)
if direction == 'horz':
    plt.plot(y25,x25,'g.',label='PIV')
    plt.plot(vel3,pos3,'b--',linewidth=2,label='CFD')
    plt.ylim(-1,1)
    plt.xlim(0.1,1.2)
    plt.ylabel('$y/D$',fontsize=fs)
    plt.xlabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(0.55,0.,r'$x/D$ = 1.25',fontsize=fs)
elif direction == 'vert':
    plt.plot(x25,y25,'g.',label='PIV')
    plt.plot(pos3,vel3,'b--',linewidth=2,label='CFD')
    plt.xlim(-1,1)
    plt.ylim(0.1,1.2)
    plt.xlabel('$y/D$',fontsize=fs)
    plt.ylabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(-0.38,1.05,r'$x/D$ = 1.25',fontsize=fs)
    # plt.text(-0.45,1.05,r'$x/D$ = 1.25',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks(fontsize=fs)
print '2.5 cfd',cfd25error,cfd25errorstd
plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=fs)
# Station x/D = 1.5
plt.subplot(2,3,4)
if direction == 'horz':
    plt.plot(y30,x30,'g.')
    plt.plot(vel4,pos4,'b--',linewidth=2)
    plt.ylim(-1,1)
    plt.xlim(0.1,1.2)
    plt.ylabel('$y/D$',fontsize=fs)
    plt.xlabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(0.55,0.,r'$x/D$ = 1.5',fontsize=fs)
elif direction == 'vert':
    plt.plot(x30,y30,'g.')
    plt.plot(pos4,vel4,'b--',linewidth=2)
    plt.xlim(-1,1)
    plt.ylim(0.1,1.2)
    plt.xlabel('$y/D$',fontsize=fs)
    plt.ylabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(-0.38,1.05,r'$x/D$ = 1.5',fontsize=fs)
    # plt.text(-0.45,1.05,r'$x/D$ = 1.5',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks(fontsize=fs)
print '3.0 cfd',cfd30error,cfd30errorstd
# Station x/D = 1.75
plt.subplot(2,3,5)
if direction == 'horz':
    plt.plot(y35,x35,'g.')
    plt.plot(vel5,pos5,'b--',linewidth=2)
    plt.ylim(-1,1)
    plt.xlim(0.1,1.2)
    plt.ylabel('$y/D$',fontsize=fs)
    plt.xlabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(0.55,0.,r'$x/D$ = 1.75',fontsize=fs)
elif direction == 'vert':
    plt.plot(x35,y35,'g.')
    plt.plot(pos5,vel5,'b--',linewidth=2)
    plt.xlim(-1,1)
    plt.ylim(0.1,1.2)
    plt.xlabel('$y/D$',fontsize=fs)
    plt.ylabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(-0.38,1.05,r'$x/D$ = 1.75',fontsize=fs)
    # plt.text(-0.45,1.05,r'$x/D$ = 1.75',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks(fontsize=fs)
print '3.5 cfd',cfd35error,cfd35errorstd
# Station x/D = 2.0
plt.subplot(2,3,6)
if direction == 'horz':
    plt.plot(y40,x40,'g.')
    plt.plot(vel6,pos6,'b--',linewidth=2)
    plt.ylim(-1,1)
    plt.xlim(0.1,1.2)
    plt.ylabel('$y/D$',fontsize=fs)
    plt.xlabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(0.55,0.,r'$x/D$ = 2.0',fontsize=fs)
elif direction == 'vert':
    plt.plot(x40,y40,'g.')
    plt.plot(pos6,vel6,'b--',linewidth=2)
    plt.xlim(-1,1)
    plt.ylim(0.1,1.2)
    plt.xlabel('$y/D$',fontsize=fs)
    plt.ylabel(r'$u/U_\infty$',fontsize=fs)
    plt.text(-0.38,1.05,r'$x/D$ = 2.0',fontsize=fs)
    # plt.text(-0.45,1.05,r'$x/D$ = 2.0',fontsize=fs)
plt.xticks(fontsize=fs)
plt.yticks(fontsize=fs)
print '4.0 cfd',cfd40error,cfd40errorstd
print '\nOverall Error:',cfdoaerror
print 'Overall Stand Dev:',cfdoaerrorstd,'\n'
## Plot Model
# Evaluate the reduced-order wake model (velocity_field, defined elsewhere)
# on the same traverse positions as the PIV data so model curves and model
# error statistics can be added to the comparison.
if rom == True:
    # Turbine / flow parameters fed to velocity_field.
    rad = 0.5            # turbine radius -- presumably meters; confirm
    dia = 2*rad          # turbine diameter
    velf = 9.308422677   # free-stream velocity (assumed m/s -- confirm)
    sol = 0.24           # solidity; NOTE(review): unused in this section
    tsr = 4.5            # tip-speed ratio
    rot = tsr*velf/rad   # rotation rate implied by the tip-speed ratio
    chord = 0.06         # blade chord
    B = 2                # number of blades (assumed -- confirm)
    xt = 0.              # turbine center x-coordinate
    yt = 0.              # turbine center y-coordinate
    # Model samples at each downstream station; the x/D = 2.0 PIV traverse
    # has only 26 points (rom40), while rom40f holds a full 33-point curve
    # evaluated on the x35 grid for plotting purposes (see the loop below).
    rom15 = np.zeros(33)
    rom20 = np.zeros(33)
    rom25 = np.zeros(33)
    rom30 = np.zeros(33)
    rom35 = np.zeros(33)
    rom40 = np.zeros(26)
    rom40f = np.zeros(33)
    # Pointwise error accumulators (filled in the error loops below).
    rom15t = np.zeros(33)
    rom20t = np.zeros(33)
    rom25t = np.zeros(33)
    rom30t = np.zeros(33)
    rom35t = np.zeros(33)
    rom40t = np.zeros(26)
    for i in range(33):
        # Third argument is the downstream distance (in diameters); fourth
        # is the lateral traverse position scaled to physical units.
        rom15[i] = velocity_field(xt,yt,0.75*dia,x15[i]*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
        rom20[i] = velocity_field(xt,yt,1.0*dia,x20[i]*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
        rom25[i] = velocity_field(xt,yt,1.25*dia,x25[i]*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
        rom30[i] = velocity_field(xt,yt,1.5*dia,x30[i]*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
        rom35[i] = velocity_field(xt,yt,1.75*dia,x35[i]*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
        # Deliberately sampled on the x35 grid (33 points) so the x/D = 2.0
        # curve spans the full traverse even though the PIV set is shorter.
        rom40f[i] = velocity_field(xt,yt,2.0*dia,x35[i]*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
        print i
    # Error statistics at x/D = 2.0 use the actual 26 PIV positions.
    for i in range(26):
        rom40[i] = velocity_field(xt,yt,2.0*dia,x40[i]*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
        print i
for i in range(33):
if errortype == 'abs':
rom15t[i] = fabs((1. - rom15[i])-(1. - y15[i]))
elif errortype == 'rel':
if fabs(1. - y15[i]) >= epsilon:
rom15t[i] = fabs(((1. - rom15[i])-(1. - y15[i]))/(1. - y15[i]))
else:
rom15t[i] = fabs((1. - rom15[i])-(1. - y15[i]))
elif errortype == 'rms':
rom15t[i] = ((1. - rom15[i])-(1. - y15[i]))**2.
if errortype == 'abs':
rom20t[i] = fabs((1. - rom20[i])-(1. - y20[i]))
elif errortype == 'rel':
if fabs(1. - y20[i]) >= epsilon:
rom20t[i] = fabs(((1. - rom20[i])-(1. - y20[i]))/(1. - y20[i]))
else:
rom20t[i] = fabs((1. - rom20[i])-(1. - y20[i]))
elif errortype == 'rms':
rom20t[i] = ((1. - rom20[i])-(1. - y20[i]))**2.
if errortype == 'abs':
rom25t[i] = fabs((1. - rom25[i])-(1. - y25[i]))
elif errortype == 'rel':
if fabs(1. - y25[i]) >= epsilon:
rom25t[i] = fabs(((1. - rom25[i])-(1. - y25[i]))/(1. - y25[i]))
else:
rom25t[i] = fabs((1. - rom25[i])-(1. - y25[i]))
elif errortype == 'rms':
rom25t[i] = ((1. - rom25[i])-(1. - y25[i]))**2.
if errortype == 'abs':
rom30t[i] = fabs((1. - rom30[i])-(1. - y30[i]))
elif errortype == 'rel':
if fabs(1. - y30[i]) >= epsilon:
rom30t[i] = fabs(((1. - rom30[i])-(1. - y30[i]))/(1. - y30[i]))
else:
rom30t[i] = fabs((1. - rom30[i])-(1. - y30[i]))
elif errortype == 'rms':
rom30t[i] = ((1. - rom30[i])-(1. - y30[i]))**2.
if errortype == 'abs':
rom35t[i] = fabs((1. - rom35[i])-(1. - y35[i]))
elif errortype == 'rel':
if fabs(1. - y35[i]) >= epsilon:
rom35t[i] = fabs(((1. - rom35[i])-(1. - y35[i]))/(1. - y35[i]))
else:
rom35t[i] = fabs((1. - rom35[i])-(1. - y35[i]))
elif errortype == 'rms':
rom35t[i] = ((1. - rom35[i])-(1. - y35[i]))**2.
for i in range(26):
if errortype == 'abs':
rom40t[i] = fabs((1. - rom40[i])-(1. - y40[i]))
elif errortype == 'rel':
if fabs(1. - y40[i]) >= epsilon:
rom40t[i] = fabs(((1. - rom40[i])-(1. - y40[i]))/(1. - y40[i]))
else:
rom40t[i] = fabs((1. - rom40[i])-(1. - y40[i]))
elif errortype == 'rms':
rom40t[i] = ((1. - rom40[i])-(1. - y40[i]))**2.
    # Aggregate the pointwise errors into one (error, std-dev) pair per
    # station, then average over the six stations.
    if errortype == 'abs' or errortype == 'rel':
        rom15error = np.average(rom15t)
        rom15errorstd = np.std(rom15t)
        rom20error = np.average(rom20t)
        rom20errorstd = np.std(rom20t)
        rom25error = np.average(rom25t)
        rom25errorstd = np.std(rom25t)
        rom30error = np.average(rom30t)
        rom30errorstd = np.std(rom30t)
        rom35error = np.average(rom35t)
        rom35errorstd = np.std(rom35t)
        rom40error = np.average(rom40t)
        rom40errorstd = np.std(rom40t)
    elif errortype == 'rms':
        # RMS: square root of the mean of the squared deviations.  The
        # standard deviation is set to a placeholder of 1. in this mode.
        rom15error = np.sqrt(np.average(rom15t))
        rom15errorstd = 1.
        rom20error = np.sqrt(np.average(rom20t))
        rom20errorstd = 1.
        rom25error = np.sqrt(np.average(rom25t))
        rom25errorstd = 1.
        rom30error = np.sqrt(np.average(rom30t))
        rom30errorstd = 1.
        rom35error = np.sqrt(np.average(rom35t))
        rom35errorstd = 1.
        rom40error = np.sqrt(np.average(rom40t))
        rom40errorstd = 1.
    # Overall model error: plain mean across the six downstream stations.
    oaerror = (rom15error+rom20error+rom25error+rom30error+rom35error+rom40error)/6.
    oaerrorstd = (rom15errorstd+rom20errorstd+rom25errorstd+rom30errorstd+rom35errorstd+rom40errorstd)/6.
rom15f = velocity_field(xt,yt,0.75*dia,-1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom20f = velocity_field(xt,yt,1.0*dia,-1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom25f = velocity_field(xt,yt,1.25*dia,-1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom30f = velocity_field(xt,yt,1.5*dia,-1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom35f = velocity_field(xt,yt,1.75*dia,-1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom40ff = velocity_field(xt,yt,2.0*dia,-1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom15l = velocity_field(xt,yt,0.75*dia,1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom20l = velocity_field(xt,yt,1.0*dia,1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom25l = velocity_field(xt,yt,1.25*dia,1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom30l = velocity_field(xt,yt,1.5*dia,1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom35l = velocity_field(xt,yt,1.75*dia,1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom40fl = velocity_field(xt,yt,2.0*dia,1.*dia,velf,dia,rot,chord,B,param=None,veltype=veltype)
rom15 = np.insert(rom15,0,rom15f)
rom20 = np.insert(rom20,0,rom20f)
rom25 = np.insert(rom25,0,rom25f)
rom30 = np.insert(rom30,0,rom30f)
rom35 = np.insert(rom35,0,rom35f)
rom40f = np.insert(rom40f,0,rom40ff)
rom15 = np.append(rom15,rom15l)
rom20 = np.append(rom20,rom20l)
rom25 = np.append(rom25,rom25l)
rom30 = np.append(rom30,rom30l)
rom35 = np.append(rom35,rom35l)
rom40f = np.append(rom40f,rom40fl)
x15p = np.copy(x15)
x20p = np.copy(x20)
x25p = np.copy(x25)
x30p = np.copy(x30)
x35p = np.copy(x35)
x40p = np.copy(x35)
x15p = np.insert(x15p,0,-1.)
x20p = np.insert(x20p,0,-1.)
x25p = np.insert(x25p,0,-1.)
x30p = np.insert(x30p,0,-1.)
x35p = np.insert(x35p,0,-1.)
x40p = np.insert(x40p,0,-1.)
x15p = np.append(x15p,1.)
x20p = np.append(x20p,1.)
x25p = np.append(x25p,1.)
x30p = np.append(x30p,1.)
x35p = np.append(x35p,1.)
x40p = np.append(x40p,1.)
    # --- Figure 2: three-way comparison -- PIV (green dots), CFD (blue
    # dashed) and the wake model (red line) -- at all six stations.
    if direction == 'horz':
        fig2 = plt.figure(2,figsize=(12.5,6.5))
        fig2.subplots_adjust(left=.08,bottom=.12,right=.84,wspace=.5,hspace=.50)
    elif direction == 'vert':
        fig2 = plt.figure(2,figsize=(12.5,6))
        fig2.subplots_adjust(left=.07,bottom=.12,right=.84,wspace=.36,hspace=.50)
    plt.subplot(2,3,1)
    if direction == 'horz':
        plt.plot(y15,x15,'g.')
        plt.plot(vel1,pos1,'b--',linewidth=2)
        plt.plot(rom15,x15p,'r-')
        plt.ylim(-1,1)
        plt.xlim(0.1,1.2)
        plt.ylabel('$y/D$',fontsize=fs)
        plt.xlabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(0.55,0.0,r'$x/D$ = 0.75',fontsize=fs)
    elif direction == 'vert':
        plt.plot(x15,y15,'g.')
        plt.plot(pos1,vel1,'b--',linewidth=2)
        plt.plot(x15p,rom15,'r-')
        plt.xlim(-1,1)
        plt.ylim(0.1,1.2)
        plt.xlabel('$y/D$',fontsize=fs)
        plt.ylabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(-0.38,1.05,r'$x/D$ = 0.75',fontsize=fs)
#        plt.text(-0.45,1.05,r'$x/D$ = 0.75',fontsize=fs)
    plt.xticks(fontsize=fs)
    plt.yticks(fontsize=fs)
    # Relative error of the peak deficit ((min_pred - min_piv)/min_piv)
    # followed by the station-average error statistics.
    print '1.5 mod',(min(rom15)-min(y15))/min(y15),rom15error,rom15errorstd
    print '1.5 cfd',(min(vel1)-min(y15))/min(y15),cfd15error,cfd15errorstd
    plt.subplot(2,3,2)
    if direction == 'horz':
        plt.plot(y20,x20,'g.')
        plt.plot(vel2,pos2,'b--',linewidth=2)
        plt.plot(rom20,x20p,'r-')
        plt.ylim(-1,1)
        plt.xlim(0.1,1.2)
        plt.ylabel('$y/D$',fontsize=fs)
        plt.xlabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(0.55,0.0,r'$x/D$ = 1.0',fontsize=fs)
    elif direction == 'vert':
        plt.plot(x20,y20,'g.')
        plt.plot(pos2,vel2,'b--',linewidth=2)
        plt.plot(x20p,rom20,'r-')
        plt.xlim(-1,1)
        plt.ylim(0.1,1.2)
        plt.xlabel('$y/D$',fontsize=fs)
        plt.ylabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(-0.38,1.05,r'$x/D$ = 1.0',fontsize=fs)
#        plt.text(-0.45,1.05,r'$x/D$ = 1.0',fontsize=fs)
    plt.xticks(fontsize=fs)
    plt.yticks(fontsize=fs)
    print '2.0 mod',(min(rom20)-min(y20))/min(y20),rom20error,rom20errorstd
    print '2.0 cfd',(min(vel2)-min(y20))/min(y20),cfd20error,cfd20errorstd
    plt.subplot(2,3,3)
    if direction == 'horz':
        plt.plot(y25,x25,'g.',label='PIV')
        plt.plot(vel3,pos3,'b--',linewidth=2,label='CFD')
        plt.plot(rom25,x25p,'r-',label='Model')
        plt.ylim(-1,1)
        plt.xlim(0.1,1.2)
        plt.ylabel('$y/D$',fontsize=fs)
        plt.xlabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(0.55,0.0,r'$x/D$ = 1.25',fontsize=fs)
    elif direction == 'vert':
        plt.plot(x25,y25,'g.',label='PIV')
        plt.plot(pos3,vel3,'b--',linewidth=2,label='CFD')
        plt.plot(x25p,rom25,'r-',label='Model')
        plt.xlim(-1,1)
        plt.ylim(0.1,1.2)
        plt.xlabel('$y/D$',fontsize=fs)
        plt.ylabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(-0.38,1.05,r'$x/D$ = 1.25',fontsize=fs)
#        plt.text(-0.45,1.05,r'$x/D$ = 1.25',fontsize=fs)
    plt.xticks(fontsize=fs)
    plt.yticks(fontsize=fs)
#        elif k == 1:
    # Single legend for the whole figure, placed outside subplot 3.
    plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=fs)
    print '2.5 mod',(min(rom25)-min(y25))/min(y25),rom25error,rom25errorstd
    print '2.5 cfd',(min(vel3)-min(y25))/min(y25),cfd25error,cfd25errorstd
    # --- Figure 2, bottom row: stations x/D = 1.5, 1.75 and 2.0.
    plt.subplot(2,3,4)
    if direction == 'horz':
        plt.plot(y30,x30,'g.')
        plt.plot(vel4,pos4,'b--',linewidth=2)
        plt.plot(rom30,x30p,'r-')
        plt.ylim(-1,1)
        plt.xlim(0.1,1.2)
        plt.ylabel('$y/D$',fontsize=fs)
        plt.xlabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(0.55,0.0,r'$x/D$ = 1.5',fontsize=fs)
    elif direction == 'vert':
        plt.plot(x30,y30,'g.')
        plt.plot(pos4,vel4,'b--',linewidth=2)
        plt.plot(x30p,rom30,'r-')
        plt.xlim(-1,1)
        plt.ylim(0.1,1.2)
        plt.xlabel('$y/D$',fontsize=fs)
        plt.ylabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(-0.38,1.05,r'$x/D$ = 1.5',fontsize=fs)
#        plt.text(-0.45,1.05,r'$x/D$ = 1.5',fontsize=fs)
    plt.xticks(fontsize=fs)
    plt.yticks(fontsize=fs)
    print '3.0 mod',(min(rom30)-min(y30))/min(y30),rom30error,rom30errorstd
    print '3.0 cfd',(min(vel4)-min(y30))/min(y30),cfd30error,cfd30errorstd
    plt.subplot(2,3,5)
    if direction == 'horz':
        plt.plot(y35,x35,'g.')
        plt.plot(vel5,pos5,'b--',linewidth=2)
        plt.plot(rom35,x35p,'r-')
        plt.ylim(-1,1)
        plt.xlim(0.1,1.2)
        plt.ylabel('$y/D$',fontsize=fs)
        plt.xlabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(0.55,0.0,r'$x/D$ = 1.75',fontsize=fs)
    elif direction == 'vert':
        plt.plot(x35,y35,'g.')
        plt.plot(pos5,vel5,'b--',linewidth=2)
        plt.plot(x35p,rom35,'r-')
        plt.xlim(-1,1)
        plt.ylim(0.1,1.2)
        plt.xlabel('$y/D$',fontsize=fs)
        plt.ylabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(-0.38,1.05,r'$x/D$ = 1.75',fontsize=fs)
#        plt.text(-0.45,1.05,r'$x/D$ = 1.75',fontsize=fs)
    plt.xticks(fontsize=fs)
    plt.yticks(fontsize=fs)
    print '3.5 mod',(min(rom35)-min(y35))/min(y35),rom35error,rom35errorstd
    print '3.5 cfd',(min(vel5)-min(y35))/min(y35),cfd35error,cfd35errorstd
    plt.subplot(2,3,6)
    if direction == 'horz':
        plt.plot(y40,x40,'g.')
        plt.plot(vel6,pos6,'b--',linewidth=2)
        # rom40f is the full-length curve (evaluated on the x35 grid).
        plt.plot(rom40f,x40p,'r-')
        plt.ylim(-1,1)
        plt.xlim(0.1,1.2)
        plt.ylabel('$y/D$',fontsize=fs)
        plt.xlabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(0.55,0.0,r'$x/D$ = 2.0',fontsize=fs)
    elif direction == 'vert':
        plt.plot(x40,y40,'g.')
        plt.plot(pos6,vel6,'b--',linewidth=2)
        plt.plot(x40p,rom40f,'r-')
        plt.xlim(-1,1)
        plt.ylim(0.1,1.2)
        plt.xlabel('$y/D$',fontsize=fs)
        plt.ylabel(r'$u/U_\infty$',fontsize=fs)
        plt.text(-0.38,1.05,r'$x/D$ = 2.0',fontsize=fs)
#        plt.text(-0.45,1.05,r'$x/D$ = 2.0',fontsize=fs)
    plt.xticks(fontsize=fs)
    plt.yticks(fontsize=fs)
    print '4.0 mod',(min(rom40f)-min(y40))/min(y40),rom40error,rom40errorstd
    print '4.0 cfd',(min(vel6)-min(y40))/min(y40),cfd40error,cfd40errorstd
    # --- Summary: print overall error statistics and the error on the
    # maximum velocity deficit (the deepest point of each wake profile).
    if errortype == 'abs':
        print '\n Average Absolute Error'
    elif errortype == 'rel':
        print '\n Average Relative Error'
    elif errortype == 'rms':
        print '\n Root Mean Squared Error'
    print '\nAverage CFD Error:',cfdoaerror
    print 'Average CFD Stand Dev:',cfdoaerrorstd
    print '\nAverage ROM Error:',oaerror
    print 'Average ROM Stand Dev:',oaerrorstd
    # Minimum u/U_inf (i.e. maximum deficit) per station, for PIV, CFD and
    # the model.  low40rom uses rom40 (the 26-point array on the actual
    # x40 grid), not the plotting curve rom40f.
    low15 = y15.min()
    low20 = y20.min()
    low25 = y25.min()
    low30 = y30.min()
    low35 = y35.min()
    low40 = y40.min()
    low15cfd = vel1.min()
    low20cfd = vel2.min()
    low25cfd = vel3.min()
    low30cfd = vel4.min()
    low35cfd = vel5.min()
    low40cfd = vel6.min()
    low15rom = rom15.min()
    low20rom = rom20.min()
    low25rom = rom25.min()
    low30rom = rom30.min()
    low35rom = rom35.min()
    low40rom = rom40.min()
    # Maximum-deficit error per station.  NOTE(review): if errortype is
    # anything other than 'abs'/'rms'/'rel', the mdf* names below are never
    # assigned and the prints would raise a NameError.
    if errortype == 'abs' or errortype == 'rms':
        mdfcfd15 = fabs((1.-low15cfd)-(1.-low15))
        mdfcfd20 = fabs((1.-low20cfd)-(1.-low20))
        mdfcfd25 = fabs((1.-low25cfd)-(1.-low25))
        mdfcfd30 = fabs((1.-low30cfd)-(1.-low30))
        mdfcfd35 = fabs((1.-low35cfd)-(1.-low35))
        mdfcfd40 = fabs((1.-low40cfd)-(1.-low40))
        mdfrom15 = fabs((1.-low15rom)-(1.-low15))
        mdfrom20 = fabs((1.-low20rom)-(1.-low20))
        mdfrom25 = fabs((1.-low25rom)-(1.-low25))
        mdfrom30 = fabs((1.-low30rom)-(1.-low30))
        mdfrom35 = fabs((1.-low35rom)-(1.-low35))
        mdfrom40 = fabs((1.-low40rom)-(1.-low40))
    elif errortype == 'rel':
        mdfcfd15 = fabs(((1.-low15cfd)-(1.-low15))/(1.-low15))
        mdfcfd20 = fabs(((1.-low20cfd)-(1.-low20))/(1.-low20))
        mdfcfd25 = fabs(((1.-low25cfd)-(1.-low25))/(1.-low25))
        mdfcfd30 = fabs(((1.-low30cfd)-(1.-low30))/(1.-low30))
        mdfcfd35 = fabs(((1.-low35cfd)-(1.-low35))/(1.-low35))
        mdfcfd40 = fabs(((1.-low40cfd)-(1.-low40))/(1.-low40))
        mdfrom15 = fabs(((1.-low15rom)-(1.-low15))/(1.-low15))
        mdfrom20 = fabs(((1.-low20rom)-(1.-low20))/(1.-low20))
        mdfrom25 = fabs(((1.-low25rom)-(1.-low25))/(1.-low25))
        mdfrom30 = fabs(((1.-low30rom)-(1.-low30))/(1.-low30))
        mdfrom35 = fabs(((1.-low35rom)-(1.-low35))/(1.-low35))
        mdfrom40 = fabs(((1.-low40rom)-(1.-low40))/(1.-low40))
    print 'Maximum Deficit Error (CFD):',mdfcfd15,mdfcfd20,mdfcfd25,mdfcfd30,mdfcfd35,mdfcfd40,'\n\t',max(mdfcfd15,mdfcfd20,mdfcfd25,mdfcfd30,mdfcfd35,mdfcfd40)
    print 'Maximum Deficit Error (ROM):',mdfrom15,mdfrom20,mdfrom25,mdfrom30,mdfrom35,mdfrom40,'\n\t',max(mdfrom15,mdfrom20,mdfrom25,mdfrom30,mdfrom35,mdfrom40)
    plt.show()
| mit | 3,470,095,439,570,599,400 | 42.724874 | 696 | 0.594139 | false |
hchim/stockanalyzer | analysis/indicators.py | 1 | 13443 | import pandas as pd
import numpy as np
from analysis.basic import compute_daily_returns
import math
def sma(prices, params):
"""
Calculate the simple moving average indicator.
Parameters
----------
prices: DataFrame
params: dict
e.g. {"windows": [5, 10]}
Returns
----------
sma_val : DataFrame
the simple moving average of the close price.
"""
windows = params["windows"]
close = prices["Close"].values
values = []
column_names = []
for w in windows:
values.append(pd.rolling_mean(close, w))
column_names.append("SMA{}".format(w))
return pd.DataFrame(np.column_stack(tuple(values)), index=prices.index, columns=column_names)
def __ema(close, window):
data = pd.ewma(close, span=window)
data[0:window-1] = np.nan
return data
def ema(prices, params):
"""
Calculate the exponential moving average indicator.
Parameters
----------
prices: DataFrame
params: dict
e.g. {"windows": [5, 10]}
Returns
----------
ema_val : DataFrame
the simple moving average of the close price.
"""
windows = params["windows"]
close = prices["Close"].values
values = []
column_names = []
for w in windows:
values.append(__ema(close, w))
column_names.append("EMA{}".format(w))
return pd.DataFrame(np.column_stack(tuple(values)), index=prices.index, columns=column_names)
def bb(prices, params={"window": 20}):
"""
Calculate the bollinger bands indicator
Parameters
----------
prices: DataFrame
params: dict
Returns
----------
bb_vals: DataFrame
"""
window = params["window"]
close = prices["Close"].values
rm = pd.rolling_mean(close, window) # 20 day mean
rstd = pd.rolling_std(close, window) # 20 day standard deviation
upper_band = rm + (rstd * 2)
lower_band = rm - (rstd * 2)
values = np.column_stack((rm, upper_band, lower_band))
return pd.DataFrame(values, index=prices.index, columns=["Middle", "Upper", "Lower"])
def macd(prices, params={"windows": [12, 26, 9]}):
"""
Calculate the MACD indicator
Parameters
----------
prices: DataFrame
params: dict
Returns
----------
macd_val: DataFrame
"""
close = prices["Close"]
windows = params["windows"]
ema12 = __ema(close, windows[0])
ema26 = __ema(close, windows[1])
diff = ema12 - ema26
dea = __ema(diff, windows[2])
macd_val = diff - dea
values = np.column_stack((diff, dea, macd_val))
return pd.DataFrame(values, index=prices.index, columns=["DIFF", "DEA", "MACD"])
def rsi(prices, params={"window": 14}):
"""
Calculate the RSI indicator.
Parameters
----------
prices: DataFrame
params: dict
Returns
----------
rsi_val: DataFrame
"""
window = params["window"]
close = prices["Close"]
delta = close - close.shift(1) # the difference between rows
gain = delta.copy()
lose = delta.copy()
gain[gain < 0] = 0
lose[lose > 0] = 0
rs = pd.rolling_mean(gain, window) / abs(pd.rolling_mean(lose, window))
rsi_val = 100 - 100 / (1 + rs)
return pd.DataFrame(rsi_val.values, index=prices.index, columns=["RSI"])
def __mfm(prices):
mfm = ((prices['Close'] - prices['Low']) - (prices['High'] - prices['Close'])) \
/(prices['High'] - prices['Low'])
return mfm
def __mfv(prices):
mfm = __mfm(prices)
return mfm * prices['Volume']
def cmf(prices, params={"window": 20}):
"""
1. Money Flow Multiplier = [(Close - Low) - (High - Close)] /(High - Low)
2. Money Flow Volume = Money Flow Multiplier x Volume for the Period
3. 20-period CMF = 20-period Sum of Money Flow Volume / 20 period Sum of Volume
Parameters
----------
prices: DataFrame
Includes the open, close, high, low and volume.
params: dict
Returns
----------
cmf_val: DataFrame
"""
window = params["window"]
mfv = __mfv(prices)
mfv = pd.rolling_sum(mfv, window)
volumes = pd.rolling_sum(prices['Volume'], window)
cmf_val = (mfv/volumes)
return pd.DataFrame(cmf_val.values, index=prices.index, columns=["CMF"])
def __tp(prices):
return (prices['High'] + prices['Low'] + prices['Close']) / 3.0
def mfi(prices, params={"window": 14}):
"""
1. Typical Price = (High + Low + Close)/3
2. Raw Money Flow = Typical Price x Volume
3. Money Flow Ratio = (14-period Positive Money Flow)/(14-period Negative Money Flow)
4. Money Flow Index = 100 - 100/(1 + Money Flow Ratio)
Parameters
----------
prices: DataFrame
Includes the open, close, high, low and volume.
params: dict
Returns
----------
mfi_val: DataFrame
"""
window = params["window"]
tp = __tp(prices)
rmf = tp * prices['Volume']
close = prices["Close"]
ret = close - close.shift(1)
prmf = rmf.copy()
nrmf = rmf.copy()
prmf[ret < 0] = 0
nrmf[ret > 0] = 0
mfr = pd.rolling_sum(prmf, window)/pd.rolling_sum(nrmf, window)
mfi_val = 100 - 100. / (1 + mfr)
return pd.DataFrame(mfi_val.values, index=prices.index, columns=["MFI"])
def __rsv(prices, window):
close = prices["Close"]
high = prices["High"]
low = prices["Low"]
length = len(prices.index)
rsv_val = np.zeros(length)
rsv_val[0:window-1] = np.nan
for i in range(window-1, length):
hn = high[i-window+1:i+1].max()
ln = low[i-window+1:i+1].min()
rsv_val[i] = (close[i] - ln) * 100.0 / (hn - ln)
return rsv_val
def kdj(prices, params={"windows": [9, 3, 3]}):
"""
Calculate KDJ indicator:
RSV = (Ct - Ln) / (Hn - Ln) * 100
K = sma3(RSV)
D = sma3(K)
J = 3 * D - 2 * K
Parameters
----------
prices: DataFrame
Includes the open, close, high, low and volume.
params: dict
Returns
----------
kdj_val: DataFrame
"""
windows = params["windows"]
rsv = __rsv(prices, windows[0])
k = pd.rolling_mean(rsv, windows[1])
d = pd.rolling_mean(k, windows[2])
j = 3 * k - 2 * d
kdj_val = np.column_stack((k, d, j))
return pd.DataFrame(kdj_val, index=prices.index, columns=["K", "D", "J"])
def stoch(prices, params={"windows": [14, 3, 3]}):
"""
RSV = (Ct - Ln) / (Hn - Ln) * 100
K = sma(RSV, params["windows"][1])
D = sma(K, params["windows"][2])
Parameters
----------
prices: DataFrame
Includes the open, close, high, low and volume.
params: dict
Returns
----------
kd_val: DataFrame
"""
windows = params["windows"]
rsv = __rsv(prices, windows[0])
k = pd.rolling_mean(rsv, windows[1])
d = pd.rolling_mean(k, windows[2])
stoch_val = np.column_stack((k, d))
return pd.DataFrame(stoch_val, index=prices.index, columns=["K", "D"])
def __tr(prices):
"""
TR is defined as the greatest of the following:
Method 1: Current High less the current Low
Method 2: Current High less the previous Close (absolute value)
Method 3: Current Low less the previous Close (absolute value)
"""
m1 = prices['High'] - prices['Low']
m2 = abs(prices['High'] - prices['Close'].shift(1))
m3 = abs(prices['Low'] - prices['Close'].shift(1))
tr = pd.concat([m1, m2, m3], axis=1).max(axis=1)
tr[0] = np.nan
return tr
def __wilder_smooth_1(values, window):
"""
First TR14 = Sum of first 14 periods of TR1
Second TR14 = First TR14 - (First TR14/14) + Current TR1
Subsequent Values = Prior TR14 - (Prior TR14/14) + Current TR1
"""
length = len(values.index)
smooth_val = pd.Series(np.zeros(length), index=values.index)
smooth_val[0:window] = np.nan
smooth_val[window] = np.sum(values[1:window+1].values)
for i in range(window + 1, length):
smooth_val[i] = (smooth_val[i-1] * (1 - 1.0 / window)) + values[i]
return smooth_val
def __wilder_smooth_2(values, window):
"""
First ADX14 = 14 period Average of DX
Second ADX14 = ((First ADX14 x 13) + Current DX Value)/14
Subsequent ADX14 = ((Prior ADX14 x 13) + Current DX Value)/14
"""
start = window
length = len(values.index)
smooth_val = pd.Series(np.zeros(length), index=values.index)
smooth_val[0:start + window - 1] = np.nan
smooth_val[start + window - 1] = np.mean(values[start: start + window].values)
for i in range(start + window, length):
smooth_val[i] = (smooth_val[i-1] * (window - 1) + values[i]) / window
return smooth_val
def atr(prices, params={"window":14}):
"""
Current ATR = [(Prior ATR x 13) + Current TR] / 14
Average True Range (ATR) is an indicator that measures volatility.
Parameters
----------
prices: DataFrame
Includes the open, close, high, low and volume.
params: dict
Returns
----------
atr_val: DataFrame
"""
tr = __tr(prices)
window = params["window"]
length = len(prices.index)
atr_val = pd.Series(np.zeros(length), index=prices.index)
atr_val[1] = tr[1]
for i in range(2, length):
atr_val[i] = (atr_val[i-1] * (window - 1) + tr[i]) / window
return pd.DataFrame(atr_val.values, index=prices.index, columns=["ATR"])
def adx(prices, params={"window":14}):
"""
Parameters
----------
prices: DataFrame
Includes the open, close, high, low and volume.
params: dict
Returns
----------
adx_val: DataFrame
the DataFrame has three columns: ADX, +DI, -DI
"""
window = params["window"]
tr = __tr(prices)
high = prices["High"]
low = prices["Low"]
length = len(prices.index)
pdm = pd.Series(np.zeros(length), index=prices.index)
mdm = pd.Series(np.zeros(length), index=prices.index)
pdm[0] = np.nan
mdm[0] = np.nan
for i in range(1, length):
up = high[i] - high[i-1]
down = low[i-1] - low[i]
if up > down and up > 0:
pdm[i] = up
if down > up and down > 0:
mdm[i] = down
str = __wilder_smooth_1(tr, window)
spdm = __wilder_smooth_1(pdm, window)
smdm = __wilder_smooth_1(mdm, window)
# green line
pdi = spdm / str * 100
# red line
mdi = smdm / str * 100
dx = abs(pdi - mdi) / (pdi + mdi) * 100
adx_val = __wilder_smooth_2(dx, window)
values = np.column_stack((adx_val, pdi, mdi))
return pd.DataFrame(values, index=prices.index, columns=["ADX", "+DI", "-DI"])
def cci(prices, params={"window":20}):
"""
Parameters
----------
prices: DataFrame
Includes the open, close, high, low and volume.
params: dict
Returns
----------
cci_val: DataFrame
"""
length = len(prices.index)
window = params["window"]
tp = __tp(prices)
stp = pd.rolling_mean(tp, window)
cci_val = pd.Series(np.zeros(length), index=prices.index)
cci_val[0:window-1] = np.nan
for i in range(window-1, length):
dev = np.sum(abs(stp[i] - tp[i-window+1:i+1])) / window
cci_val[i] = (tp[i] - stp[i]) / (0.015 * dev)
return pd.DataFrame(cci_val.values, index=prices.index, columns=["CCI"])
def obv(prices, params=None):
length = len(prices.index)
close = prices["Close"]
volume = prices["Volume"]
obv_val = pd.Series(np.zeros(length), index=prices.index)
obv_val[0] = volume[0]
for i in range(1, length):
if close[i] > close[i-1]:
obv_val[i] = obv_val[i-1] + volume[i]
elif close[i] < close[i-1]:
obv_val[i] = obv_val[i-1] - volume[i]
else:
obv_val[i] = obv_val[i-1]
return pd.DataFrame(obv_val.values, index=prices.index, columns=["OBV"])
def adl(prices, params=None):
"""
1. Money Flow Multiplier = [(Close - Low) - (High - Close)] /(High - Low)
2. Money Flow Volume = Money Flow Multiplier x Volume for the Period
3. ADL = Previous ADL + Current Period's Money Flow Volume
"""
length = len(prices.index)
mfv = __mfv(prices)
adl_val = pd.Series(np.zeros(length), index=prices.index)
adl_val[0] = mfv[0]
for i in range(1, length):
adl_val[i] = adl_val[i-1] + mfv[i]
return pd.DataFrame(adl_val.values, index=prices.index, columns=["ADL"])
def trix(prices, params={"windows": [15, 9]}):
windows = params["windows"]
raw = __ema(prices["Close"], windows[0])
tr = __ema(__ema(raw, windows[0]), windows[0])
shift_tr = tr.shift(1)
trix_val = (tr - shift_tr) / shift_tr * 100
matrix = pd.rolling_mean(trix_val, windows[1])
values = np.column_stack((trix_val, matrix))
return pd.DataFrame(values, index=prices.index, columns=["TRIX", "MATRIX"])
def mafe(prices, params={"window": 5}):
"""
This indicator evaluate the efficiency of funds. It is very similar to the momentum
indicator, their figure looks the same most of the time, except when a significant
price change (either increase or decrease) does not have a corresponding volume increase.
It is better to read this indicator with volume.
"""
window = params["window"]
ret = compute_daily_returns(prices["Close"])
rmf = __tp(prices) * prices['Volume']
values = ret / rmf
mean = math.fabs(np.mean(values))
ma = pd.rolling_mean(values / mean, window)
return pd.DataFrame(ma, index=prices.index, columns=["MAFE"]) | mit | 4,074,898,968,506,970,000 | 26.050302 | 97 | 0.583054 | false |
CVerhoosel/nutils | examples/burgers.py | 1 | 3390 | #! /usr/bin/python3
#
# In this script we solve the Burgers equation on a 1D or 2D periodic dommain,
# starting from a centered Gaussian and convecting in the positive direction of
# the first coordinate.
import nutils, numpy
# The main function defines the parameter space for the script. Configurable
# parameters are the mesh density (in number of elements along an edge), number
# of dimensions, polynomial degree, time scale, Newton tolerance and the
# stopping time.
def main(nelems: 'number of elements' = 20,
ndims: 'spatial dimension' = 1,
degree: 'polynomial degree' = 1,
timescale: 'time scale (timestep=timescale/nelems)' = .5,
newtontol: 'solver tolerance' = 1e-5,
endtime: 'end time' = numpy.inf):
domain, geom = nutils.mesh.rectilinear([numpy.linspace(0,1,nelems+1)]*ndims, periodic=range(ndims))
ns = nutils.function.Namespace()
ns.x = geom
ns.basis = domain.basis('discont', degree=degree)
ns.u = 'basis_n ?lhs_n'
ns.f = '.5 u^2'
ns.C = 1
res = domain.integral('-basis_n,0 f d:x' @ ns, degree=5)
res += domain.interfaces.integral('-[basis_n] n_0 ({f} - .5 C [u] n_0) d:x' @ ns, degree=degree*2)
inertia = domain.integral('basis_n u d:x' @ ns, degree=5)
sqr = domain.integral('(u - exp(-?y_i ?y_i)(y_i = 5 (x_i - 0.5_i)))^2 d:x' @ ns, degree=5)
lhs0 = nutils.solver.optimize('lhs', sqr)
timestep = timescale/nelems
bezier = domain.sample('bezier', 7)
for itime, lhs in nutils.log.enumerate('timestep', nutils.solver.impliciteuler('lhs', res, inertia, timestep=timestep, lhs0=lhs0, newtontol=newtontol)):
x, u = bezier.eval(['x_i', 'u'] @ ns, lhs=lhs)
nutils.export.triplot('solution.png', x, u, tri=bezier.tri, hull=bezier.hull, clim=(0,1))
if itime * timestep >= endtime:
break
return lhs
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to simulate until 0.5 seconds run :sh:`python3 burgers.py
# endtime=0.5`.
if __name__ == '__main__':
nutils.cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategicly chosen return values for routine regression testing. Here we use
# the standard :mod:`unittest` framework, with
# :func:`nutils.numeric.assert_allclose64` facilitating the embedding of
# desired results as compressed base64 data.
class test(nutils.testing.TestCase):
@nutils.testing.requires('matplotlib')
def test_1d_p1(self):
lhs = main(ndims=1, nelems=10, timescale=.1, degree=1, endtime=.01)
nutils.numeric.assert_allclose64(lhs, 'eNrbocann6u3yqjTyMLUwfSw2TWzKPNM8+9mH8wyTMNNZxptMir'
'W49ffpwYAI6cOVA==')
@nutils.testing.requires('matplotlib')
def test_1d_p2(self):
lhs = main(ndims=1, nelems=10, timescale=.1, degree=2, endtime=.01)
nutils.numeric.assert_allclose64(lhs, 'eNrr0c7SrtWfrD/d4JHRE6Ofxj6mnqaKZofNDpjZmQeYB5pHmL8'
'we23mb5ZvWmjKY/LV6KPRFIMZ+o368dp92gCxZxZG')
@nutils.testing.requires('matplotlib')
def test_2d_p1(self):
lhs = main(ndims=2, nelems=4, timescale=.1, degree=1, endtime=.01)
nutils.numeric.assert_allclose64(lhs, 'eNoNyKENhEAQRuGEQsCv2SEzyQZHDbRACdsDJNsBjqBxSBxBHIg'
'J9xsqQJ1Drro1L1/eYBZceGz8njrRyacm8UQLBvPYCw1airpyUVYSJLhKijK4IC01WDnqqxvX8OTl427'
'aU73sctPGr3qqceBnRzOjo0xy9JpJR73m6R6YMZo/Q+FCLQ==')
| mit | 8,277,718,623,519,437,000 | 41.375 | 154 | 0.70708 | false |
burakbayramli/dersblog | vision/vision_02/plot3d.py | 2 | 2141 | from mpl_toolkits.mplot3d import axes3d
from matplotlib.patches import Circle, PathPatch
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
from mpl_toolkits.mplot3d import art3d
import numpy as np
def plot_vector(fig, orig, v, color='blue'):
ax = fig.gca(projection='3d')
orig = np.array(orig); v=np.array(v)
ax.quiver(orig[0], orig[1], orig[2], v[0], v[1], v[2],color=color)
ax.set_xlim(0,10);ax.set_ylim(0,10);ax.set_zlim(0,10)
ax = fig.gca(projection='3d')
return fig
def rotation_matrix(d):
sin_angle = np.linalg.norm(d)
if sin_angle == 0:return np.identity(3)
d /= sin_angle
eye = np.eye(3)
ddt = np.outer(d, d)
skew = np.array([[ 0, d[2], -d[1]],
[-d[2], 0, d[0]],
[d[1], -d[0], 0]], dtype=np.float64)
M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
return M
def pathpatch_2d_to_3d(pathpatch, z, normal):
if type(normal) is str: #Translate strings to normal vectors
index = "xyz".index(normal)
normal = np.roll((1.0,0,0), index)
normal /= np.linalg.norm(normal) #Make sure the vector is normalised
path = pathpatch.get_path() #Get the path and the associated transform
trans = pathpatch.get_patch_transform()
path = trans.transform_path(path) #Apply the transform
pathpatch.__class__ = art3d.PathPatch3D #Change the class
pathpatch._code3d = path.codes #Copy the codes
pathpatch._facecolor3d = pathpatch.get_facecolor #Get the face color
verts = path.vertices #Get the vertices in 2D
d = np.cross(normal, (0, 0, 1)) #Obtain the rotation vector
M = rotation_matrix(d) #Get the rotation matrix
pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])
def pathpatch_translate(pathpatch, delta):
pathpatch._segment3d += delta
def plot_plane(ax, point, normal, size=10, color='y'):
p = Circle((0, 0), size, facecolor = color, alpha = .2)
ax.add_patch(p)
pathpatch_2d_to_3d(p, z=0, normal=normal)
pathpatch_translate(p, (point[0], point[1], point[2]))
| gpl-3.0 | 8,619,222,826,841,033,000 | 35.288136 | 89 | 0.632882 | false |
duttashi/Data-Analysis-Visualization | scripts/general/anovaTest.py | 1 | 6339 | # Importing the required libraries
# Note %matplotlib inline works only for ipython notebook. It will not work for PyCharm. It is used to show the plot distributions
#%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.formula.api as smf
import statsmodels.stats.multicomp as multi
sns.set(color_codes=True)
# Reading the data where low_memory=False increases the program efficiency
data= pd.read_csv("gapminder.csv", low_memory=False)
# setting variables that you will be working with to numeric
data['breastcancerper100th']= data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate']= data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption']= data['alcconsumption'].convert_objects(convert_numeric=True)
# shows the number of rows and columns
print (len(data))
print (len(data.columns))
print (len(data.index))
# Print the column headers/headings
names=data.columns.values
print names
# using the describe function to get the standard deviation and other descriptive statistics of our variables
desc1=data['breastcancerper100th'].describe()
desc2=data['femaleemployrate'].describe()
desc3=data['alcconsumption'].describe()
print "\nBreast Cancer per 100th person\n", desc1
print "\nfemale employ rate\n", desc2
print "\nAlcohol consumption in litres\n", desc3
data.describe()
# Show the frequency distribution
print "\nAlcohol Consumption\nFrequency Distribution (in %)"
c1=data['alcconsumption'].value_counts(sort=False,dropna=False)
print c1
print "\nBreast Cancer per 100th"
c2=data['breastcancerper100th'].value_counts(sort=False)
print c2
print "\nFemale Employee Rate"
c3=data['femaleemployrate'].value_counts(sort=False)
print c3
# Show the frequency distribution of the quantitative variable using the groupby function
ac1=data.groupby('alcconsumption').size()
print "ac1\n",ac1
# Creating a subset of the data
sub1=data[(data['femaleemployrate']>40) & (data['alcconsumption']>=20)& (data['breastcancerper100th']<50)]
# creating a copy of the subset. This copy will be used for subsequent analysis
sub2=sub1.copy()
print "\nContries where Female Employee Rate is greater than 40 &" \
" Alcohol Consumption is greater than 20L & new breast cancer cases reported are less than 50\n"
print sub2
print "\nContries where Female Employee Rate is greater than 50 &" \
" Alcohol Consumption is greater than 10L & new breast cancer cases reported are greater than 70\n"
sub3=data[(data['alcconsumption']>10)&(data['breastcancerper100th']>70)&(data['femaleemployrate']>50)]
print sub3
# Checking for missing values in the data row-wise
print "Missing data rows count: ",sum([True for idx,row in data.iterrows() if any(row.isnull())])
# Checking for missing values in the data column-wise
print "Showing missing data coulmn-wise"
print data.isnull().sum()
# Create a copy of the original dataset as sub4 by using the copy() method
sub4=data.copy()
# Now showing the count of null values in the variables
print sub4.isnull().sum()
# Since the data is all continuous variables therefore the use the mean() for missing value imputation
# if dealing with categorical data, than use the mode() for missing value imputation
sub4.fillna(sub4['breastcancerper100th'].mean(), inplace=True)
sub4.fillna(sub4['femaleemployrate'].mean(), inplace=True)
sub4.fillna(sub4['alcconsumption'].mean(), inplace=True)
# Showing the count of null values after imputation
print sub4.isnull().sum()
# categorize quantitative variable based on customized splits using the cut function
sub4['alco']=pd.qcut(sub4.alcconsumption,6,labels=["0","1-4","5-9","10-14","15-19","20-24"])
sub4['brst']=pd.qcut(sub4.breastcancerper100th,5,labels=["1-20","21-40","41-60","61-80","81-90"])
sub4['emply']=pd.qcut(sub4.femaleemployrate,4,labels=["30-39","40-59","60-79","80-90"])
# Showing the frequency distribution of the categorised quantitative variables
print "\n\nFrequency distribution of the categorized quantitative variables\n"
fd1=sub4['alco'].value_counts(sort=False,dropna=False)
fd2=sub4['brst'].value_counts(sort=False,dropna=False)
fd3=sub4['emply'].value_counts(sort=False,dropna=False)
print "Alcohol Consumption\n",fd1
print "\n------------------------\n"
print "Breast Cancer per 100th\n",fd2
print "\n------------------------\n"
print "Female Employee Rate\n",fd3
print "\n------------------------\n"
# Now plotting the univariate quantitative variables using the distribution plot
sub5=sub4.copy()
sns.distplot(sub5['alcconsumption'].dropna(),kde=True)
plt.xlabel('Alcohol consumption in litres')
plt.title('Breast cancer in working class women')
plt.show() # Note: Although there is no need to use the show() method for ipython notebook as %matplotlib inline does the trick but
#I am adding it here because matplotlib inline does not work for an IDE like Pycharm and for that i need to use plt.show
sns.distplot(sub5['breastcancerper100th'].dropna(),kde=True)
plt.xlabel('Breast cancer per 100th women')
plt.title('Breast cancer in working class women')
plt.show()
sns.distplot(sub5['femaleemployrate'].dropna(),kde=True)
plt.xlabel('Female employee rate')
plt.title('Breast cancer in working class women')
plt.show()
# using scatter plot the visulaize quantitative variable.
# if categorical variable then use histogram
scat1= sns.regplot(x='alcconsumption', y='breastcancerper100th', data=data)
plt.xlabel('Alcohol consumption in liters')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Alcohol Consumption and Breast Cancer 100th person')
scat2= sns.regplot(x='femaleemployrate', y='breastcancerper100th', data=data)
plt.xlabel('Female Employ Rate')
plt.ylabel('Breast cancer per 100th person')
plt.title('Scatterplot for the Association between Female Employ Rate and Breast Cancer per 100th Rate')
sub6=sub4.copy()
model1=smf.ols(formula='breastcancerper100th~C(alco)',data=sub6)
results1=model1.fit()
print(results1.summary())
m1=sub5.groupby('alcconsumption').mean()
sd1=sub5.groupby('alcconsumption').std()
'''
print m1
print "\n"
print sd1
'''
# Conducting a post hoc comparison test to check for type 1 error
mc1=multi.MultiComparison(sub6['breastcancerper100th'],sub6['alco'])
res1=mc1.tukeyhsd()
print res1.summary()
| mit | 4,783,858,996,819,061,000 | 44.934783 | 132 | 0.762108 | false |
DTMilodowski/LiDAR_canopy | src/PAI_limitations_figures.py | 1 | 22708 | #===============================================================================
# PAI_limitations_figures.py
# D.T.Milodowski, November 2017
#-------------------------------------------------------------------------------
# This function contains the scripts used to produce the figures in the paper:
# "Point density imposes limitations on PAI estimation from discrete return
# LiDAR"
#-------------------------------------------------------------------------------
import numpy as np
import fiona
# import plotting libraries
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import rcParams
from matplotlib import cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.ticker as plticker
import sys
sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/EOdata/EO_data_processing/src/plot_EO_data/colormap/')
sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/EOdata/EO_data_processing/src/')
import data_io as io
import colormaps as cmaps
plt.register_cmap(name='viridis', cmap=cmaps.viridis)
plt.register_cmap(name='plasma', cmap=cmaps.plasma)
plt.set_cmap(cmaps.viridis)
# Set up some basiic parameters for the plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['arial']
rcParams['font.size'] = 16
rcParams['legend.numpoints'] = 1
axis_size = rcParams['font.size']+2
# code to get trio of nice colourblind friendly colours
cmap = cm.get_cmap('plasma')
scale = np.arange(0.,3.)
scale /=2.5
colour = cmap(scale)
#colour = ['#46E900','#1A2BCE','#E0007F']
# import required LiDAR libaries
import LiDAR_MacHorn_LAD_profiles as MH
import LiDAR_io as lidar_io
import LiDAR_tools as lidar
import auxilliary_functions as aux
import raster_io as io
# Directory listings
SAVEDIR = '~/'
# Load files
dens_file = 'arrays_for_lombok/SAFE_pointcloud_metrics_10m_point_density_data.tif'
PAI_file = 'arrays_for_lombok/SAFE_pointcloud_metrics_10m_pai_data.tif'
dens, geo, coord = io.load_GeoTIFF_band_and_georeferencing(dens_file)
PAI, geo, coord = io.load_GeoTIFF_band_and_georeferencing(PAI_file)
dens[np.isnan(PAI)]=np.nan
coords = np.array([[565042,522612],[572092,520634],[549913,514144]])
labels = ['A','B','C']
rows,cols = PAI.shape
N = geo[3]
S = geo[3] + (rows+1)*geo[5]
W = geo[0]
E = geo[0] + (cols+1)*geo[1]
Y, X = np.mgrid[slice(S,N,-geo[5]),slice(W,E,geo[1])]
#-------------------------------------------------------------------------------
# Get analytical solution
# Parameters for the analytical PAI detection limit:
#   k         - extinction coefficient used in the MacArthur-Horn inversion
#   theta     - LiDAR scan angles (degrees -> radians); only the 0-degree case
#               is used below
#   radius    - neighbourhood radii in metres (10 m, 20 m, 5 m) giving the
#               sample areas over which returns are pooled
k = 0.7
theta_deg = np.asarray([0, 5, 10])
theta_rad = theta_deg*np.pi/180.
radius = np.asarray([10.,20.,05.])
area = np.pi*radius**2
# Regular grid of point densities spanning the observed range, for plotting
# the analytical curves.
dens_a = np.arange(0.01,np.nanmax(dens),0.01)
# PAImax evaluated on the observed density raster (10 m radius, nadir).
PAImax = MH.calculate_analytical_limit(dens,area[0],k,theta_rad[0])
PAImax_10m_00deg = MH.calculate_analytical_limit(dens_a,area[0],k,theta_rad[0])
PAImax_20m_00deg = MH.calculate_analytical_limit(dens_a,area[1],k,theta_rad[0])
PAImax_05m_00deg = MH.calculate_analytical_limit(dens_a,area[2],k,theta_rad[0])
# NOTE(review): the disabled block below refers to "30m" variables although the
# third radius is 5 m - presumably left over from an earlier configuration.
"""
PAImax_10m_05deg = MH.calculate_analytical_limit(dens_a,area[0],k,theta_rad[1])
PAImax_20m_05deg = MH.calculate_analytical_limit(dens_a,area[1],k,theta_rad[1])
PAImax_30m_05deg = MH.calculate_analytical_limit(dens_a,area[2],k,theta_rad[1])
PAImax_10m_10deg = MH.calculate_analytical_limit(dens_a,area[0],k,theta_rad[2])
PAImax_20m_10deg = MH.calculate_analytical_limit(dens_a,area[1],k,theta_rad[2])
PAImax_30m_10deg = MH.calculate_analytical_limit(dens_a,area[2],k,theta_rad[2])
"""
#-------------------------------------------------------------------------------
# Figure 2 - this figure illustrates the analytical solution presented in this
# paper describing the threshold PAI that can be detected using discrete return
# LiDAR
fig = plt.figure(2, facecolor='White',figsize=[12,6])
ax1a= plt.subplot2grid((1,7),(0,0),colspan=3)
ax1a.annotate('a', xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',horizontalalignment='left', verticalalignment='top', fontsize=axis_size+2)
ax1a.set_xlabel('point density / pts m$^{-2}$',fontsize = axis_size)
ax1a.set_ylabel('PAI$_{max}$',fontsize = axis_size)
ax1a.plot(dens_a,PAImax_20m_00deg,dashes=[8, 5],c=colour[2],label = '%.3f ha' % (np.pi*radius[1]**2/10.**4))
ax1a.plot(dens_a,PAImax_10m_00deg,dashes=[16, 5],c=colour[1],label = '%.3f ha' % (np.pi*radius[0]**2/10.**4))
ax1a.plot(dens_a,PAImax_05m_00deg,'-',c=colour[0],label = '%.3f ha' % (np.pi*radius[2]**2/10.**4))
ax1a.legend(loc='lower right')
ax1b= plt.subplot2grid((1,7),(0,3),colspan=3,sharex=ax1a,sharey=ax1a)
ax1b.annotate('b', xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',horizontalalignment='left', verticalalignment='top', fontsize=axis_size+2, color='white')
ax1b.set_xlabel('point density / pts m$^{-2}$',fontsize = axis_size)
ax1b.set_ylabel('PAI',fontsize = axis_size)
hb = ax1b.hexbin(dens.reshape(dens.size), PAI.reshape(PAI.size), gridsize=(1000,200), bins='log', cmap='plasma')
ax1c= plt.subplot2grid((1,7),(0,6))
cb = fig.colorbar(hb, cax=ax1c)
cb.set_label('log$_{10}$(Number of grid cells)',fontsize = axis_size)
ax1b.plot(dens_a,PAImax_10m_00deg,'-',c='white',linewidth=2)
ax1b.set_xlim(0,30)
ax1b.set_ylim(0,np.nanmax(PAI))
plt.tight_layout()
plt.savefig('Fig2_SAFE_point_density_vs_PAI.png')
plt.savefig('Fig2_SAFE_point_density_vs_PAI.pdf')
#-------------------------------------------------------------------------------
# Figure 1 - this figure presents maps of PAI and point density across the SAFE
# landscape
# Load shapefiles
shapefile_dir = '../Data/Fig1_Shapefiles/'
land_cover_file = 'HCS_Stratification_DOI.shp'
vjr_file = 'VJR_prj.shp'
ea_file = 'EA_prj.shp'
land_cover = fiona.open(shapefile_dir+land_cover_file)
vjr = fiona.open(shapefile_dir+vjr_file)
ea = fiona.open(shapefile_dir+ea_file)
patches = []
colours = ['#30A000','#85EE58','#E756A8','0.75']
for poly in land_cover:
color_iter = colours[poly['properties']['HCS_CLass']-1]
if poly['geometry']['type']=='MultiPolygon':
Npoly = len(poly['geometry']['coordinates'][1])
for nn in range(0,Npoly):
polygon = Polygon(poly['geometry']['coordinates'][1][nn], True,ec='None',fc=color_iter)
patches.append(polygon)
else:
polygon = Polygon(poly['geometry']['coordinates'][0], True,ec='None',fc=color_iter)
patches.append(polygon)
VJR_poly = vjr[0]
polygon = Polygon(VJR_poly['geometry']['coordinates'][0], True,ec='#1A2CCE',fc='None')
patches.append(polygon)
ea_poly = ea[0]
polygon = Polygon(ea_poly['geometry']['coordinates'][0], True,ec='#1A2CCE',fc='None')
patches.append(polygon)
coords_trans = coords-np.array([W,S])/float(geo[1])+np.array([W,S])
fig = plt.figure(2, facecolor='White',figsize=[12,12])
loc_x = plticker.MultipleLocator(base=10**4)
loc_y = plticker.MultipleLocator(base=10**4)
loc_cb = plticker.MultipleLocator(base=10)
loc_cc = plticker.MultipleLocator(base=4)
loc_cd = plticker.MultipleLocator(base=0.05)
ax2a= plt.subplot2grid((2,2),(0,0))
ax2a.yaxis.set_major_locator(loc_y)
ax2a.xaxis.set_major_locator(loc_x)
p2a = PatchCollection(patches, match_original=True)
ax2a.annotate('a - Land cover', xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',horizontalalignment='left', verticalalignment='top', fontsize=axis_size)
ax2a.add_collection(p2a)
ax2a.set_aspect('equal', adjustable='box-forced')
ax2a.set_xlim(xmin=W,xmax=E)
ax2a.set_ylim(ymin=S,ymax=N)
divider = make_axes_locatable(ax2a)
ax_cb = divider.new_horizontal(size="5%", pad=0.05, pack_start=False)
ax2a.annotate('VJR', xy=(560437,516426), xycoords='data',backgroundcolor='none',horizontalalignment='center', verticalalignment='center')
ax2a.annotate('SAFE', xy=(564805,520225), xycoords='data',backgroundcolor='none',horizontalalignment='center', verticalalignment='center')
for tick in ax2a.get_yticklabels():
tick.set_rotation(90)
#ax2a.set_xticklabels([])
for pp in range(0,3):
ax2a.plot(coords[pp,0],coords[pp,1],'o',c='white')
ax2a.annotate(labels[pp], xy=coords[pp]+250, xycoords='data',color='black')
ax2b= plt.subplot2grid((2,2),(0,1))
ax2b.yaxis.set_major_locator(loc_y)
ax2b.xaxis.set_major_locator(loc_x)
ax2b.annotate('b - Point density', xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',horizontalalignment='left', verticalalignment='top', fontsize=axis_size)
ax2b.set_xlim(xmin=W,xmax=E)
ax2b.set_ylim(ymin=S,ymax=N)
im2b=ax2b.imshow(dens,vmin=0,vmax=30,cmap='plasma',origin='lower',extent=[W,E,S,N])
#densm = np.ma.masked_where(np.isnan(dens),dens)
#im2b = ax2b.pcolormesh(X,Y,densm,vmin=0,vmax=30,cmap='plasma')
ax2b.axis('image')
#ax2b.set_xticklabels([])
#ax2b.set_yticklabels([])
ax2b.yaxis.set_major_locator(loc_y)
ax2b.xaxis.set_major_locator(loc_x)
for tick in ax2b.get_yticklabels():
tick.set_rotation(90)
divider2b = make_axes_locatable(ax2b)
cax2b = divider2b.append_axes("right", size="5%", pad=0.05)
cbar2b=plt.colorbar(im2b, cax=cax2b)
cbar2b.ax.set_ylabel('point density / pts m$^{-2}$',fontsize = axis_size)
cbar2b.solids.set_edgecolor("face")
cbar2b.locator = loc_cb
cbar2b.update_ticks()
"""
for pp in range(0,3):
ax2b.plot(coords[pp,0],coords[pp,1],'o',c='white')
ax2b.annotate(labels[pp], xy=coords_trans[pp], xycoords='data', xytext=(2, 2), textcoords='offset points',color='white')
"""
ax2c= plt.subplot2grid((2,2),(1,0))
ax2c.yaxis.set_major_locator(loc_y)
ax2c.xaxis.set_major_locator(loc_x)
ax2c.annotate('c - PAI', xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',horizontalalignment='left', verticalalignment='top', fontsize=axis_size)
im2c = ax2c.imshow(PAI,cmap='viridis',origin='lower',extent=[W,E,S,N])
#PAIm = np.ma.masked_where(np.isnan(PAI),PAI)
#im2c = ax2c.pcolormesh(X,Y,PAIm,cmap='viridis')
ax2c.axis('image')
for tick in ax2c.get_yticklabels():
tick.set_rotation(90)
divider2c = make_axes_locatable(ax2c)
cax2c = divider2c.append_axes("right", size="5%", pad=0.05)
cbar2c=plt.colorbar(im2c, cax=cax2c)
cbar2c.ax.set_ylabel('PAI',fontsize = axis_size)
cbar2c.solids.set_edgecolor("face")
cbar2c.locator = loc_cc
cbar2c.update_ticks()
"""
for pp in range(0,3):
ax2c.plot(coords[pp,0],coords[pp,1],'o',c='white')
ax2c.annotate(labels[pp], xy=coords_trans[pp], xycoords='data', xytext=(2, 2), textcoords='offset points',color='white')
"""
ax2d= plt.subplot2grid((2,2),(1,1))
ax2d.yaxis.set_major_locator(loc_y)
ax2d.xaxis.set_major_locator(loc_x)
ax2d.annotate('d - PAI/PAI$_{max}$', xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',horizontalalignment='left', verticalalignment='top', fontsize=axis_size)
im2d = ax2d.imshow(PAI/PAImax,vmin = 0.85, vmax=1, cmap='plasma',origin='lower',extent=[W,E,S,N])
#proximity = np.ma.masked_where(np.isnan(PAI),PAI/PAImax)
#im2d = ax2d.pcolormesh(X,Y,proximity,vmin=0.85,vmax=1,cmap='plasma')
ax2d.axis('image')
#ax2d.set_yticklabels([])
divider2d = make_axes_locatable(ax2d)
cax2d = divider2d.append_axes("right", size="5%", pad=0.05)
cbar2d=plt.colorbar(im2d, cax=cax2d)
cbar2d.ax.set_ylabel('PAI/PAI$_{max}$',fontsize = axis_size)
cbar2d.solids.set_edgecolor("face")
cbar2d.locator = loc_cd
cbar2d.update_ticks()
for tick in ax2d.get_yticklabels():
tick.set_rotation(90)
plt.tight_layout()
plt.savefig('Fig1_SAFE_point_density_PAI_maps.png')
plt.savefig('Fig1_SAFE_point_density_PAI_maps.pdf')
plt.show()
# Figure 3 - investigating role of point density on PAI estimates made at specific points
# (controlling for other sources of variation)
las_list = '/home/dmilodow/DataStore_DTM/BALI/LiDAR/Data/SAFE_las_files/las_list_full_path.txt'
laz_files = False
# Some parameters
min_PAD = 0.1
radius = 10.
area = np.pi*radius**2
max_height = 80.
min_height = 2.
layer_thickness = 1
heights = np.arange(0,max_height,layer_thickness)+layer_thickness
kappa = 0.7
n_iterations = 100
target_dens = np.arange(40,0,-0.2)
target_points = (np.ceil(area*target_dens)).astype('int')
n_dens = target_dens.size
PAI_iter = np.zeros((3,n_dens,n_iterations))
sample_pts_collated = []
for pp in range(0,3):
print("point %i, x = %.0f, y = %.0f" % (pp+1, coords[pp,0], coords[pp,1]))
# define a bounding box around target points to load in point cloud around area of interest
E = coords[pp,0]+500
N = coords[pp,1]+500
W = coords[pp,0]-500
S = coords[pp,1]-500
# Read in LiDAR points for region of interest
polygon = np.asarray([[W,N],[E,N],[E,S],[W,S]])
lidar_pts, starting_ids_for_trees, trees = lidar_io.load_lidar_data_by_polygon(las_list,polygon,max_pts_per_tree = 5*10**5, laz_files=laz_files)
N_trees = len(trees)
# retrieve point clouds samples
sample_pts = np.array([])
for tt in range(0,N_trees):
ids = trees[tt].query_ball_point(coords[pp], radius)
if len(ids)>0:
if sample_pts.size==0:
sample_pts = lidar_pts[np.asarray(ids)+starting_ids_for_trees[tt]]
else:
sample_pts = np.concatenate((sample_pts,lidar_pts[np.asarray(ids)+starting_ids_for_trees[tt]]),axis=0)
sample_pts_collated.append(sample_pts.copy())
sample_pts = None
# Now loop through the points again pulling out the metrics
for pp in range(0,3):
# If we have the returns, then calculate metric of interest - in
# this case the PAI
sample_pts = sample_pts_collated[pp].copy()
if sample_pts.size > 0:
# keep only first returns
sample_pts=sample_pts[sample_pts[:,3]==1,:]
if sample_pts.size > 0:
sample_ids = np.arange(sample_pts.shape[0])
for dd in range(0,n_dens):
for ii in range(0,n_iterations):
sample_pts_iter = sample_pts[np.random.choice(sample_ids,size=target_points[dd]),:]
# calculate PAD profile
heights,first_return_profile,n_ground_returns = MH.bin_returns(sample_pts_iter, max_height, layer_thickness)
PADprof = MH.estimate_LAD_MacArthurHorn(first_return_profile, n_ground_returns, layer_thickness, kappa)
# remove lowermost portion of profile
PAD_iter = PADprof.copy()
PAD_iter[heights<min_height]=0
PAI_iter[pp,dd,ii] = np.sum(PAD_iter)
else:
print("no first returns in neighbourhood")
else:
print("no returns in neighbourhood")
# Now plot up the results
PAI_A = np.mean(PAI_iter[0,:,:],axis=1)
ulim_A = np.mean(PAI_iter[0,:,:],axis=1)+np.std(PAI_iter[0,:,:],axis=1)
llim_A = np.mean(PAI_iter[0,:,:],axis=1)-np.std(PAI_iter[0,:,:],axis=1)
PAI_B = np.mean(PAI_iter[1,:,:],axis=1)
ulim_B = np.mean(PAI_iter[1,:,:],axis=1)+np.std(PAI_iter[1,:,:],axis=1)
llim_B = np.mean(PAI_iter[1,:,:],axis=1)-np.std(PAI_iter[1,:,:],axis=1)
PAI_C = np.mean(PAI_iter[2,:,:],axis=1)
ulim_C = np.mean(PAI_iter[2,:,:],axis=1)+np.std(PAI_iter[2,:,:],axis=1)
llim_C = np.mean(PAI_iter[2,:,:],axis=1)-np.std(PAI_iter[2,:,:],axis=1)
fig = plt.figure(3, facecolor='White',figsize=[6,6])
ax3= plt.subplot2grid((1,1),(0,0))
ax3.plot(target_dens,PAI_A,'-',c=colour[2],label = 'A')
ax3.fill_between(target_dens,llim_A,ulim_A,color=colour[2],alpha=0.25)
ax3.plot(target_dens,PAI_B,'-',c=colour[1],label = 'B')
ax3.fill_between(target_dens,llim_B,ulim_B,color=colour[1],alpha=0.25)
ax3.plot(target_dens,PAI_C,'-',c=colour[0],label = 'C')
ax3.fill_between(target_dens,llim_C,ulim_C,color=colour[0],alpha=0.25)
ax3.plot(dens_a,PAImax_10m_00deg,dashes=[8,5],c='k',linewidth=2,label = 'limit')
ax3.legend(loc='lower right')
ax3.set_xlim(xmin=0,xmax=40)
ax3.set_xlabel('point density / pts m$^{-2}$', fontsize = axis_size)
ax3.set_ylabel('PAI', fontsize = axis_size)
plt.tight_layout()
plt.savefig('Figure3_point_density_vs_PAI_pointwise.png')
plt.savefig('Figure3_point_density_vs_PAI_pointwise.pdf')
plt.show()
# Figure 4 - investigating role of resolution on PAI estimates made at specific points
# (controlling for other sources of variation)
# first construct sampling grid
ha = 100
div = np.array([10.,8.,6.,5.,4.,3.,2.,1.])
keys = ['10m','12.5m','16.7m','20m','25m','33m','50m','100m']
res = ha/div
coords_00 = coords-ha/2
n_grids = res.size
subplots = []
subplots.append({})
subplots.append({})
subplots.append({})
PAI_res = {}
PAI_mean = np.zeros((3,div.size))
PAI_sd = np.zeros((3,div.size))
for pp in range(0,3):
for ss in range(0,n_grids):
x = np.arange(coords_00[pp,0],coords_00[pp,0]+ha+1,res[ss])
y = np.arange(coords_00[pp,1],coords_00[pp,1]+ha+1,res[ss])
xv,yv=np.asarray(np.meshgrid(x,y))
rr,cc = xv.shape
rr-=1
cc-=1
subplot = []
for i in range(0,rr):
for j in range(0,cc):
bbox = [ [xv[i,j], xv[i+1,j], xv[i+1,j+1], xv[i,j+1], xv[i,j]],
[yv[i,j], yv[i+1,j], yv[i+1,j+1], yv[i,j+1], yv[i,j]] ]
subplot.append( np.asarray(bbox).transpose() )
subplots[pp][keys[ss]] = subplot
n_subplots=len(subplot)
PAI_res[keys[ss]] = np.zeros((3,n_subplots))
# now get point clouds
sample_pts_collated = []
starting_ids_collated = []
trees_collated = []
for pp in range(0,3):
print("point %i, x = %.0f, y = %.0f" % (pp+1, coords[pp,0], coords[pp,1]))
# define a bounding box around target points to load in point cloud around area of interest
E = coords[pp,0]+500
N = coords[pp,1]+500
W = coords[pp,0]-500
S = coords[pp,1]-500
# Read in LiDAR points for region of interest
polygon = np.asarray([[W,N],[E,N],[E,S],[W,S]])
lidar_pts, starting_ids_for_trees, trees = lidar_io.load_lidar_data_by_polygon(las_list,polygon,max_pts_per_tree = 5*10**5, laz_files=laz_files)
sample_pts_collated.append(lidar_pts.copy())
starting_ids_collated.append(starting_ids_for_trees.copy())
trees_collated.append(np.asarray(trees))
# Now loop through the subplots and sample the point cloud
for pp in range(0,3):
print("point %i, x = %.0f, y = %.0f" % (pp+1, coords[pp,0], coords[pp,1]))
# loop through each sampling resolution
lidar_pts = sample_pts_collated[pp].copy()
starting_ids_for_trees = starting_ids_collated[pp].copy()
N_trees = trees_collated[pp].size
for ss in range(0,res.size):
print('\t - sample res = ', keys[ss])
n_subplots = len(subplots[pp][keys[ss]])
rad_ss = np.sqrt(res[ss]**2/2.)
# for each of the subplots, clip point cloud and model PAD and get the metrics
for sp in range(0,n_subplots):
# query the tree to locate points of interest
# note that we will only have one tree for number of points in sensitivity analysis
centre_x = np.mean(subplots[pp][keys[ss]][sp][0:4,0])
centre_y = np.mean(subplots[pp][keys[ss]][sp][0:4,1])
radius = np.sqrt(res[ss]**2/2.)
# retrieve point clouds samples
sample_pts = np.array([])
for tt in range(0,N_trees):
ids = trees_collated[pp][tt].query_ball_point(np.array([centre_x,centre_y]), radius)
if len(ids)>0:
if sample_pts.size==0:
sample_pts = lidar_pts[np.asarray(ids)+starting_ids_for_trees[tt]]
else:
sample_pts = np.concatenate((sample_pts,lidar_pts[np.asarray(ids)+starting_ids_for_trees[tt]]),axis=0)
# keep only first returns
sample_pts=sample_pts[sample_pts[:,3]==1,:]
sp_pts = lidar.filter_lidar_data_by_polygon(sample_pts,subplots[pp][keys[ss]][sp])
#------
heights,first_return_profile,n_ground_returns = MH.bin_returns(sp_pts, max_height, layer_thickness)
PADprof = MH.estimate_LAD_MacArthurHorn(first_return_profile, n_ground_returns, layer_thickness, k)
# remove lowermost portion of profile
PADprof[heights<min_height]=0
PAI_res[keys[ss]][pp,sp] = PADprof.sum()
PAI_mean[pp,ss] = np.mean(PAI_res[keys[ss]][pp,:])
PAI_sd[pp,ss] = np.std(PAI_res[keys[ss]][pp,:])
PAI_serr = np.zeros(PAI_sd.shape)
for pp in range(0,3):
for ss in range(0,res.size):
PAI_serr[pp,ss]=PAI_sd[pp,ss] / np.sqrt(PAI_res[keys[ss]][pp,:].size)
# Now want to get spatial scaling of canopy variance
# First create array for 10 m resolution case
PAI_array_10m = np.zeros((3,10,10))
for pp in range(0,3):
sp = 0
for rr in range(0,10):
for cc in range(0,10):
PAI_array_10m[pp,rr,cc]=PAI_res['10m'][pp,sp]
sp+=1
test_res = np.arange(1,11)
e = np.zeros((3,len(test_res)))
bias = np.zeros((3,len(test_res)))
for tt in range(0,len(test_res)):
for pp in range(0,3):
temp_host = np.zeros((10-test_res[tt]+1,10-test_res[tt]+1))
temp_host2 = np.zeros((10-test_res[tt]+1,10-test_res[tt]+1))
for rr in range(0,10-test_res[tt]+1):
for cc in range(0,10-test_res[tt]+1):
sample_PAI = PAI_array_10m[pp,rr:rr+test_res[tt],cc:cc+test_res[tt]]
sample_E = sample_PAI-np.mean(sample_PAI)
temp_host2[rr,cc]= np.mean(sample_E)
temp_host[rr,cc] = -(1/k)*np.log(np.mean(np.exp(-k*sample_E)))
#temp_host[rr,cc] = np.std(PAI_array_10m[pp,rr:rr+test_res[tt]+1,cc:cc+test_res[tt]+1])
#print temp_host.shape
#PAI_std_scaling[pp,tt] = np.mean(temp_host)
bias[pp,tt] = np.mean(temp_host)
e[pp,tt]=np.mean(temp_host2)
print(pp,tt,bias[pp,tt], e[pp,tt])
# now use linear interpolation to estimate bias at each of the resolutions used
# in this analysis
bias_interpolated = np.zeros((3,res.size))
for pp in range(0,3):
for rr in range(0,res.size):
res1 = test_res[test_res*10<=res[rr]][-1]
res2 = test_res[test_res*10>=res[rr]][0]
bias1 = bias[pp,test_res==res1][0]
bias2 = bias[pp,test_res==res2][0]
print(res[rr],res1,res2)
if res1!=res2:
bias_interpolated[pp,rr] = bias1+(bias2-bias1)*(res[rr]/10-res1)/(res2-res1)
else:
bias_interpolated[pp,rr] = bias1
PAI_corrected = PAI_mean-bias_interpolated
# Now plot up the results
fig = plt.figure(4, facecolor='White',figsize=[7,6])
ax4a= plt.subplot2grid((1,5),(0,0),colspan=4)
ax4a.errorbar(res,PAI_mean[0,:],yerr=2*PAI_serr[0,:],marker='o',c=colour[2],label = 'A',linestyle='none')
ax4a.plot(res,PAI_corrected[0,:],marker='^',c=colour[2],linestyle='none')
ax4a.axhline(PAI_mean[0,0],c=colour[2],linestyle=':')
ax4a.errorbar(res,PAI_mean[1,:],yerr=2*PAI_serr[1,:],marker='o',c=colour[1],label = 'B',linestyle='none')
ax4a.plot(res,PAI_corrected[1,:],marker='^',c=colour[1],linestyle='none')
ax4a.axhline(PAI_mean[1,0],c=colour[1],linestyle=':')
ax4a.errorbar(res,PAI_mean[2,:],yerr=2*PAI_serr[2,:],marker='o',c=colour[0],label = 'C',linestyle='none')
ax4a.plot(res,PAI_corrected[2,:],marker='^',c=colour[0],linestyle='none')
ax4a.axhline(PAI_mean[2,0],c=colour[0],linestyle=':')
ax4a.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax4a.set_xlabel('spatial resolution / m', fontsize = axis_size)
ax4a.set_ylabel('PAI', fontsize = axis_size)
plt.tight_layout()
plt.savefig('Figure4_resolution_vs_PAI_pointwise.png')
plt.savefig('Figure4_resolution_vs_PAI_pointwise.pdf')
plt.show()
| gpl-3.0 | 4,718,651,781,681,082,000 | 39.695341 | 173 | 0.664259 | false |
camallen/aggregation | experimental/chicago/aggregation.py | 2 | 6819 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
import random
import bisect
animals = [u'bike', u'grayFox', u'livestock', u'foxSquirrel', u'deer', u'rat', u'mink', u'human', u'beaver', u'mouse', u'muskrat', u'domDog', u'mower', u'graySquirrel', u'opossum', u'domCat', u'chipmunk', u'bird', u'otherVehicle', u'redFox', u'horse', u'woodChuck', u'rabbit', u'coyote', u'car', u'flyingSquirrel', u'melanisticGraySquirrel', u'raccoon', u'skunk']
print len(animals)
results = {}
def index(a, x):
    """Return the position of the leftmost element of sorted sequence *a*
    that equals *x*.

    Raises ValueError when *x* is not present (mirrors list.index).
    """
    pos = bisect.bisect_left(a, x)
    if pos == len(a) or a[pos] != x:
        raise ValueError
    return pos
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
sys.path.append(base_directory+"/github/pyIBCC/python")
import ibcc
client = pymongo.MongoClient()
db = client['chicago_2015-01-04']
classification_collection = db["chicago_classifications"]
ip_listing = []
#the header for the csv input file
f = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f.write("a,b,c\n")
subject_list = []
user_list = []
for classification in classification_collection.find():
if classification["subjects"] == []:
continue
zooniverse_id = classification["subjects"][0]["zooniverse_id"]
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
try:
user_index = index(user_list,user)
except ValueError:
bisect.insort(user_list,user)
try:
subject_index = index(subject_list,zooniverse_id)
except ValueError:
bisect.insort(subject_list,zooniverse_id)
print "****"
f_ibcc = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f_ibcc.write("a,b,c\n")
for classification in classification_collection.find():
if classification["subjects"] == []:
continue
if classification["tutorial"] is True:
continue
zooniverse_id = str(classification["subjects"][0]["zooniverse_id"])
if not(zooniverse_id in results):
results[zooniverse_id] = dict.fromkeys(animals,0)
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
try:
user_index = user_list.index(user)
except ValueError:
user_list.append(user)
user_index = len(user_list) - 1
subject_index = index(subject_list,zooniverse_id)
user_index = index(user_list,user)
if "finished_at" in classification["annotations"][0]:
continue
else:
species = classification["annotations"][0]["species"]
if len(classification["annotations"]) != 4:
print classification["annotations"]
results[zooniverse_id][species] += 1
#print results[zooniverse_id]
continue
#print classification["annotations"][0]
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
found_condor = "0"
for animal in markings.values():
try:
animal_type = animal["species"]
except KeyError:
continue
print animal_type
break
f_ibcc.write(str(user_index)+","+str(subject_index)+","+found_condor+"\n")
print results
assert(False)
with open(base_directory+"/Databases/condor_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \""+base_directory+"/Databases/condor_ibcc.csv\"\n")
f.write("outputFile = \""+base_directory+"/Databases/condor_ibcc.out\"\n")
f.write("confMatFile = \""+base_directory+"/Databases/condor_ibcc.mat\"\n")
f.write("nu0 = np.array([30,70])\n")
f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
#start by removing all temp files
try:
os.remove(base_directory+"/Databases/condor_ibcc.out")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.mat")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.csv.dat")
except OSError:
pass
ibcc.runIbcc(base_directory+"/Databases/condor_ibcc.py")
#read in the results
# with open(base_directory+"/Databases/condor_ibcc.out","rb") as f:
# reader = csv.reader(f,delimiter=" ")
#
# for subject_index,p0,p1 in reader:
# subject_index = int(float(subject_index))
# print p1
#now repeat - but with fewer users per image
f.close()
f = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f.write("a,b,c\n")
for subject_count,zooniverse_id in enumerate(sampled_ids):
user_ips_to_sample = []
for classification in classification_collection.find({"subjects.zooniverse_id":zooniverse_id}):
user_ips_to_sample.append(classification["user_ip"])
sample = random.sample(user_ips_to_sample,2)
for user_ip in sample:
classification = classification_collection.find_one({"subjects.zooniverse_id":zooniverse_id,"user_ip":user_ip})
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
found_condor = "0"
for animal in markings.values():
try:
animal_type = animal["animal"]
except KeyError:
continue
if animal_type == "condor":
found_condor = "1"
break
f.write(str(str(ip_listing.index(user_ip)))+","+str(subject_count)+","+found_condor+"\n")
except ValueError:
pass
with open(base_directory+"/Databases/condor_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \""+base_directory+"/Databases/condor_ibcc.csv\"\n")
f.write("outputFile = \""+base_directory+"/Databases/condor_ibcc.out2\"\n")
f.write("confMatFile = \""+base_directory+"/Databases/condor_ibcc.mat\"\n")
f.write("nu0 = np.array([30,70])\n")
f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
#start by removing all temp files
try:
os.remove(base_directory+"/Databases/condor_ibcc.out2")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.mat")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.csv.dat")
except OSError:
pass
ibcc.runIbcc(base_directory+"/Databases/condor_ibcc.py")
| apache-2.0 | 2,238,402,340,212,246,000 | 28.266094 | 363 | 0.63895 | false |
JustinNoel1/ML-Course | bayes/bayesian-regression/python/bayreg.py | 1 | 3784 | #Implementation of Bayesian polynomial regression using pymc3
from pymc3 import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
import theano
from scipy.stats.kde import gaussian_kde
# set sample size
NUM_SAMPLES = 200
# set desired standard error
SDEV = 0.3
# set random seed
np.random.seed(42)
# set true function
def f(x):
return 7 *(x**3 -1.3 *x**2+0.5*x - 0.056)
# evenly spaced grid of points in unit interval
gridx = np.linspace(0,1, 200)
# sample from unit interval
x = np.random.random(NUM_SAMPLES)
# perturb values
y = f(x) + SDEV*np.random.randn(NUM_SAMPLES)
# construct polynomial features on the samples and the grid
poly = PolynomialFeatures(degree = 3)
xpow = poly.fit_transform(x.reshape(NUM_SAMPLES,1))
linpow = poly.fit_transform(gridx.reshape(200,1))
# Create dictionary of theano variables
data = dict(x1=theano.shared(xpow[:,1]), x2 = theano.shared(xpow[:,2]), x3 = theano.shared(xpow[:,3]), y=theano.shared(y))
# plot our true curve
fig = plt.figure()
ax = fig.add_subplot(111, title = 'Bayesian approach to polynomial regression')
ax.scatter(x,y, label = 'Sampled data')
ax.plot(gridx, f(gridx), label = 'True curve', linewidth = 4 )
# this is needed to work in pymc3 context
with Model() as model:
# Build the pymc3 model
coeff = Normal('coeff', mu = 0, sd = 10, shape = 4)
sigma = HalfNormal('sigma', sd = 1)
mu = np.sum([coeff[i]*data['x'+str(i)] for i in range(1,4)])+coeff[0]
y_obs = Normal('y_obs', mu = mu, sd = sigma, observed = data['y'])
# Calculate the posterior probability via MCMC integration
trace = sample(3500, njobs = 4)
# print summary of training in html format
print(stats.df_summary(trace).to_html())
# Get the mean coefficients
mcoeff = np.mean(trace['coeff'], axis = 0)
# define the predicted function using the mean polynomial
def g(x):
return np.sum([mcoeff[i]*x**i for i in range(4)], axis =0)
# plot the mean polynomial
ax.plot(gridx, g(gridx), label = 'Bayesian mean polynomial fit', color = 'Black', alpha = 0.5, lw = 2.5)
# take the last 50 samples of the parameters and graph their associated polynomials
for i in range(50):
def h(x):
return np.sum([trace['coeff'][-i][j]*x**j for j in range(4)], axis =0)
ax.plot(gridx, h(gridx), color = 'Black', alpha = 0.1)
# Calculate posterior values
data['x1'].set_value(linpow[:,1])
data['x2'].set_value(linpow[:,2])
data['x3'].set_value(linpow[:,3])
data['y'].set_value(np.zeros_like(linpow[:1,1]))
post_pred = sample_ppc(trace, samples = 200)
# plot the mean prediction plus +- one standard deviation
ax.plot(gridx, np.mean(post_pred['y_obs'], axis = 0), label = 'Bayesian mean posterior', alpha = 0.5, color = 'Red')
ax.fill_between(gridx, np.mean(post_pred['y_obs'], axis = 0)-np.std(post_pred['y_obs'], axis = 0), np.mean(post_pred['y_obs'], axis = 0)+np.std(post_pred['y_obs'], axis = 0), label = 'Bayesian error band', alpha = 0.1, color = 'Red')
plt.legend(loc = 2)
plt.savefig("bayreg.png")
plt.show()
plt.clf()
plt.close()
# plot the pdfs of the coefficients
plt.figure(figsize = (10, 5))
fig, axs = plt.subplots(5)
for i in range(4):
tdata = trace['coeff'][:,i]
kde = gaussian_kde(tdata)
tgridx = np.linspace(min(tdata), max(tdata), 200)
axs[i].plot(tgridx, kde(tgridx))
axs[i].set_title('Coeff{}'.format(i))
tdata = trace['sigma']
kde = gaussian_kde(tdata)
tgridx = np.linspace(min(tdata), max(tdata), 200)
axs[4].plot(tgridx, kde(tgridx))
axs[4].set_title('Sigma')
plt.tight_layout()
plt.savefig("trace.png")
plt.show()
| apache-2.0 | 3,971,333,095,095,953,400 | 34.698113 | 237 | 0.647199 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/statistics/hist.py | 1 | 3990 | """
==========
Histograms
==========
Demonstrates how to plot histograms with matplotlib.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
# from matplotlib.ticker import PercentFormatter
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot12()
# nodebox section end
# Fixing random state for reproducibility
np.random.seed(19680801)
###############################################################################
# Generate data and plot a simple histogram
# -----------------------------------------
#
# To generate a 1D histogram we only need a single vector of numbers. For a 2D
# histogram we'll need a second vector. We'll generate both below, and show
# the histogram for each vector.
N_points = 100000
n_bins = 20
# Generate a normal distribution, center at x=0 and y=5
x = np.random.randn(N_points)
y = .4 * x + np.random.randn(100000) + 5
fig, axs = plt.subplots(1, 2, sharey=True, tight_layout=True)
# We can set the number of bins with the `bins` kwarg
axs[0].hist(x, bins=n_bins)
axs[1].hist(y, bins=n_bins)
###############################################################################
# Updating histogram colors
# -------------------------
#
# The histogram method returns (among other things) a `patches` object. This
# gives us access to the properties of the objects drawn. Using this, we can
# edit the histogram to our liking. Let's change the color of each bar
# based on its y value.
fig, axs = plt.subplots(1, 2, tight_layout=True)
# N is the count in each bin, bins is the lower-limit of the bin
N, bins, patches = axs[0].hist(x, bins=n_bins)
# We'll color code by height, but you could use any scalar
fracs = N.astype(float) / N.max()
# we need to normalize the data to 0..1 for the full range of the colormap
norm = colors.Normalize(fracs.min(), fracs.max())
# Now, we'll loop through our objects and set the color of each accordingly
for thisfrac, thispatch in zip(fracs, patches):
color = plt.cm.viridis(norm(thisfrac))
thispatch.set_facecolor(color)
# We can also normalize our inputs by the total number of counts
axs[1].hist(x, bins=n_bins, normed=True)
# Now we format the y-axis to display percentage
# axs[1].yaxis.set_major_formatter(PercentFormatter(xmax=1))
###############################################################################
# Plot a 2D histogram
# -------------------
#
# To plot a 2D histogram, one only needs two vectors of the same length,
# corresponding to each axis of the histogram.
fig, ax = plt.subplots(tight_layout=True)
hist = ax.hist2d(x, y)
###############################################################################
# Customizing your histogram
# --------------------------
#
# Customizing a 2D histogram is similar to the 1D case, you can control
# visual components such as the bin size or color normalization.
fig, axs = plt.subplots(3, 1, figsize=(5, 15), sharex=True, sharey=True,
tight_layout=True)
# We can increase the number of bins on each axis
axs[0].hist2d(x, y, bins=40)
# As well as define normalization of the colors
axs[1].hist2d(x, y, bins=40, norm=colors.LogNorm())
# We can also define custom numbers of bins for each axis
axs[2].hist2d(x, y, bins=(80, 10), norm=colors.LogNorm())
pltshow(plt)
| mit | 5,362,261,448,032,349,000 | 28.555556 | 82 | 0.607018 | false |
dashmoment/facerecognition | py/apps/scripts/preprocessing_experiments.py | 2 | 5764 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import sys, os
sys.path.append("../..")
# facerec
from facerec.feature import Fisherfaces, PCA, SpatialHistogram, Identity
from facerec.distance import EuclideanDistance, ChiSquareDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import subplot
from facerec.util import minmax_normalize
from facerec.serialization import save_model, load_model
# required libraries
import numpy as np
# try to import the PIL Image module
try:
from PIL import Image
except ImportError:
import Image
import matplotlib.cm as cm
import logging
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from facerec.lbp import LPQ, ExtendedLBP
class FileNameFilter:
def __init__(self, name):
self._name = name
def __call__(self, filename):
return True
def __repr__(self):
return "FileNameFilter (name=%s)" % (self._name)
class YaleBaseFilter(FileNameFilter):
def __init__(self, min_azimuth, max_azimuth, min_elevation, max_elevation):
FileNameFilter.__init__(self, "Filter YaleFDB Subset1")
self._min_azimuth = min_azimuth
self._max_azimuth = max_azimuth
self._min_elevation = min_elevation
self._max_elevation = max_elevation
def __call__(self, filename):
# We only want the PGM files:
filetype = filename[-4:]
if filetype != ".pgm":
return False
# There are "Ambient" PGM files, ignore them:
if "Ambient" in filename:
return False
azimuth = int(filename[12:16])
elevation = int(filename[17:20])
# Now filter based on angles:
if azimuth < self._min_azimuth or azimuth > self._max_azimuth:
return False
if elevation < self._min_elevation or elevation > self._max_elevation:
return False
return True
def read_images(path, fileNameFilter=FileNameFilter("None"), sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
sz: A tuple with the size Resizes
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
if fileNameFilter(filename):
print filename
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
im = im.resize(self.sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y]
if __name__ == "__main__":
# This is where we write the images, if an output_dir is given
# in command line:
out_dir = None
# You'll need at least a path to your image data, please see
# the tutorial coming with this source code on how to prepare
# your image data:
if len(sys.argv) < 2:
print "USAGE: facerec_demo.py </path/to/images>"
sys.exit()
yale_filter = YaleBaseFilter(-25, 25, -25, 25)
# Now read in the image data. This must be a valid path!
[X,y] = read_images(sys.argv[1], yale_filter)
# Then set up a handler for logging:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Define the Fisherfaces as Feature Extraction method:
feature = PCA()
# Define a 1-NN classifier with Euclidean Distance:
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# Define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# Compute the Fisherfaces on the given data (in X) and labels (in y):
model.compute(X, y)
# Then turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
# Perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(X, y)
# And print the result:
cv.print_results()
| bsd-3-clause | 7,224,324,888,766,617,000 | 37.172185 | 125 | 0.629077 | false |
wangsix/cluster | bins/xmeans_demo.py | 1 | 2365 | '''
Created on Mar 15, 2012
@author: Wang
'''
import numpy as np
from scipy.cluster.vq import *
import pylab
import matplotlib.pyplot as plt
import cluster
plt.figure()
class1 = np.array(np.random.standard_normal((2,2))) + np.array([5,5])
class2 = np.array(np.random.standard_normal((1,2)))
class3 = np.array(np.random.standard_normal((1,2))) + np.array([-5,-5])
class4 = np.array(np.random.standard_normal((1,2))) + np.array([-5,5])
features = np.vstack((class1,class2,class3,class4))
test = cluster.X_means(features)
print 'From x-means: ', test.final_k
plt.subplot(131)
pylab.plot([p[0] for p in class1],[p[1] for p in class1],'o', markersize = 60)
pylab.plot([p[0] for p in class2],[p[1] for p in class2],'or', markersize = 60)
pylab.plot([p[0] for p in class3],[p[1] for p in class3],'og', markersize = 60)
pylab.plot([p[0] for p in class4],[p[1] for p in class4],'ok', markersize = 60)
class1 = np.array(np.random.standard_normal((2,2))) + np.array([5,5])
class2 = np.array(np.random.standard_normal((3,2)))
#class3 = np.array(np.random.standard_normal((1,2))) + np.array([-5,-5])
#class4 = np.array(np.random.standard_normal((1,2))) + np.array([-5,5])
features = np.vstack((class1,class2))
test = cluster.X_means(features)
print 'From x-means: ', test.final_k
plt.subplot(132)
pylab.plot([p[0] for p in class1],[p[1] for p in class1],'o', markersize = 60)
pylab.plot([p[0] for p in class2],[p[1] for p in class2],'or', markersize = 60)
class1 = np.array(np.random.standard_normal((1,2))) + np.array([5,5])
class2 = np.array(np.random.standard_normal((1,2)))
class3 = np.array(np.random.standard_normal((1,2))) + np.array([-5,-5])
class4 = np.array(np.random.standard_normal((1,2))) + np.array([-5,5])
class5 = np.array(np.random.standard_normal((1,2))) + np.array([5,-5])
features = np.vstack((class1,class2,class3,class4,class5))
test = cluster.X_means(features)
print 'From x-means: ', test.final_k
plt.subplot(133)
pylab.plot([p[0] for p in class1],[p[1] for p in class1],'o', markersize = 60)
pylab.plot([p[0] for p in class2],[p[1] for p in class2],'or', markersize = 60)
pylab.plot([p[0] for p in class3],[p[1] for p in class3],'og', markersize = 60)
pylab.plot([p[0] for p in class4],[p[1] for p in class4],'ok', markersize = 60)
pylab.plot([p[0] for p in class5],[p[1] for p in class5],'om', markersize = 60)
pylab.show()
# | gpl-3.0 | 944,239,356,310,498,200 | 38.433333 | 81 | 0.663002 | false |
zehpunktbarron/iOSMAnalyzer | scripts/c6_landuse.py | 1 | 3381 | # -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#description :This file creates a plot: Calculates the development of all objects with a "landuse"-tag
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pylab
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
# Verbindung mit der DB mittels psycopg2 herstellen
conn = psycopg2.connect(conn_string)
print "Connection to database was established succesfully"
except:
print "Connection to database failed"
###
### Execute SQL query
###
# Mit dieser neuen "cursor Methode" koennen SQL-Abfragen abgefeuert werden
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
SELECT
(SELECT COUNT(id)
FROM
hist_polygon
WHERE tags ? 'landuse' AND visible = 'true' AND
((version = (SELECT max(version) from hist_polygon as h where h.id = hist_polygon.id AND
valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null)))
AND minor = (SELECT max(minor) from hist_polygon as h where h.id = hist_polygon.id AND h.version = hist_polygon.version AND
(valid_from <= generate_series AND (valid_to >= generate_series OR valid_to is null))))
) AS length, date_trunc('month', generate_series)::date
FROM generate_series(
(SELECT date_trunc ('month',(
SELECT MIN(valid_from) FROM hist_plp)) as foo), -- Select minimum date (month)
(SELECT MAX(valid_from) FROM hist_plp)::date, -- Select maximum date
interval '1 month')
;
""")
# Getting a list of tuples from the database-cursor (cur)
data_tuples = []
for row in cur:
data_tuples.append(row)
except:
print "Query could not be executed"
###
### Plot (Multiline-Chart)
###
# Datatypes of the returning data: column 1(col1) --> integer, column 2(date) --> string
datatypes = [('col1', 'i4'), ('date', 'S20')]
# Data-tuple and datatype
data = np.array(data_tuples, dtype=datatypes)
# Date comes from 'col1'
col1 = data['col1']
# Converts date to a manageable date-format for matplotlib
dates = mdates.num2date(mdates.datestr2num(data['date']))
fig, ax = plt.subplots()
# Create barchart (x-axis=dates, y-axis=col1,
ax.bar(dates, col1, width=15, align='center', color = '#2dd700')
# Place a gray dashed grid behind the thicks (only for y-axis)
ax.yaxis.grid(color='gray', linestyle='dashed')
# Set this grid behind the thicks
ax.set_axisbelow(True)
# Rotate x-labels on the x-axis
fig.autofmt_xdate()
# Label x and y axis
plt.xlabel('Date')
plt.ylabel('Amount of Landuse-Polygons')
# Locate legend on the plot (http://matplotlib.org/users/legend_guide.html#legend-location)
plt.legend(loc=1)
# Plot-title
plt.title("Development of Landuse-Polygons")
# show plot
#pylab.show()
# Save plot to *.jpeg-file
plt.savefig('pics/c6_landuse.jpeg')
plt.clf()
| gpl-3.0 | 5,170,037,965,798,347,000 | 27.411765 | 131 | 0.677906 | false |
KellyBlack/Precalculus | functions/img/composition.py | 1 | 1856 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
from matplotlib.patches import FancyArrowPatch
from matplotlib.patches import Ellipse
import math
import sys
from BasicPlot import BasicPlot
plotter = BasicPlot()
plt.figure(num=1,frameon=False)
###############################
plotter.clearPlot()
#plt.xkcd(scale=.6) #randomness=1,length=1,scale=0)
plotter.subplot(1,2,1)
plotter.setupGrid(0.3,'--',
-4.0,1.0,4.1,
-4.0,1.0,4.1)
plotter.setAxesBounds(-4.1,4.1,-4.1,4.1)
plotter.axesDecorations('Graph of f','x','f')
plotter.addInterpolant([[-4,-2],[-3,-1],[-2,0],[0,1],[2,2],[4,4]],
np.arange(-4,4.01,0.1),'k-',2.5)
axes = plotter.getAxes()
axes.spines['right'].set_color('none')
axes.spines['top'].set_color('none')
axes.xaxis.set_ticks_position('bottom')
axes.spines['bottom'].set_position(('data',0))
axes.yaxis.set_ticks_position('left')
axes.spines['left'].set_position(('data',0))
axes.xaxis.set_label_coords(0.95, 0.45)
axes.yaxis.set_label_coords(0.45, 0.95)
plotter.subplot(1,2,2)
plotter.setupGrid(0.3,'--',
-4.0,1.0,4.1,
-4.0,1.0,4.1)
plotter.setAxesBounds(-4.1,4.1,-4.1,4.1)
plotter.axesDecorations('Graph of g','x','g')
plotter.addInterpolant([[-4,3],[-3,2],[-2,1],[0,0],[2,-1],[4,-4]],
np.arange(-4,4.01,0.1),'k-',2.5)
axes = plotter.getAxes()
axes.spines['right'].set_color('none')
axes.spines['top'].set_color('none')
axes.xaxis.set_ticks_position('bottom')
axes.spines['bottom'].set_position(('data',0))
axes.yaxis.set_ticks_position('left')
axes.spines['left'].set_position(('data',0))
axes.xaxis.set_label_coords(0.95, 0.45)
axes.yaxis.set_label_coords(0.45, 0.95)
#plt.show()
plt.savefig('composition.pgf',format='pgf')
| gpl-3.0 | -7,112,809,213,044,979,000 | 28.460317 | 66 | 0.637392 | false |
bwvdnbro/HydroCodeSpherical1D | paper_workflows/fig_convergence_seed.py | 1 | 3451 | import numpy as np
import matplotlib
matplotlib.use("Agg")
import pylab as pl
import scipy.special.lambertw as lambertw
import sys
if len(sys.argv) < 2:
print "Usage: python fig_convergence_seed.py amplitude"
exit()
amplitude = float(sys.argv[1])
pl.rcParams["text.usetex"] = True
pl.rcParams["figure.figsize"] = (4.5, 4)
gamma = 5. / 3.
au_in_si = 1.495978707e11 # m
yr_in_si = 365.25 * 24. * 3600. # s
## Bondi
# input unit parameters
unit_length_in_si = 1.2e13
unit_mass_in_si = 2.479e31
G_in_si = 6.67408e-11
k_in_si = 1.38064852e-23
mH_in_si = 1.674e-27
solar_mass_in_si = 1.9891e30
# derived units
unit_time_in_si = np.sqrt(unit_length_in_si**3 / (unit_mass_in_si * G_in_si))
unit_density_in_si = unit_mass_in_si / (unit_length_in_si**3)
unit_velocity_in_si = unit_length_in_si / unit_time_in_si
unit_pressure_in_si = unit_mass_in_si / (unit_length_in_si * unit_time_in_si**2)
# input parameters
# physical
mass_point_mass = 18. * solar_mass_in_si
T_n = 500
pressure_contrast = 32.
bondi_rho_n = 1.e-16
r_ion = 30. * au_in_si
# practical
r_min = 10. * au_in_si
r_max = 100. * au_in_si
# derived parameters
cs2_n = T_n * k_in_si / mH_in_si
cs2_i = pressure_contrast * cs2_n
bondi_r_n = 0.5 * G_in_si * mass_point_mass / cs2_n
bondi_r_i = 0.5 * G_in_si * mass_point_mass / cs2_i
cs_n = np.sqrt(cs2_n)
cs_i = np.sqrt(cs2_i)
def neutral_bondi(r):
global cs_n, cs2_n, bondi_r_n, bondi_rho_n
u = bondi_r_n / r
omega = -u**4 * np.exp(3. - 4. * u)
v = np.where(r < bondi_r_n,
-cs_n * np.sqrt(-lambertw(omega, -1).real),
-cs_n * np.sqrt(-lambertw(omega, 0).real))
rho = -bondi_rho_n * bondi_r_n**2 * cs_n / r**2 / v
P = cs2_n * rho
return rho, v, P, r * 0. + 1.
rho1, v1, _, _ = neutral_bondi(r_ion)
Gamma = 0.5 * (v1**2 + cs2_n - \
np.sqrt((v1**2 + cs2_n)**2 - 4. * v1**2 * cs2_i)) / cs2_i
rho2 = Gamma * rho1
v2 = v1 / Gamma
def ionised_bondi(r):
global r_ion, v2, cs_i, cs2_i, bondi_r_i, rho2
omega = -(v2 / cs_i)**2 * (r_ion / r)**4 * \
np.exp(4. * (bondi_r_i / r_ion - bondi_r_i / r) - v2**2 / cs_i**2)
v = -cs_i * np.sqrt(-lambertw(omega, -1).real)
rho = r_ion**2 * v2 * rho2 / r**2 / v
P = cs2_i * rho
return rho, v, P, rho * 0.
ra = np.linspace(r_min, r_max, 1000)
rhoa, va, Pa, na = np.where(ra < r_ion, ionised_bondi(ra), neutral_bondi(ra))
ra /= au_in_si
rhoa *= 0.001
va *= 0.001
def plot(f, ax):
ifile = open(f, 'r')
timeline = ifile.readline()
time = float(timeline.split()[2]) / yr_in_si
ifile.close()
data = np.loadtxt(f)
data[:,0] /= au_in_si
data[:,1] *= 0.001
data[:,2] *= 0.001
ax[0].semilogy(data[:,0], data[:,1], "-",
label = "$t = {0:.1f}~{{\\rm{{}}yr}}$".format(time))
ax[1].plot(data[:,0], data[:,2], "-")
fig, ax = pl.subplots(2, 1, sharex = True)
filename = "convergence_seed_{nr:03d}_{sign}{amplitude}.txt"
for nr in [0, 325, 425, 650, 900]:
f = filename.format(nr = nr, sign = 'p' if amplitude > 0. else 'm',
amplitude = abs(amplitude))
plot(f, ax)
ax[0].legend(loc = "best")
ax[0].plot(ra, rhoa, "k--", linewidth = 0.8)
ax[1].plot(ra, va, "k--", linewidth = 0.8)
ax[0].set_ylabel("$\\rho{}$ (g cm$^{-3}$)")
ax[1].set_ylabel("$v$ (km s$^{-1}$)")
ax[1].set_xlabel("$r$ (AU)")
pl.tight_layout()
pl.savefig("fig_convergence_seed_{sign}{amplitude}.eps".format(
sign = 'p' if amplitude > 0. else 'm', amplitude = abs(amplitude)),
dpi = 300)
pl.close()
| agpl-3.0 | -8,984,461,688,933,214,000 | 26.830645 | 80 | 0.580122 | false |
JiaMingLin/de-identification | test/test_measure_func.py | 1 | 1068 | import common.constant as c
from django.test import TestCase
from common.data_utilities import DataUtils
from utility_measure.measure_func import UserQuery
TEST_DATA_PATH = c.TEST_ORIGIN_DATA_PATH
class TestMeasureFunc(TestCase):
def setUp(self):
self.queries = [
"Age > 50 and workclass == 'Self-emp-not-inc'",
"fnlwgt > 159449 and (native_country == 'United-States' or native_country == 'Cuba')",
"fnlwgt > 159449 and native_country in ('United-States', 'Cuba')",
"native_country == 'United-States' or native_country == 'Cuba' and fnlwgt > 159449"
]
self.user_query = UserQuery(TEST_DATA_PATH)
def test_get_query_count_with_same_results_cnt(self):
data = DataUtils(file_path = TEST_DATA_PATH)
df = data.get_pandas_df()
result_cnt = [self.user_query.get_query_count(df, query) for query in self.queries]
self.assertEqual(result_cnt == [1278, 24339, 24339, 41415], True)
def test_error_measure_with_same_dataset(self):
results = self.user_query.get_errors(TEST_DATA_PATH, self.queries)
self.assertEqual(results == [0,0,0,0], True) | apache-2.0 | -6,687,662,837,570,749,000 | 38.592593 | 89 | 0.716292 | false |
yousrabk/mne-python | mne/viz/epochs.py | 2 | 60751 | """Functions to plot epochs data
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: Simplified BSD
from functools import partial
import copy
import numpy as np
from ..utils import verbose, get_config, set_config, logger
from ..io.pick import pick_types, channel_type
from ..io.proj import setup_proj
from ..fixes import Counter, _in1d
from ..time_frequency import compute_epochs_psd
from .utils import (tight_layout, figure_nobar, _toggle_proj, _toggle_options,
_layout_figure, _setup_vmin_vmax, _channels_changed,
_plot_raw_onscroll, _onclick_help, plt_show)
from ..defaults import _handle_default
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
                      vmax=None, colorbar=True, order=None, show=True,
                      units=None, scalings=None, cmap='RdBu_r', fig=None):
    """Plot Event Related Potential / Fields image

    One figure is produced per picked channel, each showing the single-trial
    image (epochs x time) on top and the evoked (average) trace below.

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs.
    picks : int | array-like of int | None
        The indices of the channels to consider. If None, all good
        data channels are plotted.
    sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis to apply in the image. If 0., no smoothing is applied.
    vmin : float
        The min value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers. If None, the
        limit is computed from the data.
    vmax : float
        The max value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers. If None, the
        limit is computed from the data.
    colorbar : bool
        Display or not a colorbar.
    order : None | array of int | callable
        If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int it should be of length
        the number of good epochs. If it's a callable the arguments
        passed are the times vector and the data as 2d array
        (data.shape[1] == len(times)).
    show : bool
        Show figure if True.
    units : dict | None
        The units of the channel types used for axes lables. If None,
        defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting.
        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
        eog=1e6)`.
    cmap : matplotlib colormap
        Colormap.
    fig : matplotlib figure | None
        Figure instance to draw the image to. Figure must contain two axes for
        drawing the single trials and evoked responses. If None a new figure is
        created. Defaults to None. Only valid with a single pick.

    Returns
    -------
    figs : the list of matplotlib figures
        One figure per channel displayed.

    Raises
    ------
    ValueError
        If ``units`` and ``scalings`` do not share the same keys, or if
        ``fig`` is given together with more than one pick.
    KeyError
        If a picked channel's type is missing from ``scalings``/``units``.
    """
    from scipy import ndimage
    units = _handle_default('units', units)
    scalings = _handle_default('scalings', scalings)

    import matplotlib.pyplot as plt
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
                           exclude='bads')

    if set(units.keys()) != set(scalings.keys()):
        raise ValueError('Scalings and units must have the same keys.')

    picks = np.atleast_1d(picks)
    if fig is not None and len(picks) > 1:
        raise ValueError('Only single pick can be drawn to a figure.')
    evoked = epochs.average(picks)
    data = epochs.get_data()[:, picks, :]
    # Remember whether the color limits were auto-computed (they are then in
    # raw data units and must be brought into plotting units per channel).
    scale_vmin = vmin is None
    scale_vmax = vmax is None
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)

    figs = list()
    for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
        if fig is None:
            this_fig = plt.figure()
        else:
            this_fig = fig
        figs.append(this_fig)

        ch_type = channel_type(epochs.info, idx)
        if ch_type not in scalings:
            # We know it's not in either scalings or units since keys match
            raise KeyError('%s type not in scalings and units' % ch_type)
        this_data *= scalings[ch_type]

        this_order = order
        if callable(order):
            this_order = order(epochs.times, this_data)

        if this_order is not None:
            this_data = this_data[this_order]

        if sigma > 0.:
            # smooth along the epochs (vertical) axis only
            this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma,
                                                  axis=0)
        plt.figure(this_fig.number)
        ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
        # Scale auto-computed color limits into this channel's plotting units
        # with per-iteration locals. Mutating vmin/vmax in place here would
        # rescale them cumulatively on every loop iteration, which is wrong
        # whenever more than one channel is plotted.
        this_vmin = vmin * scalings[ch_type] if scale_vmin else vmin
        this_vmax = vmax * scalings[ch_type] if scale_vmax else vmax
        im = ax1.imshow(this_data,
                        extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
                                0, len(data)],
                        aspect='auto', origin='lower', interpolation='nearest',
                        vmin=this_vmin, vmax=this_vmax, cmap=cmap)
        ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
        if colorbar:
            ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
        ax1.set_title(epochs.ch_names[idx])
        ax1.set_ylabel('Epochs')
        ax1.axis('auto')
        ax1.axis('tight')
        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
        evoked_data = scalings[ch_type] * evoked.data[i]
        ax2.plot(1e3 * evoked.times, evoked_data)
        ax2.set_xlabel('Time (ms)')
        ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
        ax2.set_ylabel(units[ch_type])
        # Auto-computed evoked limits are padded by 10 % and made symmetric
        # about zero; explicit limits are used verbatim.
        evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else this_vmin
        evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else this_vmax
        if scale_vmin or scale_vmax:
            evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
            evoked_vmin = -evoked_vmax
        ax2.set_ylim([evoked_vmin, evoked_vmax])
        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
        if colorbar:
            plt.colorbar(im, cax=ax3)
            tight_layout(fig=this_fig)

    plt_show(show)
    return figs
def _drop_log_stats(drop_log, ignore=['IGNORED']):
"""
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
# XXX: This function should be moved to epochs.py after
# removal of perc return parameter in plot_drop_log()
if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
raise ValueError('drop_log must be a list of lists')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
                  color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
                  show=True):
    """Plot a bar chart of channel drop statistics from an Epochs drop log.

    Parameters
    ----------
    drop_log : list of lists
        Epoch drop log from Epochs.drop_log.
    threshold : float
        Percentage threshold below which nothing is plotted ('No drops'
        is shown instead). Default is zero (always plot).
    n_max_plot : int
        Maximum number of channels to show stats for.
    subject : str
        The subject name to use in the title of the plot.
    color : tuple | str
        Color to use for the bars.
    width : float
        Width of the bars.
    ignore : list
        The drop reasons to ignore.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    percent = _drop_log_stats(drop_log, ignore)
    # How often each channel was responsible for dropping an epoch.
    scores = Counter(ch for entry in drop_log for ch in entry
                     if ch not in ignore)
    ch_names = np.array(list(scores.keys()))
    fig = plt.figure()
    if percent < threshold or len(ch_names) == 0:
        plt.text(0, 0, 'No drops')
        return fig
    drop_rates = (100 * np.array(list(scores.values()), dtype=float) /
                  len(drop_log))
    n_bars = min(n_max_plot, len(ch_names))
    # Worst offenders first.
    sort_idx = np.flipud(np.argsort(drop_rates))
    plt.title('%s: %0.1f%%' % (subject, percent))
    positions = np.arange(n_bars)
    plt.bar(positions, drop_rates[sort_idx[:n_bars]], color=color,
            width=width)
    plt.xticks(positions + width / 2.0, ch_names[sort_idx[:n_bars]],
               rotation=45, horizontalalignment='right')
    plt.tick_params(axis='x', which='major', labelsize=10)
    plt.ylabel('% of epochs rejected')
    plt.xlim((-width / 2.0, (n_bars - 1) + width * 3 / 2))
    plt.grid(True, axis='y')
    plt_show(show)
    return fig
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
                      title_str, axes_handler):
    """Redraw one page of per-epoch axes with new data.

    Updates each axes' line data in place (good channels first, then bad
    channels at the end of ``ax.lines``), and restores/applies the grey
    "rejected" coloring according to the per-axes state stored under the
    current handler key. ``axes_handler`` appears to be a rotating deque
    of page keys whose first element identifies the current page — TODO
    confirm against the caller (_epochs_navigation_onclick).
    """
    this = axes_handler[0]
    for ii, data_, ax in zip(epoch_idx, data, axes):
        # Update good-channel traces (one line per good channel).
        for l, d in zip(ax.lines, data_[good_ch_idx]):
            l.set_data(times, d)
        if bad_ch_idx is not None:
            # Bad-channel traces live at the indices listed in bad_ch_idx.
            bad_lines = [ax.lines[k] for k in bad_ch_idx]
            for l, d in zip(bad_lines, data_[bad_ch_idx]):
                l.set_data(times, d)
        if title_str is not None:
            ax.set_title(title_str % ii, fontsize=12)
        # Common y-limits across the whole page; no ticks on small axes.
        ax.set_ylim(data.min(), data.max())
        ax.set_yticks(list())
        ax.set_xticks(list())
        if vars(ax)[this]['reject'] is True:
            # memorizing reject: this epoch is marked bad on the current
            # page, so grey out all its traces.
            for l in ax.lines:
                l.set_color((0.8, 0.8, 0.8))
            ax.get_figure().canvas.draw()
        else:
            # forgetting previous reject: if any *other* page had greyed
            # these axes out, restore the normal colors once.
            for k in axes_handler:
                if k == this:
                    continue
                if vars(ax).get(k, {}).get('reject', None) is True:
                    for l in ax.lines[:len(good_ch_idx)]:
                        l.set_color('k')
                    if bad_ch_idx is not None:
                        for l in ax.lines[-len(bad_ch_idx):]:
                            l.set_color('r')
                    ax.get_figure().canvas.draw()
                    break
def _epochs_navigation_onclick(event, params):
    """Handle clicks on the back / next / reject-quit navigation buttons."""
    import matplotlib.pyplot as plt
    shift = None
    if event.inaxes == params['back'].ax:
        shift = 1
    elif event.inaxes == params['next'].ax:
        shift = -1
    elif event.inaxes == params['reject-quit'].ax:
        # Drop everything the user marked, then close both figures.
        if params['reject_idx']:
            params['epochs'].drop_epochs(params['reject_idx'])
        plt.close(params['fig'])
        plt.close(event.inaxes.get_figure())
    if shift is not None:
        params['idx_handler'].rotate(shift)
        params['axes_handler'].rotate(shift)
        current = params['idx_handler'][0]
        _draw_epochs_axes(current, params['good_ch_idx'],
                          params['bad_ch_idx'], params['data'][current],
                          params['times'], params['axes'],
                          params['title_str'], params['axes_handler'])
        # XXX don't ask me why
        params['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
    """Toggle the rejection state of the epoch whose axes was clicked."""
    reject_color = (0.8, 0.8, 0.8)
    ax = event.inaxes
    if event.inaxes is None:
        return
    # Per-axes state dict for the current page of the browser.
    state = vars(ax)[params['axes_handler'][0]]
    if state.get('reject', None) is False:
        # Not rejected yet: mark it and grey out every trace.
        epoch_idx = state['idx']
        if epoch_idx not in params['reject_idx']:
            params['reject_idx'].append(epoch_idx)
            for line in ax.lines:
                line.set_color(reject_color)
            state['reject'] = True
    elif state.get('reject', None) is True:
        # Already rejected: unmark and restore normal colors.
        epoch_idx = state['idx']
        if epoch_idx in params['reject_idx']:
            params['reject_idx'].remove(epoch_idx)
            for line in [ax.lines[k] for k in params['good_ch_idx']]:
                line.set_color('k')
            if params['bad_ch_idx'] is not None:
                for line in ax.lines[-len(params['bad_ch_idx']):]:
                    line.set_color('r')
            state['reject'] = False
    ax.get_figure().canvas.draw()
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20,
                n_channels=20, title=None, show=True, block=False):
    """Visualize epochs interactively.

    Bad epochs can be marked with a left click on top of the epoch; bad
    channels can be selected by clicking the channel name on the left side
    of the main axes. Calling this function drops all the selected bad
    epochs as well as bad epochs marked beforehand with rejection
    parameters.

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs object.
    picks : array-like of int | None
        Channels to be included. If None only good data channels are used.
        Defaults to None.
    scalings : dict | None
        Scale factors for the traces. If None, defaults to::

            dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
                 emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1,
                 chpi=1e-4)

    n_epochs : int
        The number of epochs per view. Defaults to 20.
    n_channels : int
        The number of channels per view. Defaults to 20.
    title : str | None
        The title of the window. If None, epochs name will be displayed.
        Defaults to None.
    show : bool
        Show figure if True. Defaults to True.
    block : bool
        Whether to halt program execution until the figure is closed.
        Useful for rejecting bad trials on the fly by clicking on an
        epoch. Defaults to False.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.

    Notes
    -----
    The arrow keys (up/down/left/right) navigate between channels and
    epochs; the scaling is adjusted with - and + (or =) keys, depending on
    the backend matplotlib is configured to use (e.g. mpl.use(``TkAgg``)
    should work). Full screen mode is toggled with the f11 key, the amount
    of epochs and channels per view with home/end and page down/page up
    keys, butterfly plot with the ``b`` key. A right mouse click adds a
    vertical line to the plot.
    """
    # Apply any pending rejection before building the browser.
    epochs.drop_bad_epochs()
    scalings = _handle_default('scalings_plot_raw', scalings)
    projs = epochs.info['projs']
    # Shared mutable state threaded through every callback.
    params = dict(epochs=epochs,
                  info=copy.deepcopy(epochs.info),
                  bad_color=(0.8, 0.8, 0.8),
                  t_start=0,
                  histogram=None)
    params['label_click_fun'] = partial(_pick_bad_channels, params=params)
    _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
                               title, picks)
    _prepare_projectors(params)
    _layout_figure(params)
    params['fig'].canvas.mpl_connect('close_event',
                                     partial(_close_event, params=params))
    try:
        plt_show(show, block=block)
    except TypeError:  # not all matplotlib versions support `block`
        plt_show(show)
    return params['fig']
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
                    proj=False, n_fft=256,
                    picks=None, ax=None, color='black', area_mode='std',
                    area_alpha=0.33, n_overlap=0,
                    dB=True, n_jobs=1, show=True, verbose=None):
    """Plot the power spectral density across epochs

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs object
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    tmin : float | None
        Start time to consider.
    tmax : float | None
        End time to consider.
    proj : bool
        Apply projection.
    n_fft : int
        Number of points to use in Welch FFT calculations.
    picks : array-like of int | None
        List of channels to use.
    ax : instance of matplotlib Axes | None
        Axes to plot into. If None, axes will be created.
    color : str | tuple
        A matplotlib-compatible color to use.
    area_mode : str | None
        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
        will be plotted. If 'range', the min and max (across channels) will be
        plotted. Bad channels will be excluded from these calculations.
        If None, no area will be plotted.
    area_alpha : float
        Alpha for the area.
    n_overlap : int
        The number of points of overlap between blocks.
    dB : bool
        If True, transform data to decibels.
    n_jobs : int
        Number of jobs to run in parallel.
    show : bool
        Show figure if True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fig : instance of matplotlib figure
        Figure distributing one image per channel across sensor topography.
    """
    from .raw import _set_psd_plot_params
    # One (picks, title, axes) triple per channel-type group to plot.
    fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
        epochs.info, proj, picks, ax, area_mode)
    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
                                                ax_list)):
        # psds is assumed (n_epochs, n_channels, n_freqs) — axis-0 mean is
        # across epochs, the following axis-0 mean across channels.
        psds, freqs = compute_epochs_psd(epochs, picks=picks, fmin=fmin,
                                         fmax=fmax, tmin=tmin, tmax=tmax,
                                         n_fft=n_fft,
                                         n_overlap=n_overlap, proj=proj,
                                         n_jobs=n_jobs)
        # Convert PSDs to dB
        if dB:
            psds = 10 * np.log10(psds)
            unit = 'dB'
        else:
            unit = 'power'
        # mean across epochs and channels
        psd_mean = np.mean(psds, axis=0).mean(axis=0)
        if area_mode == 'std':
            # std across channels
            psd_std = np.std(np.mean(psds, axis=0), axis=0)
            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
        elif area_mode == 'range':
            hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
                          np.max(np.mean(psds, axis=0), axis=0))
        else:  # area_mode is None
            hyp_limits = None
        ax.plot(freqs, psd_mean, color=color)
        if hyp_limits is not None:
            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
                            color=color, alpha=area_alpha)
        if make_label:
            # Label only the outermost axes to avoid clutter.
            if ii == len(picks_list) - 1:
                ax.set_xlabel('Freq (Hz)')
            if ii == len(picks_list) // 2:
                ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
            ax.set_title(title)
            ax.set_xlim(freqs[0], freqs[-1])
    if make_label:
        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
    plt_show(show)
    return fig
def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
                               title, picks, order=None):
    """Helper for setting up the mne_browse_epochs window.

    Builds the browser figure (main trace axes, horizontal/vertical scroll
    bars, help button), orders the picked channels by type, pre-allocates
    the line collections and data buffer, and stores every object the
    interaction callbacks need into ``params``.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    from matplotlib.collections import LineCollection
    from matplotlib.colors import colorConverter
    epochs = params['epochs']
    if picks is None:
        picks = _handle_picks(epochs)
    if len(picks) < 1:
        raise RuntimeError('No appropriate channels found. Please'
                           ' check your picks')
    picks = sorted(picks)
    # Reorganize channels: MEG gradiometers/magnetometers first, then the
    # remaining types in `order`.
    inds = list()
    types = list()
    for t in ['grad', 'mag']:
        idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
        if len(idxs) < 1:
            continue
        mask = _in1d(idxs, picks, assume_unique=True)
        inds.append(idxs[mask])
        types += [t] * len(inds[-1])
    pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
    if order is None:
        order = ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp', 'misc',
                 'chpi', 'syst', 'ias', 'exci']
    for ch_type in order:
        # Enable exactly one type per pick_types call.
        pick_kwargs[ch_type] = True
        idxs = pick_types(params['info'], **pick_kwargs)
        if len(idxs) < 1:
            continue
        mask = _in1d(idxs, picks, assume_unique=True)
        inds.append(idxs[mask])
        types += [ch_type] * len(inds[-1])
        pick_kwargs[ch_type] = False
    inds = np.concatenate(inds).astype(int)
    if not len(inds) == len(picks):
        raise RuntimeError('Some channels not classified. Please'
                           ' check your picks')
    ch_names = [params['info']['ch_names'][x] for x in inds]

    # set up plotting
    size = get_config('MNE_BROWSE_RAW_SIZE')
    n_epochs = min(n_epochs, len(epochs.events))
    duration = len(epochs.times) * n_epochs
    n_channels = min(n_channels, len(picks))
    if size is not None:
        # Config value is stored as e.g. "10.0,6.0".
        size = size.split(',')
        size = tuple(float(s) for s in size)
    if title is None:
        title = epochs.name
        if epochs.name is None or len(title) == 0:
            title = ''
    fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
    fig.canvas.set_window_title('mne_browse_epochs')
    ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
    ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
                ha='center', va='bottom', size=12, xycoords='axes fraction',
                textcoords='offset points')
    color = _handle_default('color', None)
    ax.axis([0, duration, 0, 200])
    # ax2 is a twin x-axis used to show event ids above the epoch numbers.
    ax2 = ax.twiny()
    ax2.set_zorder(-1)
    ax2.axis([0, duration, 0, 200])
    ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
    ax_hscroll.get_yaxis().set_visible(False)
    ax_hscroll.set_xlabel('Epochs')
    ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
    ax_vscroll.set_axis_off()
    # Background rectangle added before the per-channel patches; callbacks
    # index channel patches with an offset of +1 because of it.
    ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
                                               facecolor='w', zorder=2))
    ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
    help_button = mpl.widgets.Button(ax_help_button, 'Help')
    help_button.on_clicked(partial(_onclick_help, params=params))
    # populate vertical and horizontal scrollbars
    for ci in range(len(picks)):
        if ch_names[ci] in params['info']['bads']:
            this_color = params['bad_color']
        else:
            this_color = color[types[ci]]
        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
                                                   facecolor=this_color,
                                                   edgecolor=this_color,
                                                   zorder=3))
    # Selection indicator showing the currently visible channel window.
    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
                                       edgecolor='w', facecolor='w', zorder=4)
    ax_vscroll.add_patch(vsel_patch)
    ax_vscroll.set_ylim(len(types), 0)
    ax_vscroll.set_title('Ch.')

    # populate colors list
    type_colors = [colorConverter.to_rgba(color[c]) for c in types]
    colors = list()
    for color_idx in range(len(type_colors)):
        colors.append([type_colors[color_idx]] * len(epochs.events))
    lines = list()
    n_times = len(epochs.times)
    # One LineCollection per visible channel row.
    for ch_idx in range(n_channels):
        if len(colors) - 1 < ch_idx:
            break
        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
                            zorder=2, picker=3.)
        ax.add_collection(lc)
        lines.append(lc)
    times = epochs.times
    data = np.zeros((params['info']['nchan'], len(times) * n_epochs))
    ylim = (25., 0.)  # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
    # make shells for plotting traces
    offset = ylim[0] / n_channels
    offsets = np.arange(n_channels) * offset + (offset / 2.)
    # `times` becomes a sample index over all concatenated epochs.
    times = np.arange(len(times) * len(epochs.events))
    epoch_times = np.arange(0, len(times), n_times)
    ax.set_yticks(offsets)
    ax.set_ylim(ylim)
    ticks = epoch_times + 0.5 * n_times
    ax.set_xticks(ticks)
    ax2.set_xticks(ticks[:n_epochs])
    labels = list(range(1, len(ticks) + 1))  # epoch numbers
    ax.set_xticklabels(labels)
    ax2.set_xticklabels(labels)
    xlim = epoch_times[-1] + len(epochs.times)
    ax_hscroll.set_xlim(0, xlim)
    vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')

    # fit horizontal scroll bar ticks
    hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
    hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
    hticks = list()
    for tick in hscroll_ticks:
        # Snap each tick to the nearest epoch boundary.
        hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
    hlabels = [x / n_times + 1 for x in hticks]
    ax_hscroll.set_xticks(hticks)
    ax_hscroll.set_xticklabels(hlabels)

    for epoch_idx in range(len(epoch_times)):
        ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
                                                   n_times, 1, facecolor='w',
                                                   edgecolor='w', alpha=0.6))
    # Selection indicator showing the currently visible epoch window.
    hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
                                       edgecolor='k',
                                       facecolor=(0.75, 0.75, 0.75),
                                       alpha=0.25, linewidth=1, clip_on=False)
    ax_hscroll.add_patch(hsel_patch)
    text = ax.text(0, 0, 'blank', zorder=2, verticalalignment='baseline',
                   ha='left', fontweight='bold')
    text.set_visible(False)

    # Stash everything the interaction callbacks need.
    params.update({'fig': fig,
                   'ax': ax,
                   'ax2': ax2,
                   'ax_hscroll': ax_hscroll,
                   'ax_vscroll': ax_vscroll,
                   'vsel_patch': vsel_patch,
                   'hsel_patch': hsel_patch,
                   'lines': lines,
                   'projs': projs,
                   'ch_names': ch_names,
                   'n_channels': n_channels,
                   'n_epochs': n_epochs,
                   'scalings': scalings,
                   'duration': duration,
                   'ch_start': 0,
                   'colors': colors,
                   'def_colors': type_colors,  # don't change at runtime
                   'picks': picks,
                   'bads': np.array(list(), dtype=int),
                   'data': data,
                   'times': times,
                   'epoch_times': epoch_times,
                   'offsets': offsets,
                   'labels': labels,
                   'scale_factor': 1.0,
                   'butterfly_scale': 1.0,
                   'fig_proj': None,
                   'types': np.array(types),
                   'inds': inds,
                   'vert_lines': list(),
                   'vertline_t': vertline_t,
                   'butterfly': False,
                   'text': text,
                   'ax_help_button': ax_help_button,  # needed for positioning
                   'help_button': help_button,  # reference needed for clicks
                   'fig_options': None,
                   'settings': [True, True, True, True],
                   'image_plot': None})

    params['plot_fun'] = partial(_plot_traces, params=params)

    # callbacks
    callback_scroll = partial(_plot_onscroll, params=params)
    fig.canvas.mpl_connect('scroll_event', callback_scroll)
    callback_click = partial(_mouse_click, params=params)
    fig.canvas.mpl_connect('button_press_event', callback_click)
    callback_key = partial(_plot_onkey, params=params)
    fig.canvas.mpl_connect('key_press_event', callback_key)
    callback_resize = partial(_resize_event, params=params)
    fig.canvas.mpl_connect('resize_event', callback_resize)
    fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
    params['callback_key'] = callback_key

    # Draw event lines for the first time.
    _plot_vert_lines(params)
def _prepare_projectors(params):
    """Set up the SSP projector toggle machinery for the epochs browser."""
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    epochs = params['epochs']
    projs = params['projs']
    if len(projs) > 0 and not epochs.proj:
        # Projectors exist but are not yet applied: offer a toggle button.
        button_ax = plt.subplot2grid((10, 15), (9, 14))
        proj_button = mpl.widgets.Button(button_ax, 'Proj')
        proj_button.on_clicked(partial(_toggle_options, params=params))
        params['opt_button'] = proj_button
        params['ax_button'] = button_ax
    # This code path is shared with plot_evoked, so register the plot
    # update function and the projector toggle handler explicitly; both
    # are used by callbacks in the options figure.
    params['plot_update_proj_callback'] = _plot_update_epochs_proj
    toggle_proj = partial(_toggle_proj, params=params)
    params['callback_proj'] = toggle_proj
    toggle_proj('none')
def _plot_traces(params):
    """Redraw the concatenated epoch traces in the browser window.

    Reads the current view state from ``params`` (scroll positions,
    butterfly mode, scale factors) and updates each channel's
    LineCollection in place, recoloring bad channels/epochs and refreshing
    tick labels, then triggers a canvas draw.
    """
    params['text'].set_visible(False)
    ax = params['ax']
    butterfly = params['butterfly']
    if butterfly:
        # Butterfly mode shows all picked channels overlaid per type.
        ch_start = 0
        n_channels = len(params['picks'])
        data = params['data'] * params['butterfly_scale']
    else:
        ch_start = params['ch_start']
        n_channels = params['n_channels']
        data = params['data'] * params['scale_factor']
    offsets = params['offsets']
    lines = params['lines']
    epochs = params['epochs']
    n_times = len(epochs.times)
    tick_list = list()
    # Index range of the epochs currently in view.
    start_idx = int(params['t_start'] / n_times)
    end = params['t_start'] + params['duration']
    end_idx = int(end / n_times)
    xlabels = params['labels'][start_idx:]
    event_ids = params['epochs'].events[:, 2]
    params['ax2'].set_xticklabels(event_ids[start_idx:])
    ax.set_xticklabels(xlabels)
    ylabels = ax.yaxis.get_ticklabels()
    # do the plotting
    for line_idx in range(n_channels):
        ch_idx = line_idx + ch_start
        if line_idx >= len(lines):
            break
        elif ch_idx < len(params['ch_names']):
            if butterfly:
                # Each channel type is drawn at its own fixed row offset;
                # unknown types get their segments cleared.
                ch_type = params['types'][ch_idx]
                if ch_type == 'grad':
                    offset = offsets[0]
                elif ch_type == 'mag':
                    offset = offsets[1]
                elif ch_type == 'eeg':
                    offset = offsets[2]
                elif ch_type == 'eog':
                    offset = offsets[3]
                elif ch_type == 'ecg':
                    offset = offsets[4]
                else:
                    lines[line_idx].set_segments(list())
            else:
                tick_list += [params['ch_names'][ch_idx]]
                offset = offsets[line_idx]
            this_data = data[ch_idx]

            # subtraction here gets correct orientation for flipped ylim
            ydata = offset - this_data
            xdata = params['times'][:params['duration']]
            num_epochs = np.min([params['n_epochs'],
                                 len(epochs.events)])
            # One line segment per epoch so epochs can be colored separately.
            segments = np.split(np.array((xdata, ydata)).T, num_epochs)

            ch_name = params['ch_names'][ch_idx]
            if ch_name in params['info']['bads']:
                # Bad channel: grey traces, with bad epochs overridden in red.
                if not butterfly:
                    this_color = params['bad_color']
                    ylabels[line_idx].set_color(this_color)
                this_color = np.tile((params['bad_color']), (num_epochs, 1))
                for bad_idx in params['bads']:
                    if bad_idx < start_idx or bad_idx > end_idx:
                        continue
                    this_color[bad_idx - start_idx] = (1., 0., 0.)
                lines[line_idx].set_zorder(1)
            else:
                this_color = params['colors'][ch_idx][start_idx:end_idx]
                lines[line_idx].set_zorder(2)
                if not butterfly:
                    ylabels[line_idx].set_color('black')
            lines[line_idx].set_segments(segments)
            lines[line_idx].set_color(this_color)
        else:
            lines[line_idx].set_segments(list())

    # finalize plot
    ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
                False)
    params['ax2'].set_xlim(params['times'][0],
                           params['times'][0] + params['duration'], False)
    if butterfly:
        # Recompute y tick labels in physical units; each present type
        # occupies 4 consecutive ticks with '0.00' at its center.
        factor = -1. / params['butterfly_scale']
        labels = np.empty(20, dtype='S15')
        labels.fill('')
        ticks = ax.get_yticks()
        idx_offset = 1
        if 'grad' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[0]) *
                                               params['scalings']['grad'] *
                                               1e13 * factor)
            idx_offset += 4
        if 'mag' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[1]) *
                                               params['scalings']['mag'] *
                                               1e15 * factor)
            idx_offset += 4
        if 'eeg' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[2]) *
                                               params['scalings']['eeg'] *
                                               1e6 * factor)
            idx_offset += 4
        if 'eog' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[3]) *
                                               params['scalings']['eog'] *
                                               1e6 * factor)
            idx_offset += 4
        if 'ecg' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[4]) *
                                               params['scalings']['ecg'] *
                                               1e6 * factor)
        ax.set_yticklabels(labels, fontsize=12, color='black')
    else:
        ax.set_yticklabels(tick_list, fontsize=12)
    params['vsel_patch'].set_y(ch_start)
    params['fig'].canvas.draw()
    # XXX This is a hack to make sure this figure gets drawn last
    # so that when matplotlib goes to calculate bounds we don't get a
    # CGContextRef error on the MacOSX backend :(
    if params['fig_proj'] is not None:
        params['fig_proj'].canvas.draw()
def _plot_update_epochs_proj(params, bools=None):
    """Recompute the displayed data; only needed when projectors change."""
    if bools is not None:
        # Keep only the projectors the user switched on.
        active = np.where(bools)[0]
        params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
                                   for ii in active]
        params['proj_bools'] = bools
    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
                                        verbose=False)
    n_times = len(params['epochs'].times)
    first = int(params['t_start'] / n_times)
    last = first + params['n_epochs']
    # Concatenate the visible epochs along time.
    data = np.concatenate(params['epochs'][first:last].get_data(), axis=1)
    if params['projector'] is not None:
        data = np.dot(params['projector'], data)
    ch_types = params['types']
    for row, ind in enumerate(params['inds']):
        params['data'][row] = data[ind] / params['scalings'][ch_types[row]]
    params['plot_fun']()
def _handle_picks(epochs):
    """Return default channel picks: ICA sources when present, else data
    channels (MEG/EEG/EOG/ECG)."""
    has_ica_sources = any('ICA' in name for name in epochs.ch_names)
    if has_ica_sources:
        return pick_types(epochs.info, misc=True, ref_meg=False, exclude=[])
    return pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
                      ref_meg=False, exclude=[])
def _plot_window(value, params):
"""Deal with horizontal shift of the viewport."""
max_times = len(params['times']) - params['duration']
if value > max_times:
value = len(params['times']) - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
params['plot_update_proj_callback'](params)
def _plot_vert_lines(params):
    """Redraw the epoch-boundary and zero-time vertical lines."""
    ax = params['ax']
    # Wipe all existing lines before redrawing.
    while len(ax.lines) > 0:
        ax.lines.pop()
    params['vert_lines'] = list()
    params['vertline_t'].set_text('')
    epochs = params['epochs']
    n_times = len(epochs.times)
    n_events = len(epochs.events)
    if params['settings'][3]:  # zeroline visible
        t_zero = np.where(epochs.times == 0.)[0]
        if len(t_zero) == 1:
            # Green line at time zero of every epoch.
            for event_idx in range(n_events):
                x = event_idx * n_times + t_zero[0]
                ax.plot([x, x], ax.get_ylim(), 'g', zorder=3, alpha=0.4)
    # Dashed black line at the start of each epoch.
    for epoch_idx in range(n_events):
        x = epoch_idx * n_times
        ax.plot([x, x], ax.get_ylim(), color='black', linestyle='--',
                zorder=1)
def _pick_bad_epochs(event, params):
    """Toggle the bad status of the epoch under the mouse click."""
    if 'ica' in params:
        # In ICA mode a click on the main axes selects channels instead.
        pos = (event.xdata, event.ydata)
        _pick_bad_channels(pos, params)
        return
    n_times = len(params['epochs'].times)
    start_idx = int(params['t_start'] / n_times)
    xlim = event.inaxes.get_xlim()
    # Translate the click position into an absolute epoch index.
    epoch_idx = start_idx + int(event.xdata / (xlim[1] / params['n_epochs']))
    if epoch_idx > len(params['epochs'].events) - 1:
        return
    if epoch_idx in params['bads']:
        # Unmark: restore default trace colors and scroll-bar patch.
        params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
        for ch_idx in range(len(params['ch_names'])):
            params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
        params['ax_hscroll'].patches[epoch_idx].set_color('w')
        params['ax_hscroll'].patches[epoch_idx].set_zorder(1)
    else:
        # Mark: paint the epoch red everywhere.
        params['bads'] = np.append(params['bads'], epoch_idx)
        hpatch = params['ax_hscroll'].patches[epoch_idx]
        hpatch.set_color((1., 0., 0., 1.))
        hpatch.set_zorder(2)
        hpatch.set_edgecolor('w')
        for ch_idx in range(len(params['ch_names'])):
            params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
    params['plot_fun']()
def _pick_bad_channels(pos, params):
    """Toggle the bad status of the channel label at the given position."""
    text, ch_idx = _label2idx(params, pos)
    if text is None:
        return
    bads = params['info']['bads']  # mutate the list in place
    if text in bads:
        # Remove every occurrence, then restore the default color.
        while text in bads:
            bads.remove(text)
        color = params['def_colors'][ch_idx]
    else:
        bads.append(text)
        color = params['bad_color']
    # +1 skips the background rectangle added before the channel patches.
    params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
    if 'ica' in params:
        params['plot_fun']()
    else:
        params['plot_update_proj_callback'](params)
def _plot_onscroll(event, params):
    """Handle mouse wheel: ctrl+scroll rescales, plain scroll moves the
    channel view (disabled in butterfly mode)."""
    if event.key == 'control':
        # Reuse the keypress handler by translating the scroll direction
        # into a '-' or '+' key.
        event.key = '-' if event.step < 0 else '+'
        _plot_onkey(event, params)
        return
    if params['butterfly']:
        return
    _plot_raw_onscroll(event, params, len(params['ch_names']))
def _mouse_click(event, params):
    """Dispatch mouse clicks in the epochs browser.

    Outside any axes: left click selects a channel label, right click
    opens an ERP/ERF image for it. Inside the scroll bars: left click
    scrolls. Inside the main axes: left click toggles a bad epoch, right
    click toggles a vertical time marker across all visible epochs.
    """
    if event.inaxes is None:
        # Click outside all axes: may still hit the channel labels to the
        # left of the main axes (x < 0 in data coordinates).
        if params['butterfly'] or not params['settings'][0]:
            return
        ax = params['ax']
        ylim = ax.get_ylim()
        # Translate display pixels into main-axes data coordinates.
        pos = ax.transData.inverted().transform((event.x, event.y))
        if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
            return
        if event.button == 1:  # left click
            params['label_click_fun'](pos)
        elif event.button == 3:  # right click
            if 'ica' not in params:
                _, ch_idx = _label2idx(params, pos)
                if ch_idx is None:
                    return
                if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
                                                                'eeg', 'eog']:
                    logger.info('Event related fields / potentials only '
                                'available for MEG and EEG channels.')
                    return
                # Show an ERP/ERF image for the clicked channel.
                fig = plot_epochs_image(params['epochs'],
                                        picks=params['inds'][ch_idx],
                                        fig=params['image_plot'])[0]
                params['image_plot'] = fig
    elif event.button == 1:  # left click
        # vertical scroll bar changed
        if event.inaxes == params['ax_vscroll']:
            if params['butterfly']:
                return
            # Center the channel window on the clicked row.
            ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
            if params['ch_start'] != ch_start:
                params['ch_start'] = ch_start
                params['plot_fun']()
        # horizontal scroll bar changed
        elif event.inaxes == params['ax_hscroll']:
            # find the closest epoch time
            times = params['epoch_times']
            offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
            xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
            _plot_window(xdata, params)
        # main axes
        elif event.inaxes == params['ax']:
            _pick_bad_epochs(event, params)

    elif event.inaxes == params['ax'] and event.button == 2:  # middle click
        params['fig'].canvas.draw()
        if params['fig_proj'] is not None:
            params['fig_proj'].canvas.draw()
    elif event.inaxes == params['ax'] and event.button == 3:  # right click
        n_times = len(params['epochs'].times)
        xdata = int(event.xdata % n_times)
        prev_xdata = 0
        if len(params['vert_lines']) > 0:
            prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
        # Always clear existing marker lines first.
        while len(params['vert_lines']) > 0:
            params['ax'].lines.remove(params['vert_lines'][0][0])
            params['vert_lines'].pop(0)
        if prev_xdata == xdata:  # lines removed
            # Clicking the same time again acts as a toggle-off.
            params['vertline_t'].set_text('')
            params['plot_fun']()
            return
        ylim = params['ax'].get_ylim()
        for epoch_idx in range(params['n_epochs']):  # plot lines
            pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
            params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
                                                          zorder=4))
        params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
        params['plot_fun']()
def _plot_onkey(event, params):
    """Dispatch key presses in the epochs browser.

    up/down scroll channels; left/right scroll epochs; -/+/= rescale;
    pageup/pagedown change channels per view; home/end change epochs per
    view; f11 full screen; b butterfly; o options; h histogram; ? help;
    escape closes the figure.
    """
    import matplotlib.pyplot as plt
    if event.key == 'down':
        if params['butterfly']:
            return
        params['ch_start'] += params['n_channels']
        _channels_changed(params, len(params['ch_names']))
    elif event.key == 'up':
        if params['butterfly']:
            return
        params['ch_start'] -= params['n_channels']
        _channels_changed(params, len(params['ch_names']))
    elif event.key == 'left':
        sample = params['t_start'] - params['duration']
        sample = np.max([0, sample])
        _plot_window(sample, params)
    elif event.key == 'right':
        sample = params['t_start'] + params['duration']
        sample = np.min([sample, params['times'][-1] - params['duration']])
        # Snap to the nearest epoch boundary.
        times = params['epoch_times']
        xdata = times.flat[np.abs(times - sample).argmin()]
        _plot_window(xdata, params)
    elif event.key == '-':
        if params['butterfly']:
            params['butterfly_scale'] /= 1.1
        else:
            params['scale_factor'] /= 1.1
        params['plot_fun']()
    elif event.key in ['+', '=']:
        if params['butterfly']:
            params['butterfly_scale'] *= 1.1
        else:
            params['scale_factor'] *= 1.1
        params['plot_fun']()
    elif event.key == 'f11':
        mng = plt.get_current_fig_manager()
        mng.full_screen_toggle()
    elif event.key == 'pagedown':
        # Show one channel fewer: recompute row offsets and drop the last
        # line collection.
        if params['n_channels'] == 1 or params['butterfly']:
            return
        n_channels = params['n_channels'] - 1
        ylim = params['ax'].get_ylim()
        offset = ylim[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].collections.pop()
        params['ax'].set_yticks(params['offsets'])
        params['lines'].pop()
        params['vsel_patch'].set_height(n_channels)
        params['plot_fun']()
    elif event.key == 'pageup':
        # Show one channel more: add a fresh line collection.
        if params['butterfly']:
            return
        from matplotlib.collections import LineCollection
        n_channels = params['n_channels'] + 1
        ylim = params['ax'].get_ylim()
        offset = ylim[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
                            zorder=2, picker=3.)
        params['ax'].add_collection(lc)
        params['ax'].set_yticks(params['offsets'])
        params['lines'].append(lc)
        params['vsel_patch'].set_height(n_channels)
        params['plot_fun']()
    elif event.key == 'home':
        # Show one epoch fewer per view.
        n_epochs = params['n_epochs'] - 1
        if n_epochs <= 0:
            return
        n_times = len(params['epochs'].times)
        ticks = params['epoch_times'] + 0.5 * n_times
        params['ax2'].set_xticks(ticks[:n_epochs])
        params['n_epochs'] = n_epochs
        params['duration'] -= n_times
        params['hsel_patch'].set_width(params['duration'])
        params['data'] = params['data'][:, :-n_times]
        params['plot_update_proj_callback'](params)
    elif event.key == 'end':
        # Show one epoch more per view.
        n_epochs = params['n_epochs'] + 1
        n_times = len(params['epochs'].times)
        if n_times * n_epochs > len(params['times']):
            return
        ticks = params['epoch_times'] + 0.5 * n_times
        params['ax2'].set_xticks(ticks[:n_epochs])
        params['n_epochs'] = n_epochs
        if len(params['vert_lines']) > 0:
            # Extend the vertical time marker into the newly shown epoch.
            ax = params['ax']
            pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
            params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
                                                zorder=3))
        params['duration'] += n_times
        if params['t_start'] + params['duration'] > len(params['times']):
            params['t_start'] -= n_times
            params['hsel_patch'].set_x(params['t_start'])
        params['hsel_patch'].set_width(params['duration'])
        params['data'] = np.zeros((len(params['data']), params['duration']))
        params['plot_update_proj_callback'](params)
    elif event.key == 'b':
        # Toggle butterfly mode; close the options figure first if open.
        if params['fig_options'] is not None:
            plt.close(params['fig_options'])
            params['fig_options'] = None
        _prepare_butterfly(params)
        _plot_traces(params)
    elif event.key == 'o':
        if not params['butterfly']:
            _open_options(params)
    elif event.key == 'h':
        _plot_histogram(params)
    elif event.key == '?':
        _onclick_help(event, params)
    elif event.key == 'escape':
        plt.close(params['fig'])
def _prepare_butterfly(params):
    """Toggle the viewer between butterfly and regular trace layout.

    In butterfly mode all channels of one type are overlaid on a single
    row, with one annotated row per channel type present. The function
    flips ``params['butterfly']`` at the end so repeated calls toggle.
    """
    from matplotlib.collections import LineCollection
    butterfly = not params['butterfly']
    if butterfly:
        types = set(['grad', 'mag', 'eeg', 'eog',
                     'ecg']) & set(params['types'])
        if len(types) < 1:
            return
        params['ax_vscroll'].set_visible(False)
        ax = params['ax']
        labels = ax.yaxis.get_ticklabels()
        for label in labels:
            label.set_visible(True)
        ylim = (5. * len(types), 0.)  # inverted y-axis: 0 at the top
        ax.set_ylim(ylim)
        offset = ylim[0] / (4. * len(types))
        ticks = np.arange(0, ylim[0], offset)
        # Pad/truncate to exactly 20 tick positions (missing ones -> 0).
        ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
        ax.set_yticks(ticks)
        used_types = 0
        params['offsets'] = [ticks[2]]
        # One annotated row per channel type, in this fixed display order.
        type_labels = (('grad', 'Grad (fT/cm)'), ('mag', 'Mag (fT)'),
                       ('eeg', 'EEG (uV)'), ('eog', 'EOG (uV)'),
                       ('ecg', 'ECG (uV)'))
        for ch_type, label_text in type_labels:
            if ch_type not in types:
                continue
            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
            params['ax2'].annotate(label_text, xy=pos, xytext=(-70, 0),
                                   ha='left', size=12, va='center',
                                   xycoords='axes fraction', rotation=90,
                                   textcoords='offset points')
            used_types += 1
            if ch_type != 'ecg':
                # Offset for the next row; the original per-type blocks
                # never appended one after the final 'ecg' block.
                params['offsets'].append(ticks[2 + used_types * 4])
        # Make sure one LineCollection exists per picked channel.
        while len(params['lines']) < len(params['picks']):
            lc = LineCollection(list(), antialiased=False, linewidths=0.5,
                                zorder=2, picker=3.)
            ax.add_collection(lc)
            params['lines'].append(lc)
    else:  # change back to default view
        labels = params['ax'].yaxis.get_ticklabels()
        for label in labels:
            label.set_visible(params['settings'][0])
        params['ax_vscroll'].set_visible(True)
        # Remove the per-type annotations added above.
        while len(params['ax2'].texts) > 0:
            params['ax2'].texts.pop()
        n_channels = params['n_channels']
        # Drop the extra line collections created for butterfly mode.
        while len(params['lines']) > n_channels:
            params['ax'].collections.pop()
            params['lines'].pop()
        ylim = (25., 0.)
        params['ax'].set_ylim(ylim)
        offset = ylim[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['ax'].set_yticks(params['offsets'])
    params['butterfly'] = butterfly
def _onpick(event, params):
    """Show the picked channel's name as a floating text label."""
    if event.mouseevent.button != 2 or not params['butterfly']:
        return  # labels are only added with the middle mouse button
    artist = event.artist
    idx = np.where([line is artist for line in params['lines']])[0][0]
    label = params['text']
    label.set_text(params['ch_names'][idx])
    label.set_x(event.mouseevent.xdata)
    label.set_y(event.mouseevent.ydata)
    label.set_visible(True)
    # Drawing is deferred to _mouse_click: hundreds of lines may be picked
    # at once in butterfly mode, so redrawing here would be far too slow.
def _close_event(event, params):
    """Function to drop selected bad epochs. Called on closing of the plot."""
    # Permanently remove the epochs the user marked as bad, then copy the
    # bad-channel selection made in the viewer back onto the Epochs object.
    params['epochs'].drop_epochs(params['bads'])
    params['epochs'].info['bads'] = params['info']['bads']
    logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
def _resize_event(event, params):
    """Persist the new window size to the MNE config and re-layout."""
    dims = params['fig'].get_size_inches()
    set_config('MNE_BROWSE_RAW_SIZE', ','.join(str(dim) for dim in dims))
    _layout_figure(params)
def _update_channels_epochs(event, params):
    """Function for changing the amount of channels and epochs per view."""
    from matplotlib.collections import LineCollection
    # Channels
    n_channels = int(np.around(params['channel_slider'].val))
    offset = params['ax'].get_ylim()[0] / n_channels
    # Vertical center position for each displayed channel row.
    params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
    # Drop surplus LineCollections or add new ones so that exactly one
    # collection exists per displayed channel.
    while len(params['lines']) > n_channels:
        params['ax'].collections.pop()
        params['lines'].pop()
    while len(params['lines']) < n_channels:
        lc = LineCollection(list(), linewidths=0.5, antialiased=False,
                            zorder=2, picker=3.)
        params['ax'].add_collection(lc)
        params['lines'].append(lc)
    params['ax'].set_yticks(params['offsets'])
    params['vsel_patch'].set_height(n_channels)
    params['n_channels'] = n_channels
    # Epochs
    n_epochs = int(np.around(params['epoch_slider'].val))
    n_times = len(params['epochs'].times)
    # Tick at the horizontal center of each epoch.
    ticks = params['epoch_times'] + 0.5 * n_times
    params['ax2'].set_xticks(ticks[:n_epochs])
    params['n_epochs'] = n_epochs
    params['duration'] = n_times * n_epochs
    params['hsel_patch'].set_width(params['duration'])
    # Reallocate the data buffer for the new view width; it is refilled
    # by the projection callback invoked below.
    params['data'] = np.zeros((len(params['data']), params['duration']))
    if params['t_start'] + n_times * n_epochs > len(params['times']):
        # Clamp the view so it does not run past the end of the data.
        params['t_start'] = len(params['times']) - n_times * n_epochs
        params['hsel_patch'].set_x(params['t_start'])
    params['plot_update_proj_callback'](params)
def _toggle_labels(label, params):
    """Toggle one of the viewer's label/zeroline settings.

    Parameters
    ----------
    label : str
        Text of the checkbox that was clicked; selects which entry of
        ``params['settings']`` to flip and which axis labels to toggle.
    params : dict
        Shared viewer state.
    """
    # Map checkbox text -> (settings index, axis whose tick labels toggle).
    axes_map = {
        'Channel names visible': (0, params['ax'].yaxis),
        'Event-id visible': (1, params['ax2'].xaxis),
        'Epoch-id visible': (2, params['ax'].xaxis),
    }
    if label in axes_map:
        idx, axis = axes_map[label]
        params['settings'][idx] = not params['settings'][idx]
        for ticklabel in axis.get_ticklabels():
            ticklabel.set_visible(params['settings'][idx])
    elif label == 'Zeroline visible':
        params['settings'][3] = not params['settings'][3]
        _plot_vert_lines(params)
    params['fig'].canvas.draw()
    if params['fig_proj'] is not None:
        params['fig_proj'].canvas.draw()
def _open_options(params):
    """Function for opening the option window."""
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    if params['fig_options'] is not None:
        # turn off options dialog
        plt.close(params['fig_options'])
        params['fig_options'] = None
        return
    # Build a small bar-less figure holding the sliders and checkboxes.
    width = 10
    height = 3
    fig_options = figure_nobar(figsize=(width, height), dpi=80)
    fig_options.canvas.set_window_title('View settings')
    params['fig_options'] = fig_options
    # Axes rectangles are [left, bottom, width, height] in figure fraction.
    ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
    ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
    ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
    ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
    plt.axis('off')
    params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
    params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
                                                  len(params['ch_names']),
                                                  valfmt='%0.0f',
                                                  valinit=params['n_channels'])
    params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
                                                len(params['epoch_times']),
                                                valfmt='%0.0f',
                                                valinit=params['n_epochs'])
    params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
                                                  ['Channel names visible',
                                                   'Event-id visible',
                                                   'Epoch-id visible',
                                                   'Zeroline visible'],
                                                  actives=params['settings'])
    # Wire the widgets to their callbacks with params pre-bound.
    update = partial(_update_channels_epochs, params=params)
    params['update_button'].on_clicked(update)
    labels_callback = partial(_toggle_labels, params=params)
    params['checkbox'].on_clicked(labels_callback)
    close_callback = partial(_settings_closed, params=params)
    params['fig_options'].canvas.mpl_connect('close_event', close_callback)
    # Drawing can fail on some backends (e.g. headless); best-effort only.
    try:
        params['fig_options'].canvas.draw()
        params['fig_options'].show(warn=False)
        if params['fig_proj'] is not None:
            params['fig_proj'].canvas.draw()
    except Exception:
        pass
def _settings_closed(events, params):
    """Function to handle close event from settings dialog."""
    # Clearing the reference lets _open_options create a fresh dialog later.
    params['fig_options'] = None
def _plot_histogram(params):
    """Plot a histogram of peak-to-peak amplitudes per channel type.

    One subplot is drawn per channel type present ('eeg', 'mag', 'grad');
    the configured rejection threshold, if any, is drawn as a red line.
    """
    import matplotlib.pyplot as plt
    epochs = params['epochs']
    p2p = np.ptp(epochs.get_data(), axis=2)
    types = list()
    data = list()
    # Collect the peak-to-peak values for each channel type in a fixed
    # order so the subplot layout is deterministic.
    for ch_type in ('eeg', 'mag', 'grad'):
        if ch_type not in params['types']:
            continue
        vals = np.array([p2p.T[i] for i, x in enumerate(params['types'])
                         if x == ch_type])
        data.append(vals.ravel())
        types.append(ch_type)
    params['histogram'] = plt.figure()
    scalings = _handle_default('scalings')
    units = _handle_default('units')
    titles = _handle_default('titles')
    colors = _handle_default('color')
    for idx in range(len(types)):
        ax = plt.subplot(len(types), 1, idx + 1)
        plt.xlabel(units[types[idx]])
        plt.ylabel('count')
        color = colors[types[idx]]
        rej = None
        if epochs.reject is not None and types[idx] in epochs.reject.keys():
            # Scale the rejection threshold into plotting units and clip
            # the histogram range just past it.
            rej = epochs.reject[types[idx]] * scalings[types[idx]]
            rng = [0., rej * 1.1]
        else:
            rng = None
        plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
                 range=rng)
        if rej is not None:
            ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
        plt.title(titles[types[idx]])
    params['histogram'].suptitle('Peak-to-peak histogram', y=0.99)
    params['histogram'].subplots_adjust(hspace=0.6)
    # Showing can fail on some backends; keep it best-effort (the bare
    # ``except:`` was narrowed to ``except Exception:``).
    try:
        params['histogram'].show(warn=False)
    except Exception:
        pass
    if params['fig_proj'] is not None:
        params['fig_proj'].canvas.draw()
def _label2idx(params, pos):
    """Map a click on the y-axis labels to ``(channel name, index)``.

    Returns ``(None, None)`` when the clicked tick label is empty.
    """
    tick_labels = params['ax'].yaxis.get_ticklabels()
    offsets = np.array(params['offsets']) + params['offsets'][0]
    row = np.searchsorted(offsets, pos[1])
    name = tick_labels[row].get_text()
    if not name:
        return None, None
    return name, params['ch_start'] + row
| bsd-3-clause | -2,441,897,527,907,268,600 | 39.392952 | 79 | 0.544682 | false |
romain-fontugne/disco | src/plotFunctions.py | 1 | 16050 | from __future__ import division
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
#plt=matplotlib.pyplot
import datetime as dt
from datetime import datetime
import numpy as np
import traceback
import os
import pandas as pd
import threading
import operator
from scipy.interpolate import UnivariateSpline
class plotter():
    """Thread-safe collection of matplotlib plotting helpers.

    All public methods serialize on ``self.lock`` so one plotter can be
    shared between threads. Figures are numbered sequentially via
    ``getFigNum()`` and saved to ``<name>_<suffix>.<outputFormat>``.
    Fixes applied: Python-3-fatal ``iteritems()``/dict-view indexing,
    bare ``except:`` clauses narrowed to ``except Exception:``, a leaked
    figure in ``plotDensities``, a hard-coded figure number in
    ``plotDensity``, a mutable default argument in ``ecdf``, and locals
    shadowing the ``dict``/``sorted`` builtins.
    """
    def __init__(self):
        self.figNum=0               # monotonically increasing figure counter
        self.suffix='live'          # appended to every output file name
        self.outputFormat='eps'     # output image format / file extension
        self.year=None              # set externally; used by plotBursts
        self.month = None
        self.day = None
        self.lock = threading.RLock()
    def plotDict(self,d,outFileName):
        """Bar chart of a ``{label: value}`` mapping."""
        if len(d)==0:
            return
        self.lock.acquire()
        try:
            num=self.getFigNum()
            outName=outFileName+'_'+self.suffix+'.'+self.outputFormat
            fig = plt.figure(num)
            print('Plotting Figure {0}: {1}'.format(num,outName))
            plt.tick_params(axis='both', which='major', labelsize=7)
            X = np.arange(len(d))
            # list() so dict views work under Python 3 as well.
            plt.bar(X, list(d.values()), align='center')#, width=0.5)
            plt.xticks(X, list(d.keys()), rotation='80')
            values=list(d.values())
            #print(values)
            try:
                if len(values) == 1:
                    ymax = values[0] + 1
                else:
                    ymax = max(values) + 3
            except Exception:
                pass
            #plt.ylim(0, ymax)
            #plt.yscale('log')
            #plt.show()
            plt.autoscale()
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plotList(self,dataIn,outfileName,titleInfo=''):
        """Plot a time series of event counts from a list of unix timestamps.

        Timestamps are binned into 20-second buckets covering the whole
        UTC day of the earliest timestamp.
        """
        if len(dataIn)==0:
            return
        self.lock.acquire()
        try:
            #print(dataIn)
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            data = [x / 1 for x in dataIn]
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig = plt.figure(num,figsize=(10,8))
            lowestTime=min(data)
            #binStart=int(min(data))
            year, month, day = datetime.utcfromtimestamp(float(lowestTime)).strftime("%Y-%m-%d").split('-')
            #binStop=int(max(data))
            # Bin edges span the whole day of the earliest sample, 20 s apart.
            binStart=int(datetime(int(year),int(month),int(day), 0, 0).strftime('%s'))
            binStop=int(datetime(int(year), int(month), int(day), 23, 59).strftime('%s'))
            bins = range(binStart, binStop+1, 20)
            digitized=np.digitize(data, bins)
            #print(digitized)
            dtList=[]
            for v in bins:
                dtList.append(dt.datetime.utcfromtimestamp(v))
            # counts: bin start datetime -> number of samples in that bin
            # (renamed from ``dict`` to stop shadowing the builtin).
            counts={}
            for val in digitized:
                if dtList[val-1] not in counts.keys():
                    counts[dtList[val-1]]=1
                else:
                    counts[dtList[val-1]]+=1
            #print(min(counts.keys()),max(counts.keys()))
            X=sorted(counts.keys())
            Y=list(counts.values())
            plt.xlim(datetime(int(year),int(month),int(day), 0, 0),datetime(int(year),int(month),int(day), 23, 59))
            plt.plot(X,Y)
            plt.title(titleInfo)
            #print(counts)
            plt.ylim(0,max(Y)+5)
            fig.autofmt_xdate()
            plt.autoscale()
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plotPDF(self,dataIn1,outfileName,xlabel='',ylabel='',titleInfo=''):
        """Demo/stub: plots a spline fit of a *random normal* sample.

        NOTE(review): the parameters are currently ignored and the plot is
        shown, not saved -- looks like an unfinished experiment; confirm
        before relying on it.
        """
        N = 1000
        n = N/10
        s = np.random.normal(size=N) # generate your data sample with N elements
        p, x = np.histogram(s, bins=n) # bin it into n = N/10 bins
        x = x[:-1] + (x[1] - x[0])/2 # convert bin edges to centers
        f = UnivariateSpline(x, p, s=n)
        plt.plot(x, f(x))
        plt.show()
    def plot2ListsHist(self,dataIn1,dataIn2,outfileName,xlabel='',ylabel='',titleInfo=''):
        """Overlay two binned count curves built from two numeric lists.

        NOTE(review): X1/X2 (sorted bin indices) and Y1/Y2 (per-bin counts)
        can have different lengths; verify inputs before use.
        """
        if len(dataIn1)==0:
            return
        if len(dataIn2)==0:
            return
        self.lock.acquire()
        try:
            #print(dataIn)
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig = plt.figure(num,figsize=(10,8))
            binStart1=0
            binStop1=int(max(dataIn1))
            binStart2=0
            binStop2=int(max(dataIn2))
            bins1 = range(binStart1, binStop1+1, 10)
            digitized1=np.digitize(dataIn1, bins1)
            bins2 = range(binStart2, binStop2+1, 10)
            digitized2=np.digitize(dataIn2, bins2)
            dtList1=[]
            for v in bins1:
                dtList1.append(dt.datetime.utcfromtimestamp(v))
            dict1={}
            for val in digitized1:
                if dtList1[val-1] not in dict1.keys():
                    dict1[dtList1[val-1]]=1
                else:
                    dict1[dtList1[val-1]]+=1
            X1=sorted(digitized1)
            Y1=list(dict1.values())
            dtList2=[]
            for v in bins2:
                dtList2.append(dt.datetime.utcfromtimestamp(v))
            dict2={}
            for val in digitized2:
                if dtList2[val-1] not in dict2.keys():
                    dict2[dtList2[val-1]]=1
                else:
                    dict2[dtList2[val-1]]+=1
            X2=sorted(digitized2)
            Y2=list(dict2.values())
            plt.plot(X1,Y1)
            plt.plot(X2,Y2)
            plt.title(titleInfo)
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)
            plt.autoscale()
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plotBursts(self,bursts,name):
        """Plot burst levels as filled rectangles over one UTC day.

        ``bursts`` is an iterable of ``(level, t_start, t_end)`` unix-time
        triples; ``self.year/month/day`` must be set for the x-limits.
        """
        if len(bursts)==0:
            print('No bursts!')
            return
        self.lock.acquire()
        try:
            outName=name+'_'+self.suffix+'.'+self.outputFormat
            num=self.getFigNum()
            fig = plt.figure(num, figsize=(15,3))
            ax = fig.add_subplot(1,1,1)
            print('Plotting Figure {0}: {1}'.format(num,outName))
            #print(bursts)
            # Group the rectangle corner points by burst level.
            b = {}
            for q, ts, te in bursts:
                if not q in b:
                    b[q] = {"x":[], "y":[]}
                b[q]["x"].append(dt.datetime.utcfromtimestamp(ts))
                b[q]["y"].append(0)
                b[q]["x"].append(dt.datetime.utcfromtimestamp(ts))
                b[q]["y"].append(q)
                b[q]["x"].append(dt.datetime.utcfromtimestamp(te))
                b[q]["y"].append(q)
                b[q]["x"].append(dt.datetime.utcfromtimestamp(te))
                b[q]["y"].append(0)
            # .items() instead of Python-2-only .iteritems().
            for q, val in b.items():
                plt.plot(val["x"], val["y"], label=q,color='#11557c')
                plt.fill_between(val["x"], val["y"],0,color='#11557c')
            plt.ylabel("Burst level")
            plt.xlim([dt.datetime(int(self.year),int(self.month),int(self.day),0,0), dt.datetime(int(self.year),int(self.month),int(self.day),23,59)])
            plt.ylim([0, 15])
            fig.autofmt_xdate()
            # plt.autoscale()
            ax.xaxis.set_major_locator(mdates.HourLocator(interval=1)) #to get a tick every 15 minutes
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) #optional formatting
            ax.xaxis.set_minor_locator(mdates.MinuteLocator(byminute=[30])) #to get a tick every 15 minutes
            ax.xaxis.set_minor_formatter(mdates.DateFormatter('')) #optional formatting
            # ax.set_xticklabels(["","08:00","","09:00","","10:00","","11:00","","12:00",""])
            plt.grid(True, which="minor", color="0.6", linestyle=":")
            plt.grid(True, which="major", color="k", linestyle=":")
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def getFigNum(self):
        """Atomically increment and return the figure counter."""
        self.lock.acquire()
        try:
            self.figNum+=1
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
        #print(self.figNum)
        return self.figNum
    def setSuffix(self,suffixName):
        """Set the suffix appended to every output file name."""
        self.lock.acquire()
        try:
            self.suffix=suffixName
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plotBinned(self,dataIn,binSize,outfileName,xlabel='',ylabel='',titleInfo=''):
        """Histogram ``dataIn`` into ``binSize`` bins and save the figure."""
        if len(dataIn)==0:
            return
        self.lock.acquire()
        try:
            #print(dataIn)
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            data = [x / 1 for x in dataIn]
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig = plt.figure(num,figsize=(10,8))
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)
            plt.hist(data,bins=binSize)
            plt.title(titleInfo)
            plt.autoscale()
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plotDensity(self,data,outfileName,xlabel='',ylabel='',titleInfo=''):
        """Kernel-density plot of ``data`` via pandas."""
        self.lock.acquire()
        try:
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            # Use the shared counter instead of the previous hard-coded "1".
            num=self.getFigNum()
            df = pd.DataFrame(data)
            ax=df.plot(kind='density')
            fig = ax.get_figure()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plotDensities(self,data1,data2,outfileName,data1Label='',data2Label='',xlabel='',ylabel='Density',titleInfo='',xticks=None,xlim=None):
        """Overlay the kernel-density plots of two datasets."""
        self.lock.acquire()
        try:
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            # Note: no plt.figure(num) here -- df.plot creates its own
            # figure, and the previous empty figure was never closed.
            df1 = pd.DataFrame(data1)
            df1.columns = [data1Label]
            ax1=df1.plot(kind='density',xticks=xticks,xlim=xlim)
            ax1.set_xlabel(xlabel,fontsize=16)
            ax1.set_ylabel(ylabel,fontsize=16)
            df2 = pd.DataFrame(data2)
            df2.columns = [data2Label]
            df2.plot(kind='density',ax=ax1)
            fig = ax1.get_figure()
            fig.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plot2Hists(self,data1,data2,outfileName,data1Label='',data2Label='',xlabel='',ylabel='',titleInfo='',xticks=None,xlim=None):
        """Overlay two step histograms (70 bins over [0, 2])."""
        self.lock.acquire()
        try:
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig = plt.figure(num,figsize=(5.8,4.6))
            ax = plt.subplot(111)
            # Shrink the axes to leave room for the legend below the plot.
            box = ax.get_position()
            ax.set_position([box.x0, box.y0 + box.height * 0.15,box.width, box.height * 0.85])
            ax.hist(data1,bins=70,range=[0,2],histtype='step',lw=2,label=data1Label)
            ax.hist(data2,bins=70,range=[0,2],histtype='step',lw=2,label=data2Label)
            plt.xlabel(xlabel,fontsize = 18)
            plt.ylabel(ylabel,fontsize = 18)
            plt.xticks(fontsize = 18)
            plt.yticks(fontsize = 18)
            ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.18),fancybox=True, shadow=True, ncol=2)
            #plt.legend()
            #plt.tight_layout()
            plt.autoscale()
            #plt.show()
            fig.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def plotHeatMap(self,data1,data2,outfileName,xlabel='',ylabel='',titleInfo='',xticks=None,xlim=None):
        """Hexbin heat map of two paired value lists over [-0.1, 2]^2."""
        self.lock.acquire()
        try:
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig = plt.figure(num)
            # Generate some test data
            #heatmap, xedges, yedges = np.histogram2d(data1,data2, bins=20)
            #extent = [0,3,0,3]
            #plt.imshow(heatmap, extent=extent)
            #plt.hexbin(data1,data2,cmap=plt.cm.Blues_r, bins=30)
            #plt.hist2d(data1,data2,cmap=plt.cm.Blues,bins=70)
            #extent = [0,3,0,3]
            #plt.imshow(heatmap, extent=extent)
            #plt.xlim(0,2)
            #plt.ylim(0,2)
            #plt.hist2d(data1,data2,bins=70);
            plt.hexbin(data1,data2, cmap="OrRd", gridsize=70, vmax=10, vmin=0, mincnt=0)
            plt.xlim([-0.1,2])
            plt.ylim([-0.1,2])
            plt.colorbar()
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)
            plt.xticks(fontsize = 18)
            plt.yticks(fontsize = 18)
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def ecdf(self,data,outfileName,xlabel='',ylabel='CDF',titleInfo='',xlim=()):
        """Plot the empirical CDF of ``data``.

        ``xlim`` default changed from a mutable ``[]`` to ``()``; behavior
        is identical since it was never mutated.
        """
        self.lock.acquire()
        try:
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig = plt.figure(num, figsize=(4,3))
            plt.xlabel(xlabel) #,fontsize=18)
            plt.ylabel(ylabel) #,fontsize=18)
            plt.tick_params() #labelsize=18)
            if len(xlim)>0:
                plt.xlim(xlim)
            plt.grid(True)
            plt.title(titleInfo) #,fontsize=16)
            # renamed from ``sorted`` to stop shadowing the builtin
            ordered=np.sort(data)
            yvals=np.arange(len(ordered))/float(len(ordered))
            plt.plot( ordered, yvals,lw=2)
            plt.autoscale()
            plt.tight_layout()
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
    def ecdfs(self,data1,data2,outfileName,xlabel='',ylabel='',titleInfo=''):
        """Overlay the empirical CDFs of two datasets."""
        self.lock.acquire()
        try:
            outName=outfileName+'_'+self.suffix+'.'+self.outputFormat
            num=self.getFigNum()
            print('Plotting Figure {0}: {1}'.format(num,outName))
            fig = plt.figure(num,figsize=(8,6))
            plt.xlabel(xlabel)
            plt.ylabel(ylabel)
            plt.title(titleInfo)
            plt.grid()
            #data1
            sorted1=np.sort(data1)
            yvals=np.arange(len(sorted1))/float(len(sorted1))
            plt.plot(sorted1, yvals)
            #data2
            sorted2=np.sort(data2)
            yvals=np.arange(len(sorted2))/float(len(sorted2))
            plt.plot(sorted2, yvals)
            plt.autoscale()
            plt.savefig(outName)
            plt.close(fig)
        except Exception:
            traceback.print_exc()
        finally:
            self.lock.release()
| gpl-3.0 | 1,524,251,697,561,568,300 | 34.274725 | 150 | 0.516449 | false |
ericmjl/bokeh | examples/app/movies/main.py | 1 | 4195 | import sqlite3 as sql
from os.path import dirname, join
import numpy as np
import pandas.io.sql as psql
from bokeh.io import curdoc
from bokeh.layouts import column, layout
from bokeh.models import ColumnDataSource, Div, Select, Slider, TextInput
from bokeh.plotting import figure
from bokeh.sampledata.movies_data import movie_path
# Load the movie metadata from the bundled SQLite database.
conn = sql.connect(movie_path)
query = open(join(dirname(__file__), 'query.sql')).read()
movies = psql.read_sql(query, conn)
# Color/alpha encode Oscar winners (orange, opaque) vs. the rest.
movies["color"] = np.where(movies["Oscars"] > 0, "orange", "grey")
movies["alpha"] = np.where(movies["Oscars"] > 0, 0.9, 0.25)
movies.fillna(0, inplace=True) # just replace missing values with zero
# Human-readable box-office string for the hover tooltip.
movies["revenue"] = movies.BoxOffice.apply(lambda x: '{:,d}'.format(int(x)))
# Razzie "winners" get their own color.
with open(join(dirname(__file__), "razzies-clean.csv")) as f:
    razzies = f.read().splitlines()
movies.loc[movies.imdbID.isin(razzies), "color"] = "purple"
movies.loc[movies.imdbID.isin(razzies), "alpha"] = 0.9
# Maps the axis-selector labels to DataFrame column names.
axis_map = {
    "Tomato Meter": "Meter",
    "Numeric Rating": "numericRating",
    "Number of Reviews": "Reviews",
    "Box Office (dollars)": "BoxOffice",
    "Length (minutes)": "Runtime",
    "Year": "Year",
}
desc = Div(text=open(join(dirname(__file__), "description.html")).read(), sizing_mode="stretch_width")
# Create Input controls
reviews = Slider(title="Minimum number of reviews", value=80, start=10, end=300, step=10)
min_year = Slider(title="Year released", start=1940, end=2014, value=1970, step=1)
max_year = Slider(title="End Year released", start=1940, end=2014, value=2014, step=1)
oscars = Slider(title="Minimum number of Oscar wins", start=0, end=4, value=0, step=1)
boxoffice = Slider(title="Dollars at Box Office (millions)", start=0, end=800, value=0, step=1)
genre = Select(title="Genre", value="All",
               options=open(join(dirname(__file__), 'genres.txt')).read().split())
director = TextInput(title="Director name contains")
cast = TextInput(title="Cast names contains")
x_axis = Select(title="X Axis", options=sorted(axis_map.keys()), value="Tomato Meter")
y_axis = Select(title="Y Axis", options=sorted(axis_map.keys()), value="Number of Reviews")
# Create Column Data Source that will be used by the plot
source = ColumnDataSource(data=dict(x=[], y=[], color=[], title=[], year=[], revenue=[], alpha=[]))
TOOLTIPS=[
    ("Title", "@title"),
    ("Year", "@year"),
    ("$", "@revenue")
]
p = figure(plot_height=600, plot_width=700, title="", toolbar_location=None, tooltips=TOOLTIPS, sizing_mode="scale_both")
p.circle(x="x", y="y", source=source, size=7, color="color", line_color=None, fill_alpha="alpha")
def select_movies():
    """Return the subset of ``movies`` matching the current widget values.

    Numeric slider filters are applied first, then the genre/director/cast
    substring filters. Matches are case-sensitive; rows whose field is not
    a string (NaN under the ``.str`` accessor) are excluded via
    ``na=False`` -- the same effect as the previous ``== True`` comparison,
    stated explicitly.
    """
    genre_val = genre.value
    director_val = director.value.strip()
    cast_val = cast.value.strip()
    selected = movies[
        (movies.Reviews >= reviews.value) &
        (movies.BoxOffice >= (boxoffice.value * 1e6)) &
        (movies.Year >= min_year.value) &
        (movies.Year <= max_year.value) &
        (movies.Oscars >= oscars.value)
    ]
    if (genre_val != "All"):
        selected = selected[selected.Genre.str.contains(genre_val, na=False)]
    if (director_val != ""):
        selected = selected[selected.Director.str.contains(director_val, na=False)]
    if (cast_val != ""):
        selected = selected[selected.Cast.str.contains(cast_val, na=False)]
    return selected
def update():
    """Refresh the scatter plot from the current widget state."""
    selected = select_movies()
    x_name = axis_map[x_axis.value]
    y_name = axis_map[y_axis.value]
    p.xaxis.axis_label = x_axis.value
    p.yaxis.axis_label = y_axis.value
    p.title.text = "%d movies selected" % len(selected)
    # Push the filtered columns into the shared ColumnDataSource.
    source.data = dict(
        x=selected[x_name],
        y=selected[y_name],
        color=selected["color"],
        title=selected["Title"],
        year=selected["Year"],
        revenue=selected["revenue"],
        alpha=selected["alpha"],
    )
# Re-filter and redraw whenever any control changes.
controls = [reviews, boxoffice, genre, min_year, max_year, oscars, director, cast, x_axis, y_axis]
for control in controls:
    control.on_change('value', lambda attr, old, new: update())
# Fixed-width sidebar holding all the input widgets.
inputs = column(*controls, width=320, height=1000)
inputs.sizing_mode = "fixed"
l = layout([
    [desc],
    [inputs, p],
], sizing_mode="scale_both")
update()  # initial load of the data
curdoc().add_root(l)
curdoc().title = "Movies"
| bsd-3-clause | 6,006,346,634,513,225,000 | 35.163793 | 121 | 0.65435 | false |
keras-team/keras-io | examples/timeseries/timeseries_weather_forecasting.py | 1 | 11224 | """
Title: Timeseries forecasting for weather prediction
Authors: [Prabhanshu Attri](https://prabhanshu.com/github), [Yashika Sharma](https://github.com/yashika51), [Kristi Takach](https://github.com/ktakattack), [Falak Shah](https://github.com/falaktheoptimist)
Date created: 2020/06/23
Last modified: 2020/07/20
Description: This notebook demonstrates how to do timeseries forecasting using a LSTM model.
"""
"""
## Setup
This example requires TensorFlow 2.3 or higher.
"""
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
"""
## Climate Data Time-Series
We will be using the Jena Climate dataset recorded by the
[Max Planck Institute for Biogeochemistry](https://www.bgc-jena.mpg.de/wetter/).
The dataset consists of 14 features such as temperature, pressure, humidity etc, recorded once per
10 minutes.
**Location**: Weather Station, Max Planck Institute for Biogeochemistry
in Jena, Germany
**Time-frame Considered**: Jan 10, 2009 - December 31, 2016
The table below shows the column names, their value formats, and their description.
Index| Features |Format |Description
-----|---------------|-------------------|-----------------------
1 |Date Time |01.01.2009 00:10:00|Date-time reference
2 |p (mbar) |996.52 |The pascal SI derived unit of pressure used to quantify internal pressure. Meteorological reports typically state atmospheric pressure in millibars.
3 |T (degC) |-8.02 |Temperature in Celsius
4 |Tpot (K) |265.4 |Temperature in Kelvin
5 |Tdew (degC) |-8.9 |Temperature in Celsius relative to humidity. Dew Point is a measure of the absolute amount of water in the air, the DP is the temperature at which the air cannot hold all the moisture in it and water condenses.
6 |rh (%) |93.3 |Relative Humidity is a measure of how saturated the air is with water vapor, the %RH determines the amount of water contained within collection objects.
7 |VPmax (mbar) |3.33 |Saturation vapor pressure
8 |VPact (mbar) |3.11 |Vapor pressure
9 |VPdef (mbar) |0.22 |Vapor pressure deficit
10 |sh (g/kg) |1.94 |Specific humidity
11 |H2OC (mmol/mol)|3.12 |Water vapor concentration
12 |rho (g/m ** 3) |1307.75 |Air density
13 |wv (m/s) |1.03 |Wind speed
14 |max. wv (m/s) |1.75 |Maximum wind speed
15 |wd (deg) |152.3 |Wind direction in degrees
"""
from zipfile import ZipFile
import os

# Download the zipped Jena climate CSV (cached by Keras) and extract it
# into the working directory.
uri = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip"
zip_path = keras.utils.get_file(origin=uri, fname="jena_climate_2009_2016.csv.zip")
zip_file = ZipFile(zip_path)
zip_file.extractall()
csv_path = "jena_climate_2009_2016.csv"
df = pd.read_csv(csv_path)
"""
## Raw Data Visualization
To give us a sense of the data we are working with, each feature has been plotted below.
This shows the distinct pattern of each feature over the time period from 2009 to 2016.
It also shows where anomalies are present, which will be addressed during normalization.
"""
# Human-readable plot titles, parallel (index-for-index) to feature_keys.
titles = [
    "Pressure",
    "Temperature",
    "Temperature in Kelvin",
    "Temperature (dew point)",
    "Relative Humidity",
    "Saturation vapor pressure",
    "Vapor pressure",
    "Vapor pressure deficit",
    "Specific humidity",
    "Water vapor concentration",
    "Airtight",
    "Wind speed",
    "Maximum wind speed",
    "Wind direction in degrees",
]

# Exact column names as they appear in the CSV header.
feature_keys = [
    "p (mbar)",
    "T (degC)",
    "Tpot (K)",
    "Tdew (degC)",
    "rh (%)",
    "VPmax (mbar)",
    "VPact (mbar)",
    "VPdef (mbar)",
    "sh (g/kg)",
    "H2OC (mmol/mol)",
    "rho (g/m**3)",
    "wv (m/s)",
    "max. wv (m/s)",
    "wd (deg)",
]

# Line colors, cycled when there are more features than colors.
colors = [
    "blue",
    "orange",
    "green",
    "red",
    "purple",
    "brown",
    "pink",
    "gray",
    "olive",
    "cyan",
]

# Column used as the shared time axis for all plots.
date_time_key = "Date Time"
def show_raw_visualization(data):
    """Plot each of the 14 weather features in its own subplot over time.

    Parameters
    ----------
    data : pandas.DataFrame
        Raw Jena climate data containing every column in ``feature_keys``
        plus the ``date_time_key`` column used as the shared x-axis.
    """
    time_data = data[date_time_key]
    # 7x2 grid: one subplot per feature.
    fig, axes = plt.subplots(
        nrows=7, ncols=2, figsize=(15, 20), dpi=80, facecolor="w", edgecolor="k"
    )
    for i in range(len(feature_keys)):
        key = feature_keys[i]
        c = colors[i % (len(colors))]  # cycle colors if features outnumber them
        t_data = data[key]
        t_data.index = time_data
        # (dead ``t_data.head()`` call removed -- its result was discarded)
        ax = t_data.plot(
            ax=axes[i // 2, i % 2],
            color=c,
            title="{} - {}".format(titles[i], key),
            rot=25,
        )
        ax.legend([titles[i]])
    plt.tight_layout()
show_raw_visualization(df)
"""
This heat map shows the correlation between different features.
"""
def show_heatmap(data):
    """Display the pairwise feature-correlation matrix as a heat map."""
    corr = data.corr()
    plt.matshow(corr)
    n_cols = data.shape[1]
    plt.xticks(range(n_cols), data.columns, fontsize=14, rotation=90)
    plt.gca().xaxis.tick_bottom()
    plt.yticks(range(n_cols), data.columns, fontsize=14)
    colorbar = plt.colorbar()
    colorbar.ax.tick_params(labelsize=14)
    plt.title("Feature Correlation Heatmap", fontsize=14)
    plt.show()
show_heatmap(df)
"""
## Data Preprocessing
Here we are picking ~300,000 data points for training. Observation is recorded every
10 mins, that means 6 times per hour. We will resample one point per hour since no
drastic change is expected within 60 minutes. We do this via the `sampling_rate`
argument in `timeseries_dataset_from_array` utility.
We are tracking data from past 720 timestamps (720/6=120 hours). This data will be
used to predict the temperature after 72 timestamps (72/6=12 hours).
Since every feature has values with
varying ranges, we do normalization to confine feature values to a range of `[0, 1]` before
training a neural network.
We do this by subtracting the mean and dividing by the standard deviation of each feature.
71.5 % of the data will be used to train the model, i.e. 300,693 rows. `split_fraction` can
be changed to alter this percentage.
The model is shown data for first 5 days i.e. 720 observations, that are sampled every
hour. The temperature after 72 (12 hours * 6 observation per hour) observation will be
used as a label.
"""
# Fraction of rows used for training (~300,693 of the full dataset).
split_fraction = 0.715
train_split = int(split_fraction * int(df.shape[0]))
# Sample one observation per hour (records are every 10 minutes).
step = 6

# Look back 720 timestamps (120 hours) ...
past = 720
# ... to predict the temperature 72 timestamps (12 hours) ahead.
future = 72
learning_rate = 0.001
batch_size = 256
epochs = 10
def normalize(data, train_split):
data_mean = data[:train_split].mean(axis=0)
data_std = data[:train_split].std(axis=0)
return (data - data_mean) / data_std
"""
We can see from the correlation heatmap, few parameters like Relative Humidity and
Specific Humidity are redundant. Hence we will be using select features, not all.
"""
print(
"The selected parameters are:",
", ".join([titles[i] for i in [0, 1, 5, 7, 8, 10, 11]]),
)
selected_features = [feature_keys[i] for i in [0, 1, 5, 7, 8, 10, 11]]
features = df[selected_features]
features.index = df[date_time_key]
features.head()
features = normalize(features.values, train_split)
features = pd.DataFrame(features)
features.head()
train_data = features.loc[0 : train_split - 1]
val_data = features.loc[train_split:]
"""
# Training dataset
The training dataset labels starts from the 792nd observation (720 + 72).
"""
start = past + future
end = start + train_split
x_train = train_data[[i for i in range(7)]].values
y_train = features.iloc[start:end][[1]]
sequence_length = int(past / step)
"""
The `timeseries_dataset_from_array` function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as length of the
sequences/windows, spacing between two sequence/windows, etc., to produce batches of
sub-timeseries inputs and targets sampled from the main timeseries.
"""
dataset_train = keras.preprocessing.timeseries_dataset_from_array(
x_train,
y_train,
sequence_length=sequence_length,
sampling_rate=step,
batch_size=batch_size,
)
"""
## Validation dataset
The validation dataset must not contain the last 792 rows as we won't have label data for
those records, hence 792 must be subtracted from the end of the data.
The validation label dataset must start from 792 after train_split, hence we must add
past + future (792) to label_start.
"""
x_end = len(val_data) - past - future
label_start = train_split + past + future
x_val = val_data.iloc[:x_end][[i for i in range(7)]].values
y_val = features.iloc[label_start:][[1]]
dataset_val = keras.preprocessing.timeseries_dataset_from_array(
x_val,
y_val,
sequence_length=sequence_length,
sampling_rate=step,
batch_size=batch_size,
)
for batch in dataset_train.take(1):
inputs, targets = batch
print("Input shape:", inputs.numpy().shape)
print("Target shape:", targets.numpy().shape)
"""
## Training
"""
inputs = keras.layers.Input(shape=(inputs.shape[1], inputs.shape[2]))
lstm_out = keras.layers.LSTM(32)(inputs)
outputs = keras.layers.Dense(1)(lstm_out)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss="mse")
model.summary()
"""
We'll use the `ModelCheckpoint` callback to regularly save checkpoints, and
the `EarlyStopping` callback to interrupt training when the validation loss
is not longer improving.
"""
path_checkpoint = "model_checkpoint.h5"
es_callback = keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=5)
modelckpt_callback = keras.callbacks.ModelCheckpoint(
monitor="val_loss",
filepath=path_checkpoint,
verbose=1,
save_weights_only=True,
save_best_only=True,
)
history = model.fit(
dataset_train,
epochs=epochs,
validation_data=dataset_val,
callbacks=[es_callback, modelckpt_callback],
)
"""
We can visualize the loss with the function below. After one point, the loss stops
decreasing.
"""
def visualize_loss(history, title):
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, "b", label="Training loss")
plt.plot(epochs, val_loss, "r", label="Validation loss")
plt.title(title)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
visualize_loss(history, "Training and Validation Loss")
"""
## Prediction
The trained model above is now able to make predictions for 5 sets of values from
validation set.
"""
def show_plot(plot_data, delta, title):
labels = ["History", "True Future", "Model Prediction"]
marker = [".-", "rx", "go"]
time_steps = list(range(-(plot_data[0].shape[0]), 0))
if delta:
future = delta
else:
future = 0
plt.title(title)
for i, val in enumerate(plot_data):
if i:
plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i])
else:
plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i])
plt.legend()
plt.xlim([time_steps[0], (future + 5) * 2])
plt.xlabel("Time-Step")
plt.show()
return
for x, y in dataset_val.take(5):
show_plot(
[x[0][:, 1].numpy(), y[0].numpy(), model.predict(x)[0]],
12,
"Single Step Prediction",
)
| apache-2.0 | -1,283,266,379,708,159,700 | 28.382199 | 252 | 0.663845 | false |
rohandavidg/CONCORD-VCF | bin/csv_to_dict.py | 1 | 1860 | #!/dlmp/sandbox/cgslIS/rohan/Python-2.7.11/python
"""
this script converts a csv to dict
"""
import pandas as pd
import numpy as np
import re
import pprint
import warnings
from collections import defaultdict
warnings.filterwarnings("ignore")
def main(csv_file):
vcf_dict, snp_dict, indel_dict = parse_csv(csv_file)
def parse_csv(csv_file):
df = pd.read_csv(csv_file, sep='\t')
headers = df.columns.values
keep_headers = headers[6:13]
new_df = df[keep_headers]
new_df['chrom'] = new_df.ix[:,1].str.split(':').str[0]
new_df['genomic'] = new_df.ix[:,1].str.split('.').str[1]
new_df['pos'] = new_df.ix[:,1].str.split('.').str[1].str.extract('(\d+)').astype(int)
new_df['ref'] = new_df.ix[:,1].str.split('.').str[1].str.extract('([a-zA-Z]+)').astype(str)
new_df['alt'] = new_df.ix[:,1].str.split('>').str[1]
new_df['DP'] = new_df.ix[:,4]
new_df['AD'] = new_df.ix[:,5]
new_df['AF'] = new_df.ix[:,6]
req_headers = list(new_df.columns.values)
req_headers = ['chrom'] + req_headers[-7:] + ['Exon']
result_df = new_df[req_headers]
Total_variants = len(result_df)
df_indels = result_df[(result_df.alt.str.len() - result_df.ref.str.len()) != 0]
df_SNP = result_df[(result_df.alt.str.len() - result_df.ref.str.len()) == 0]
all_dict = create_dict_from_dataframe(result_df)
snp_dict = create_dict_from_dataframe(df_SNP)
indel_dict = create_dict_from_dataframe(df_indels)
return all_dict, snp_dict, indel_dict
def create_dict_from_dataframe(dataframe):
some_dict = defaultdict(list)
# variant = 'chrom' + ':' + 'pos' + 'ref' +">" + "alt"
some_dict = dataframe.groupby('genomic')[['chrom', 'ref', 'alt', 'AD', 'AF', 'DP']].apply(lambda x: [x for x in x.values]).to_dict()
print some_dict
return some_dict
if __name__ == "__main__":
main(csv_file)
| mit | 2,124,828,647,953,762,800 | 34.09434 | 136 | 0.610215 | false |
meisamhe/GPLshared | Research_Projects_UTD/Tensorflow_tutorial/tensor_flow.py | 1 | 1071 | gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.InteractiveSession(gpu_options=gpu_options)
import tensorflow as tf
import numpy as np
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Normal
from edward.util import rbf
from edward.models import Bernoulli
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.InteractiveSession(gpu_options=gpu_options)
x = Bernoulli(p=[0.495, 0.490])
N = 10000
observed = x.sample(N).eval(session=sess)
mu = tf.Variable(tf.random_normal((2,),0.,1.))
sigma = tf.Variable(tf.ones((2,)))
theta = Normal(mu=[0.,0.], sigma=[1. , 1.])
q_theta = Normal(mu=mu, sigma=tf.exp(sigma))
x_hat = Bernoulli(p=tf.ones((N,2))*tf.nn.sigmoid(theta))
inference = ed.KLqp({theta:q_theta}, data={x_hat:observed})
inference.run()
%matplotlib inline
from matplotlib import pyplot as pl
smpls = tf.nn.sigmoid(q_theta.sample(10000)).eval()
d = pl.hist(smpls[:,0], bins=100, color='r', alpha=0.2)
d = pl.hist(smpls[:,1], bins=100, color='g', alpha=0.2)
| gpl-3.0 | -8,821,327,175,027,198,000 | 22.282609 | 66 | 0.712418 | false |
wilseypa/warped2-models | scripts/plotChains.py | 1 | 12067 | #!/usr/bin/python
# Calculates statistics and plots the chain metrics from raw data
from __future__ import print_function
import csv
import os, sys
import numpy as np
import scipy as sp
import scipy.stats as sps
import pandas as pd
import re, shutil, tempfile
import itertools, operator
import subprocess
import Gnuplot
import Gnuplot.funcutils
###### Settings go here ######
searchAttrsList = [
{ 'groupby': ['Worker_Thread_Count', 'Chain_Size'],
'filter' : 'Schedule_Queue_Count',
'model' : 'Model',
'lpcount': 'Number_of_Objects',
'output' : 'threads_vs_chainsize_key_count_' },
{ 'groupby': ['Worker_Thread_Count', 'Schedule_Queue_Count'],
'filter' : 'Chain_Size',
'model' : 'Model',
'lpcount': 'Number_of_Objects',
'output' : 'threads_vs_count_key_chainsize_' }
]
'''
List of metrics available:
Event_Commitment_Ratio
Total_Rollbacks
Simulation_Runtime_(secs.)
Average_Memory_Usage_(MB)
Event_Processing_Rate_(per_sec)
Speedup_w.r.t._Sequential_Simulation
'''
metricList = [
{ 'name' : 'Event_Processing_Rate_(per_sec)',
'ystart': 0,
'yend' : 1000000,
'ytics' : 100000 },
{ 'name' : 'Simulation_Runtime_(secs.)',
'ystart': 0,
'yend' : 150,
'ytics' : 10 },
{ 'name' : 'Event_Commitment_Ratio',
'ystart': 1,
'yend' : 2,
'ytics' : 0.1 },
{ 'name' : 'Speedup_w.r.t._Sequential_Simulation',
'ystart': 0,
'yend' : 10,
'ytics' : 1 }
]
rawDataFileName = 'chains'
statType = [ 'Mean',
'CI_Lower',
'CI_Upper',
'Median',
'Lower_Quartile',
'Upper_Quartile'
]
###### Don't edit below here ######
def mean_confidence_interval(data, confidence=0.95):
# check the input is not empty
if not data:
raise RuntimeError('mean_ci - no data points passed')
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), sps.sem(a)
h = se * sps.t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
def median(data):
# check the input is not empty
if not data:
raise RuntimeError('median - no data points passed')
return np.median(np.array(data))
def quartiles(data):
# check the input is not empty
if not data:
raise RuntimeError('quartiles - no data points passed')
sorts = sorted(data)
mid = len(sorts) / 2
if (len(sorts) % 2 == 0):
# even
lowerQ = median(sorts[:mid])
upperQ = median(sorts[mid:])
else:
# odd
lowerQ = median(sorts[:mid]) # same as even
upperQ = median(sorts[mid+1:])
return lowerQ, upperQ
def statistics(data):
# check the input is not empty
if not data:
raise RuntimeError('statistics - no data points passed')
mean = ci_lower = ci_upper = med = lower_quartile = upper_quartile = data[0]
if len(data) > 1:
mean, ci_lower, ci_upper = mean_confidence_interval(data)
med = median(data)
lower_quartile, upper_quartile = quartiles(data)
statList = (str(mean), str(ci_lower), str(ci_upper), str(med), str(lower_quartile), str(upper_quartile))
return ",".join(statList)
def sed_inplace(filename, pattern, repl):
# For efficiency, precompile the passed regular expression.
pattern_compiled = re.compile(pattern)
# For portability, NamedTemporaryFile() defaults to mode "w+b" (i.e., binary
# writing with updating). In this case, binary writing imposes non-trivial
# encoding constraints resolved by switching to text writing.
with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
with open(filename) as src_file:
for line in src_file:
tmp_file.write(pattern_compiled.sub(repl, line))
# Overwrite the original file with the temporary file in a
# manner preserving file attributes (e.g., permissions).
shutil.copystat(filename, tmp_file.name)
shutil.move(tmp_file.name, filename)
def getIndex(aList, text):
'''Returns the index of the requested text in the given list'''
for i,x in enumerate(aList):
if x == text:
return i
def plot(data, fileName, title, subtitle, xaxisLabel, yaxisLabel, ystart, yend, ytics, linePreface):
# Replace '_' with ' '
g = Gnuplot.Gnuplot()
multiLineTitle = title.replace("_", " ") + '\\n '+ subtitle.replace("_", " ")
g.title(multiLineTitle)
g("set terminal svg noenhanced background rgb 'white' size 1000,800 fname 'Helvetica' fsize 16")
g("set key box outside center top horizontal font ',12' ")
g("set autoscale xy")
#g("set yrange [{0}:{1}]".format(unicode(ystart), unicode(yend)))
#g("set ytics {}".format(unicode(ytics)))
g("set grid")
g.xlabel(xaxisLabel.replace("_", " "))
g.ylabel(yaxisLabel.replace("_", " "))
g('set output "' + fileName + '"')
d = []
for key in sorted(data[statType[0]]):
result = Gnuplot.Data( data['header'][key],data[statType[0]][key],\
data[statType[1]][key],data[statType[2]][key],\
with_="yerrorlines",title=linePreface+key )
d.append(result)
g.plot(*d)
def plot_stats(dirPath, fileName, xaxisLabel, keyLabel, filterLabel, filterValue, model, lpCount):
# Read the stats csv
inFile = dirPath + 'stats/' + rawDataFileName + '/' + fileName + '.csv'
reader = csv.reader(open(inFile,'rb'))
header = reader.next()
# Get Column Values for use below
xaxis = getIndex(header, xaxisLabel)
kid = getIndex(header, keyLabel)
reader = sorted(reader, key=lambda x: int(x[xaxis]), reverse=False)
reader = sorted(reader, key=lambda x: x[kid], reverse=False)
for param in metricList:
metric = param['name']
ystart = param['ystart']
yend = param['yend']
ytics = param['ytics']
outData = {'header':{}}
# Populate the header
for kindex, kdata in itertools.groupby(reader, lambda x: x[kid]):
if kindex not in outData['header']:
outData['header'][kindex] = []
for xindex, data in itertools.groupby(kdata, lambda x: x[xaxis]):
outData['header'][kindex].append(xindex)
# Populate the statistical data
for stat in statType:
columnName = metric + '_' + stat
columnIndex = getIndex(header, columnName)
if stat not in outData:
outData[stat] = {}
for xindex, data in itertools.groupby(reader, lambda x: x[xaxis]):
for kindex, kdata in itertools.groupby(data, lambda x: x[kid]):
if kindex not in outData[stat]:
outData[stat][kindex] = []
value = [item[columnIndex] for item in kdata][0]
outData[stat][kindex].append(value)
# Plot the statistical data
title = model.upper() + ' model with ' + str("{:,}".format(lpCount)) + ' LPs'
subtitle = filterLabel + ' = ' + str(filterValue).upper() + ' , key = ' + keyLabel
outDir = dirPath + 'plots/' + rawDataFileName + '/'
outFile = outDir + fileName + "_" + metric + '.svg'
yaxisLabel = metric + '_(C.I._=_95%)'
plot(outData, outFile, title, subtitle, xaxisLabel, yaxisLabel, ystart, yend, ytics, '')
# Convert svg to pdf and delete svg
outPDF = outDir + fileName + "_" + metric + '.pdf'
subprocess.call(['inkscape', outFile, '--export-pdf', outPDF])
subprocess.call(['rm', outFile])
def calc_and_plot(dirPath):
# Load the sequential simulation time
seqFile = dirPath + 'sequential.dat'
if not os.path.exists(seqFile):
print('Sequential data not available')
sys.exit()
seqFp = open(seqFile, 'r')
seqCount, _, seqTime = seqFp.readline().split()
seqFp.close()
# Load data from csv file
inFile = dirPath + rawDataFileName + '.csv'
if not os.path.exists(inFile):
print(rawDataFileName.upper() + ' raw data not available')
sys.exit()
data = pd.read_csv(inFile, sep=',')
data['Event_Commitment_Ratio'] = \
data['Events_Processed'] / data['Events_Committed']
data['Total_Rollbacks'] = \
data['Primary_Rollbacks'] + data['Secondary_Rollbacks']
data['Event_Processing_Rate_(per_sec)'] = \
data['Events_Processed'] / data['Simulation_Runtime_(secs.)']
data['Speedup_w.r.t._Sequential_Simulation'] = \
float(seqTime) / data['Simulation_Runtime_(secs.)']
# Create the plots directory (if needed)
outDir = dirPath + 'plots/'
if not os.path.exists(outDir):
os.makedirs(outDir)
outName = outDir + rawDataFileName + '/'
subprocess.call(['rm', '-rf', outName])
subprocess.call(['mkdir', outName])
# Create the stats directory (if needed)
outDir = dirPath + 'stats/'
if not os.path.exists(outDir):
os.makedirs(outDir)
outName = outDir + rawDataFileName + '/'
subprocess.call(['rm', '-rf', outName])
subprocess.call(['mkdir', outName])
for searchAttrs in searchAttrsList:
groupbyList = searchAttrs['groupby']
filterName = searchAttrs['filter']
model = searchAttrs['model']
lpcount = searchAttrs['lpcount']
output = searchAttrs['output']
groupbyList.append(filterName)
# Read unique values for the filter
filterValues = data[filterName].unique().tolist()
# Read the model name and LP count
modelName = data[model].unique().tolist()
lpCount = data[lpcount].unique().tolist()
for filterValue in filterValues:
# Filter data for each filterValue
filteredData = data[data[filterName] == filterValue]
groupedData = filteredData.groupby(groupbyList)
columnNames = list(groupbyList)
# Generate stats
result = pd.DataFrame()
for param in metricList:
metric = param['name']
columnNames += [metric + '_' + x for x in statType]
stats = groupedData.apply(lambda x : statistics(x[metric].tolist()))
result = pd.concat([result, stats], axis=1)
# Write to the csv
fileName = output + str(filterValue)
outFile = outName + fileName + '.csv'
statFile = open(outFile,'w')
for colName in columnNames:
statFile.write(colName + ',')
statFile.write("\n")
statFile.close()
result.to_csv(outFile, mode='a', header=False, sep=',')
# Remove " from the newly created csv file
# Note: It is needed since pandas package has an unresolved bug for
# quoting arg which retains the double quotes for column attributes.
sed_inplace(outFile, r'"', '')
# Plot the statistics
plot_stats( dirPath, fileName, groupbyList[0], groupbyList[1],
filterName, filterValue, modelName[0], lpCount[0] )
def main():
dirPath = sys.argv[1]
if not os.path.exists(dirPath):
print('Invalid path to source')
sys.exit()
calc_and_plot(dirPath)
if __name__ == "__main__":
main()
| mit | 1,226,787,144,049,247,200 | 35.128743 | 108 | 0.555648 | false |
yhat/ggplot | docs/examples.py | 1 | 13564 | from ggplot import *
import uuid
import seaborn as sns
import pandas as pd
import numpy as np
tips = sns.load_dataset('tips')
import sys
p = ggplot(mtcars, aes(x='mpg', y='cyl', color='steelblue')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(mtcars, aes(x='mpg', y='cyl')) + geom_point(color='green')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(100), aes(x='carat', y='price')) + geom_point() + facet_wrap('clarity', ncol=4)
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(100), aes(x='carat', y='price')) + geom_point() + facet_wrap('clarity', nrow=5)
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_grid(x='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_grid(y='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(1000), aes(x='carat', y='price')) + geom_point() + facet_wrap(x='clarity', y='cut')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_wrap(x='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_wrap(y='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(1000), aes(x='carat', y='price', size='clarity')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(1000), aes(x='carat', y='price', size='x')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(1000), aes(x='carat', y='price', alpha='x')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(1000), aes(x='carat', y='price', alpha='x')) + geom_point() + facet_grid(x='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(1000), aes(x='carat', y='price', alpha='x')) + geom_point() + facet_grid(y='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
# shape
p = ggplot(diamonds, aes(x='carat', y='price', shape='clarity')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(100), aes(x='carat', y='price', shape='cut', color='clarity')) + geom_point() + scale_color_brewer() + facet_grid(x='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(diamonds.sample(100), aes(x='carat', y='price')) + geom_point() + scale_color_brewer() + facet_grid(x='color', y='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(diamonds.sample(100), aes(x='carat', y='price', shape='cut', color='clarity')) + geom_point() + scale_color_brewer() + facet_grid(y='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(100), aes(x='carat', y='price', color='clarity')) + geom_point() + scale_color_brewer(type='div')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(diamonds.sample(100), aes(x='carat', y='price', color='x')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
# # linetype
p = ggplot(diamonds.sample(100), aes(x='carat', y='price', linetype='cut')) + geom_line()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
#
# # histogram
p = ggplot(diamonds, aes(x='carat')) + geom_histogram()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # point
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # titles and stuff
p = ggplot(diamonds, aes(x='carat', y='price', color='clarity')) + geom_point() + xlab("THIS IS AN X LABEL") + ylab("THIS IS A Y LABEL") + ggtitle("THIS IS A TITLE")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # density
p = ggplot(diamonds, aes(x='carat')) + geom_density()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # hline
p = ggplot(diamonds, aes(x='price')) + geom_hline(y=10)
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # vline
p = ggplot(diamonds, aes(x='price')) + geom_vline(x=10)
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # bar
p = ggplot(diamonds, aes(x='clarity')) + geom_bar()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # bar w/ weight
p = ggplot(diamonds, aes(x='clarity', weight='x')) + geom_bar()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # abline
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + geom_abline(slope=5000, intercept=-500)
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
# abline w/ facet
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + geom_abline(slope=5000, intercept=-500) + facet_wrap(y='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
# area
df = pd.DataFrame({"x": np.arange(1000)})
df['y_low'] = df.x * 0.9
df['y_high'] = df.x * 1.1
df['thing'] = ['a' if i%2==0 else 'b' for i in df.x]
p = ggplot(df, aes(x='x', ymin='y_low', ymax='y_high')) + geom_area()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
# # area w/ facet
p = ggplot(df, aes(x='x', ymin='y_low', ymax='y_high')) + geom_area() + facet_wrap(x='thing')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # facet wrap
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_wrap(x='clarity')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
# #
# # facet wrap w/ 2 variables
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_wrap(x='color', y='cut')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # facet grid w/ 1 variable
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_grid(x='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_grid(y='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # # facet grid w/ 2 variables
p = ggplot(diamonds, aes(x='price')) + geom_histogram() + facet_grid(x='color', y='cut')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
df = pd.DataFrame({"x": np.arange(100)})
df['y'] = df.x * 10
df['z'] = ["a" if x%2==0 else "b" for x in df.x]
#
# # polar coords
p = ggplot(df, aes(x='x', y='y')) + geom_point() + coord_polar()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # equal coords
p = ggplot(df, aes(x='x', y='y')) + geom_point() + coord_equal()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # equal coords faceted
p = ggplot(df, aes(x='x', y='y')) + geom_point() + coord_equal() + facet_wrap(x='z')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # flipped coords
p = ggplot(df, aes(x='x', y='y')) + geom_point() + coord_flip()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # flipped coords facted
p = ggplot(df, aes(x='x', y='y')) + geom_point() + coord_flip() + facet_grid(x='z')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # x dates formatting
p = ggplot(pageviews, aes(x='date_hour', y='pageviews')) + geom_line() + scale_x_date(labels=date_format('%B %-d, %Y'))
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # # x dates formatting faceted
pageviews['z'] = ["a" if i%2==0 else "b" for i in range(len(pageviews))]
p = ggplot(pageviews, aes(x='date_hour', y='pageviews')) + geom_line() + scale_x_date(labels=date_format('%B %-d, %Y')) + facet_grid(y='z')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # geom_line
p = ggplot(pageviews, aes(x='date_hour', y='pageviews')) + geom_line()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # geom_line w/ facets
p = ggplot(pageviews, aes(x='date_hour', y='pageviews')) + geom_line() + facet_grid(y='z')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # stat_smooth w/ lm
p = ggplot(tips, aes(x='total_bill', y='tip')) + stat_smooth(method='lm')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # stat_smooth w/ lowess
p = ggplot(tips, aes(x='total_bill', y='tip')) + stat_smooth(method='lowess')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
# # stat_smooth w/ lowess and custom span
p = ggplot(tips, aes(x='total_bill', y='tip')) + stat_smooth(method='lowess', span=0.2)
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
#
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_wrap(x='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + facet_wrap(y='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point()
p + scale_color_brewer(type='div')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point() + scale_color_manual(values=['pink', 'skyblue'])
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='tip')) + geom_point() + scale_color_gradient(low='pink', high='royalblue')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + geom_point() + scale_x_log() + scale_y_log()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + scale_x_log() + scale_y_log() + facet_wrap(x='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + scale_x_reverse() + scale_y_reverse() + facet_wrap(x='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(diamonds, aes(x='carat', y='price')) + geom_point() + scale_x_continuous(breaks=[0, 3, 6], labels=["Low", "Medium", "High"]) + scale_y_continuous(breaks=[0, 10000, 20000], labels=["Low", "Medium", "High"]) + facet_wrap(x='color')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point() + theme_gray()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point() + theme_xkcd()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + geom_point(color='orange')
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex', shape='smoker', size='tip')) + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point() + facet_wrap(x="time")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point() + facet_wrap(y="time")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point() + facet_wrap(x="time")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip', color='sex')) + geom_point() + geom_line() + facet_wrap(x="time", y="smoker")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
p = ggplot(tips, aes(x='total_bill', y='tip')) + geom_point() + facet_wrap(x="time", y="smoker")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + geom_point() + scale_color_brewer()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + geom_point() + theme_538()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + stat_smooth() + xlim(low=10, high=25) + ylim(2, 12)
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + stat_smooth() + labs(x="this is x", y="this is y", title="this is title")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + stat_smooth() + geom_vline(x=30) + geom_hline(y=10) + ylab("GOo!") + ggtitle("This is a title")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + stat_smooth() + facet_wrap(x="time", y="smoker")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill', y='tip')) + geom_line(color="blue") + geom_point()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#k
p = ggplot(tips, aes(x='total_bill', y='tip')) + geom_line(color="blue") + geom_point() + facet_wrap(x="time", y="smoker")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill')) + geom_histogram()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill')) + geom_density()
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
#
p = ggplot(tips, aes(x='total_bill')) + geom_density() + facet_wrap(y="time")
p.save("./examples/example-" + str(uuid.uuid4()) + ".png")
| bsd-2-clause | -3,096,100,790,687,385,000 | 48.323636 | 240 | 0.605352 | false |
gfyoung/pandas | pandas/util/_test_decorators.py | 1 | 8663 | """
This module provides decorator functions which can be applied to test objects
in order to skip those objects when certain conditions occur. A sample use case
is to detect if the platform is missing ``matplotlib``. If so, any test objects
which require ``matplotlib`` and decorated with ``@td.skip_if_no_mpl`` will be
skipped by ``pytest`` during the execution of the test suite.
To illustrate, after importing this module:
import pandas.util._test_decorators as td
The decorators can be applied to classes:
@td.skip_if_some_reason
class Foo:
...
Or individual functions:
@td.skip_if_some_reason
def test_foo():
...
For more information, refer to the ``pytest`` documentation on ``skipif``.
"""
from contextlib import contextmanager
from distutils.version import LooseVersion
import locale
from typing import Callable, Optional
import warnings
import numpy as np
import pytest
from pandas.compat import IS64, is_platform_windows
from pandas.compat._optional import import_optional_dependency
from pandas.core.computation.expressions import NUMEXPR_INSTALLED, USE_NUMEXPR
def safe_import(mod_name: str, min_version: Optional[str] = None):
"""
Parameters
----------
mod_name : str
Name of the module to be imported
min_version : str, default None
Minimum required version of the specified mod_name
Returns
-------
object
The imported module if successful, or False
"""
with warnings.catch_warnings():
# Suppress warnings that we can't do anything about,
# e.g. from aiohttp
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
module="aiohttp",
message=".*decorator is deprecated since Python 3.8.*",
)
try:
mod = __import__(mod_name)
except ImportError:
return False
if not min_version:
return mod
else:
import sys
try:
version = getattr(sys.modules[mod_name], "__version__")
except AttributeError:
# xlrd uses a capitalized attribute name
version = getattr(sys.modules[mod_name], "__VERSION__")
if version:
from distutils.version import LooseVersion
if LooseVersion(version) >= LooseVersion(min_version):
return mod
return False
def _skip_if_no_mpl():
mod = safe_import("matplotlib")
if mod:
mod.use("Agg")
else:
return True
def _skip_if_has_locale():
lang, _ = locale.getlocale()
if lang is not None:
return True
def _skip_if_not_us_locale():
lang, _ = locale.getlocale()
if lang != "en_US":
return True
def _skip_if_no_scipy() -> bool:
return not (
safe_import("scipy.stats")
and safe_import("scipy.sparse")
and safe_import("scipy.interpolate")
and safe_import("scipy.signal")
)
# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
# https://github.com/pytest-dev/pytest/issues/7469
def skip_if_installed(package: str):
"""
Skip a test if a package is installed.
Parameters
----------
package : str
The name of the package.
"""
return pytest.mark.skipif(
safe_import(package), reason=f"Skipping because {package} is installed."
)
# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
# https://github.com/pytest-dev/pytest/issues/7469
def skip_if_no(package: str, min_version: Optional[str] = None):
"""
Generic function to help skip tests when required packages are not
present on the testing system.
This function returns a pytest mark with a skip condition that will be
evaluated during test collection. An attempt will be made to import the
specified ``package`` and optionally ensure it meets the ``min_version``
The mark can be used as either a decorator for a test function or to be
applied to parameters in pytest.mark.parametrize calls or parametrized
fixtures.
If the import and version check are unsuccessful, then the test function
(or test case when used in conjunction with parametrization) will be
skipped.
Parameters
----------
package: str
The name of the required package.
min_version: str or None, default None
Optional minimum version of the package.
Returns
-------
_pytest.mark.structures.MarkDecorator
a pytest.mark.skipif to use as either a test decorator or a
parametrization mark.
"""
msg = f"Could not import '{package}'"
if min_version:
msg += f" satisfying a min_version of {min_version}"
return pytest.mark.skipif(
not safe_import(package, min_version=min_version), reason=msg
)
skip_if_no_mpl = pytest.mark.skipif(
_skip_if_no_mpl(), reason="Missing matplotlib dependency"
)
skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), reason="matplotlib is present")
skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit")
skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows")
skip_if_windows_python_3 = pytest.mark.skipif(
is_platform_windows(), reason="not used on win32"
)
skip_if_has_locale = pytest.mark.skipif(
_skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}"
)
skip_if_not_us_locale = pytest.mark.skipif(
_skip_if_not_us_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}"
)
skip_if_no_scipy = pytest.mark.skipif(
_skip_if_no_scipy(), reason="Missing SciPy requirement"
)
skip_if_no_ne = pytest.mark.skipif(
not USE_NUMEXPR,
reason=f"numexpr enabled->{USE_NUMEXPR}, installed->{NUMEXPR_INSTALLED}",
)
# TODO: return type, _pytest.mark.structures.MarkDecorator is not public
# https://github.com/pytest-dev/pytest/issues/7469
def skip_if_np_lt(ver_str: str, *args, reason: Optional[str] = None):
if reason is None:
reason = f"NumPy {ver_str} or greater required"
return pytest.mark.skipif(
np.__version__ < LooseVersion(ver_str), *args, reason=reason
)
def parametrize_fixture_doc(*args):
"""
Intended for use as a decorator for parametrized fixture,
this function will wrap the decorated function with a pytest
``parametrize_fixture_doc`` mark. That mark will format
initial fixture docstring by replacing placeholders {0}, {1} etc
with parameters passed as arguments.
Parameters
----------
args: iterable
Positional arguments for docstring.
Returns
-------
function
The decorated function wrapped within a pytest
``parametrize_fixture_doc`` mark
"""
def documented_fixture(fixture):
fixture.__doc__ = fixture.__doc__.format(*args)
return fixture
return documented_fixture
def check_file_leaks(func) -> Callable:
"""
Decorate a test function to check that we are not leaking file descriptors.
"""
with file_leak_context():
return func
@contextmanager
def file_leak_context():
"""
ContextManager analogue to check_file_leaks.
"""
psutil = safe_import("psutil")
if not psutil:
yield
else:
proc = psutil.Process()
flist = proc.open_files()
conns = proc.connections()
yield
flist2 = proc.open_files()
# on some builds open_files includes file position, which we _dont_
# expect to remain unchanged, so we need to compare excluding that
flist_ex = [(x.path, x.fd) for x in flist]
flist2_ex = [(x.path, x.fd) for x in flist2]
assert flist2_ex == flist_ex, (flist2, flist)
conns2 = proc.connections()
assert conns2 == conns, (conns2, conns)
def async_mark():
try:
import_optional_dependency("pytest_asyncio")
async_mark = pytest.mark.asyncio
except ImportError:
async_mark = pytest.mark.skip(reason="Missing dependency pytest-asyncio")
return async_mark
# Note: we are using a string as condition (and not for example
# `get_option("mode.data_manager") == "array"`) because this needs to be
# evaluated at test time (otherwise this boolean condition gets evaluated
# at import time, when the pd.options.mode.data_manager has not yet been set)
skip_array_manager_not_yet_implemented = pytest.mark.skipif(
"config.getvalue('--array-manager')", reason="JSON C code relies on Blocks"
)
skip_array_manager_invalid_test = pytest.mark.skipif(
"config.getvalue('--array-manager')",
reason="Test that relies on BlockManager internals or specific behaviour",
)
| bsd-3-clause | -5,316,504,105,834,613,000 | 28.769759 | 88 | 0.663973 | false |
dattalab/d_code | plotting/plottingRoutines.py | 1 | 6205 | import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
__all__ = ['plot_avg_and_sem', 'plot_array', 'imshow_array', 'plot_avg_and_comps', 'plot_array_xy']
def plot_avg_and_sem(npArray, axis=1):
"""This routine takes a multidimenionsal numpy array and an axis and then
plots the average over that axis on top of a band that represents the standard
error of the mean.
"""
mean = npArray.mean(axis=axis)
sem_plus = mean + stats.sem(npArray, axis=axis)
sem_minus = mean - stats.sem(npArray, axis=axis)
plt.figure()
plt.fill_between(np.arange(mean.shape[0]), sem_plus, sem_minus, alpha=0.5)
plt.plot(mean)
def plot_avg_and_comps(npArray, axis=1):
"""This routine takes a multidimenionsal numpy array and an axis and then
plots the average over that axis on top of fainter plots of the components of that average.
"""
plt.figure()
plt.plot(npArray, alpha=0.25, lw=1)
plt.plot(npArray.mean(axis=axis), lw=2, color='black')
def plot_array(npArray, axis=1, xlim=None, ylim=None, color=None, suppress_labels=True, title=None):
"""This routine takes a multidimensional numpy array and an axis and then
'facets' the data across that dimension. So, if npArray was a 100x9 array:
plot_array(npArray) would generate a 9x9 grid of a single 100 point plot each.
'color' lets you specifiy specific colors for all traces, and xlim and ylim let you set bounds.
The number of plots are based on making a sqaure grid of minimum size.
"""
f = plt.figure()
f.suptitle(title, fontsize=14)
num_plots = npArray.shape[axis]
side = np.ceil(np.sqrt(num_plots))
if color is not None:
if not isinstance(color, (list, tuple)): # did we pass in a list of colors?
color_list = [color] * num_plots
else:
color_list = [None] * num_plots
assert(len(color_list) == num_plots)
for current_plot, color in zip(range(1, num_plots+1), color_list):
plt.subplot(side, side, current_plot)
# need to make a tuple of Ellipses and an int that is the current plot number
slice_obj = []
for a in range(npArray.ndim):
if a is axis:
slice_obj.append(current_plot-1)
else:
slice_obj.append(Ellipsis)
if color is None:
plt.plot(npArray[tuple(slice_obj)])
else:
plt.plot(npArray[tuple(slice_obj)], color=color)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
else:
plot_min = np.min(npArray[np.logical_not(np.isnan(npArray))]) * 0.9
plot_max = np.max(npArray[np.logical_not(np.isnan(npArray))]) * 1.1
plt.ylim([plot_min,plot_max])
if suppress_labels:
a = plt.gca()
a.set_xticklabels([])
a.set_yticklabels([])
return f
def plot_array_xy(npArray_x, npArray_y, axis=1, xlim=None, ylim=None):
"""Similar to plot_array(), this routine takes a pair of multidimensional numpy arrays
and an axis and then 'facets' the data across that dimension. The major
difference here is that you can explicitly specify the x values instead of using
index.
xlim and ylim let you set bounds.
The number of plots are based on making a sqaure grid of minimum size.
"""
# ensure x and y match in dimensions
if npArray_x.ndim is not npArray_y.ndim:
temp_x = np.empty_like(npArray_y)
while npArray_x.ndim is not npArray_y.ndim:
npArray_x = np.expand_dims(npArray_x, -1)
temp_x[:] = npArray_x
npArray_x = temp_x
plt.figure()
num_plots = npArray_y.shape[axis]
side = np.ceil(np.sqrt(num_plots))
for current_plot in range(1, num_plots+1):
plt.subplot(side, side, current_plot)
# need to make a tuple of Ellipses and an int that is the current plot number
slice_obj = []
for a in range(npArray_y.ndim):
if a is axis:
slice_obj.append(current_plot-1)
else:
slice_obj.append(Ellipsis)
plt.plot(npArray_x[tuple(slice_obj)], npArray_y[tuple(slice_obj)])
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
else:
plot_min = np.min(npArray_y[np.logical_not(np.isnan(npArray_y))]) * 0.9
plot_max = np.max(npArray_y[np.logical_not(np.isnan(npArray_y))]) * 1.1
plt.ylim([plot_min,plot_max])
def imshow_array(npArray, axis=2, vmax=None, vmin=None, transpose=False, tight_axis=True, suppress_labels=True, title=None):
"""This routine takes a multidimensional numpy array and an axis and then
'facets' the data across that dimension. So, if npArray was a 100x100x9 array:
imshow_array(npArray) would generate a 9x9 grid of 100x100 images.
vmin and vmax let you set bounds on the image.
The number of plots are based on making a sqaure grid of minimum size.
"""
f = plt.figure()
f.suptitle(title, fontsize=14)
num_plots = npArray.shape[axis]
side = np.ceil(np.sqrt(num_plots))
for current_plot in range(1, num_plots+1):
plt.subplot(side, side, current_plot)
# need to make a tuple of Ellipses and an int that is the current plot number
slice_obj = []
for a in range(npArray.ndim):
if a is axis:
slice_obj.append(current_plot-1)
else:
slice_obj.append(Ellipsis)
if vmax is None:
vmax = np.max(npArray[np.logical_not(np.isnan(npArray))]) * 1.1
if vmin is None:
vmin = np.min(npArray[np.logical_not(np.isnan(npArray))]) * 0.9
if transpose:
plt.imshow(npArray[tuple(slice_obj)].T, vmax=vmax, vmin=vmin)
else:
plt.imshow(npArray[tuple(slice_obj)], vmax=vmax, vmin=vmin)
if tight_axis is True:
plt.axis('tight')
if suppress_labels:
a = plt.gca()
a.set_xticklabels([])
a.set_yticklabels([])
return f
| mit | 3,045,821,880,977,947,600 | 34.255682 | 124 | 0.612409 | false |
Svolcano/python_exercise | dianhua/endday_batch/update_9999_to_-1.py | 1 | 2813 | # -*- coding: utf-8 -*-
"""
Created on Wed May 30 11:09:10 2018
@author: huang
"""
import pymongo
import time
import datetime
import json
import sys
import getopt
import pandas as pd
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
sys.path.append('../')
stdout = sys.stdout
reload(sys)
sys.stdout = stdout
sys.setdefaultencoding("utf8")
all_cols=[u'_id', u'bill_cache_hit_month_list', u'bill_fusion_cost_time', u'cache_time',
u'call_log_cache_hit_month_list', u'call_log_fusion_cost_time',
u'call_log_missing_month_list', u'call_log_part_missing_month_list',
u'call_log_possibly_missing_month_list', u'cid', u'crawl_status',
u'crawler_channel', u'cuishou_error_msg', u'cuishou_request_status',
u'cuishou_request_time', u'cuishou_sid', u'emergency_contact', u'end_time',
u'expire_time', u'hasAlerted', u'job_id', u'login_status', u'login_time',
u'message', u'original_channel', u'phone_bill_missing_month_list',
u'report_create_time', u'report_message', u'report_used', u'sid',
u'start_time', u'status', u'status_report', u'tel', u'tel_info',
u'third_party_error_msg', u'third_party_status', u'third_party_token',
u'uid', u'user_info']
record_col=['cid','crawler_channel','province','telecom']
MONGO_CONFIG = {
# 'host': '127.0.0.1',
'host': '172.18.21.117',
'port': 27017,
'db': 'crs'
}
if __name__=='__main__':
opts, args = getopt.getopt(sys.argv[1:], "b:e:")
for op, value in opts:
if op == "-b":
BEGIN_DATE = value
elif op == "-e":
END_DATE = value
class Pro_MongodbConnection(object):
"""
MongoDB连接类
"""
def __init__(self):
self.conn = None
self.set_pro_db_conn()
def set_pro_db_conn(self):
self.client = pymongo.MongoClient(MONGO_CONFIG['host'], MONGO_CONFIG['port'], connect=False)
self.conn = self.client[MONGO_CONFIG['db']]
def export_data(self):
self.data = self.conn['sid_info_data_rpt'].find({"cid":9999},{'cid':1,'_id':1})
return self.data
def modify_data(self):
# print self.data['status']
print self.data.count()
for line in self.data:
update_dict={}
if line['cid']==9999:
update_dict.update({ '$set' : {'cid':-1}})
ret=self.update_data(line,update_dict)
if ret['ok']=='OK':
pass
def update_data(self,where,update_dict):
ret = self.conn['sid_info_data_rpt'].update(where,update_dict)
return ret
pro_conn = Pro_MongodbConnection()
data=pro_conn.export_data()
pro_conn.modify_data() | mit | -4,865,315,491,329,630,000 | 29.920455 | 100 | 0.57036 | false |
AminMahpour/Wigman | main.py | 1 | 4824 | #!/usr/bin/env python3
import operator
import pyBigWig
import sys
import matplotlib.pyplot as pp
import numpy as np
from tqdm import tqdm
# added
def parseconfig(conf_file):
config = open(conf_file, mode="r")
bed_line = []
bw_line =[]
pdf_file = ""
for line in config:
line=line.strip("\n").split("\t")
if line[0] == "bed":
bed_line.append([line[1], int( line[2]), int( line[3]), line[4]])
elif line[0] =="bw":
bw_line.append([line[1], float(line[2]), float(line[3]), float(line[4]), line[5], line[6], line[7]])
elif line[0] == "pdf":
pdf_file = line[1]
# print(bed_line,bw_line,pdf_file)
return bed_line, bw_line, pdf_file
class BigwigObj:
def __init__(self, url):
myurl = url
self.bw = pyBigWig.open(myurl)
def get_scores(self, pos):
return self.bw.values(*pos)
def bedreader(file, minbp=50, maxbp=50):
data = open(file, mode="r")
for line in data:
line = line.split("\t")
line[1] = int(line[1]) - minbp
line[2] = int(line[2]) + maxbp
out = (line[0], line[1], line[2])
yield out
def get_value_from_pos(bwurl, bed, minbp=50, maxbp=60, sort=True):
bw = BigwigObj(bwurl)
out_data = []
data_output = []
masked_data = []
for coord in bed:
scores = None
try:
scores = bw.get_scores(coord)
except Exception as e:
print("Error occurred: {0}".format(e))
if scores != None:
if len(scores) != 0:
data_output.append([coord, np.mean(scores[minbp:maxbp]), scores])
if sort:
for i in data_output:
if not np.isnan(np.mean(i[2])):
masked_data.append(i)
out_data = sorted(masked_data, key=operator.itemgetter(1))
else:
out_data = data_output
return out_data
if __name__ == '__main__':
graph = 1
config_file = sys.argv[1]
if config_file == "test": exit()
beds, bws, pdf_file = parseconfig(config_file)
# print(beds)
# print("calculating...")
fig = pp.figure(figsize=(2 * len(bws), 4 * len(beds) + 2), dpi=90)
bar = tqdm(total=100, desc="Complete", unit="Iteration")
for j, bed in enumerate(beds):
current_bed = bedreader(bed[0], minbp=bed[1], maxbp=bed[2])
bed_title = bed[3]
sorted_bed = []
for i, bw in enumerate(bws):
bw_file = bw[0]
bw_min = float(bw[1])
bw_max = float(bw[2])
bw_step = float(bw[3])
bw_gradient = str(bw[4])
bw_title = str(bw[5])
bw_desc = str(bw[6])
if i == 0:
raw_data = get_value_from_pos(bw_file, current_bed, minbp=bed[1], maxbp=bed[2] + 10)
sorted_bed = [x[0] for x in raw_data]
current_bed = sorted_bed
else:
raw_data = get_value_from_pos(bw_file, current_bed, sort=False)
array = np.array([x[2] for x in raw_data])
masked_array = np.ma.masked_invalid(array)
y = int(len(raw_data) / 40) + 2
blrd_color = pp.cm.bwr
hot_color = pp.cm.hot
current_color = None
if bw_gradient == "BuRd": current_color = pp.cm.bwr
if bw_gradient == "Hot": current_color = pp.cm.hot
if bw_gradient == "Reds": current_color = "Reds"
if bw_gradient == "Blues": current_color = "Blues_r"
# print("plotting {0}...".format(bw_file))
pp.subplot(len(beds), len(bws), graph)
pp.title(bw_title)
pp.pcolormesh(masked_array, cmap=current_color)
pp.clim(bw_min, bw_max)
cbar = pp.colorbar(orientation="horizontal", ticks=list(np.arange(bw_min, bw_max, step=bw_step)), pad=0.07)
cbar.set_label(bw_desc, size=10)
cbar.ax.tick_params(labelsize=8)
frame1 = pp.gca()
if i == 0:
pp.ylabel("{0}\nn={1}".format(bed_title, len(raw_data)), fontsize=16, color="black")
for xlabel_i in frame1.axes.get_xticklabels():
xlabel_i.set_visible(False)
xlabel_i.set_fontsize(0.0)
for xlabel_i in frame1.axes.get_yticklabels():
xlabel_i.set_fontsize(0.0)
xlabel_i.set_visible(False)
for tick in frame1.axes.get_xticklines():
tick.set_visible(False)
for tick in frame1.axes.get_yticklines():
tick.set_visible(False)
#print(int( graph/ (len(beds) * len(bws)) *100))
total = len(beds) * len(bws)
chunk = 1/total
bar.update ( chunk *100 )
graph += 1
bar.close()
pp.savefig(pdf_file)
| gpl-2.0 | 4,575,110,508,619,404,300 | 28.777778 | 119 | 0.524461 | false |
planetarymike/IDL-Colorbars | IDL_py_test/033_Blue-Red.py | 1 | 5972 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0., 0., 0.513725],
[0., 0., 0.513725],
[0., 0., 0.529412],
[0., 0., 0.545098],
[0., 0., 0.560784],
[0., 0., 0.576471],
[0., 0., 0.592157],
[0., 0., 0.607843],
[0., 0., 0.623529],
[0., 0., 0.639216],
[0., 0., 0.654902],
[0., 0., 0.670588],
[0., 0., 0.686275],
[0., 0., 0.701961],
[0., 0., 0.717647],
[0., 0., 0.733333],
[0., 0., 0.74902],
[0., 0., 0.764706],
[0., 0., 0.780392],
[0., 0., 0.796078],
[0., 0., 0.811765],
[0., 0., 0.827451],
[0., 0., 0.843137],
[0., 0., 0.858824],
[0., 0., 0.87451],
[0., 0., 0.890196],
[0., 0., 0.905882],
[0., 0., 0.921569],
[0., 0., 0.937255],
[0., 0., 0.952941],
[0., 0., 0.968627],
[0., 0., 0.984314],
[0., 0., 1.],
[0., 0., 1.],
[0., 0.0117647, 1.],
[0., 0.027451, 1.],
[0., 0.0431373, 1.],
[0., 0.0588235, 1.],
[0., 0.0745098, 1.],
[0., 0.0901961, 1.],
[0., 0.105882, 1.],
[0., 0.121569, 1.],
[0., 0.137255, 1.],
[0., 0.152941, 1.],
[0., 0.168627, 1.],
[0., 0.184314, 1.],
[0., 0.2, 1.],
[0., 0.215686, 1.],
[0., 0.231373, 1.],
[0., 0.247059, 1.],
[0., 0.262745, 1.],
[0., 0.278431, 1.],
[0., 0.294118, 1.],
[0., 0.309804, 1.],
[0., 0.32549, 1.],
[0., 0.341176, 1.],
[0., 0.356863, 1.],
[0., 0.372549, 1.],
[0., 0.388235, 1.],
[0., 0.403922, 1.],
[0., 0.419608, 1.],
[0., 0.435294, 1.],
[0., 0.45098, 1.],
[0., 0.466667, 1.],
[0., 0.482353, 1.],
[0., 0.498039, 1.],
[0., 0.513725, 1.],
[0., 0.529412, 1.],
[0., 0.545098, 1.],
[0., 0.560784, 1.],
[0., 0.576471, 1.],
[0., 0.592157, 1.],
[0., 0.607843, 1.],
[0., 0.623529, 1.],
[0., 0.639216, 1.],
[0., 0.654902, 1.],
[0., 0.670588, 1.],
[0., 0.686275, 1.],
[0., 0.701961, 1.],
[0., 0.717647, 1.],
[0., 0.733333, 1.],
[0., 0.74902, 1.],
[0., 0.764706, 1.],
[0., 0.780392, 1.],
[0., 0.796078, 1.],
[0., 0.811765, 1.],
[0., 0.827451, 1.],
[0., 0.843137, 1.],
[0., 0.858824, 1.],
[0., 0.87451, 1.],
[0., 0.890196, 1.],
[0., 0.905882, 1.],
[0., 0.921569, 1.],
[0., 0.937255, 1.],
[0., 0.952941, 1.],
[0., 0.968627, 1.],
[0., 0.984314, 1.],
[0., 1., 1.],
[0., 1., 1.],
[0.0117647, 1., 0.984314],
[0.027451, 1., 0.968627],
[0.0431373, 1., 0.952941],
[0.0588235, 1., 0.937255],
[0.0745098, 1., 0.921569],
[0.0901961, 1., 0.905882],
[0.105882, 1., 0.890196],
[0.121569, 1., 0.87451],
[0.137255, 1., 0.858824],
[0.152941, 1., 0.843137],
[0.168627, 1., 0.827451],
[0.184314, 1., 0.811765],
[0.2, 1., 0.796078],
[0.215686, 1., 0.780392],
[0.231373, 1., 0.764706],
[0.247059, 1., 0.74902],
[0.262745, 1., 0.733333],
[0.278431, 1., 0.717647],
[0.294118, 1., 0.701961],
[0.309804, 1., 0.686275],
[0.32549, 1., 0.670588],
[0.341176, 1., 0.654902],
[0.356863, 1., 0.639216],
[0.372549, 1., 0.623529],
[0.388235, 1., 0.607843],
[0.403922, 1., 0.592157],
[0.419608, 1., 0.576471],
[0.435294, 1., 0.560784],
[0.45098, 1., 0.545098],
[0.466667, 1., 0.529412],
[0.482353, 1., 0.513725],
[0.498039, 1., 0.498039],
[0.513725, 1., 0.482353],
[0.529412, 1., 0.466667],
[0.545098, 1., 0.45098],
[0.560784, 1., 0.435294],
[0.576471, 1., 0.419608],
[0.592157, 1., 0.403922],
[0.607843, 1., 0.388235],
[0.623529, 1., 0.372549],
[0.639216, 1., 0.356863],
[0.654902, 1., 0.341176],
[0.670588, 1., 0.32549],
[0.686275, 1., 0.309804],
[0.701961, 1., 0.294118],
[0.717647, 1., 0.278431],
[0.733333, 1., 0.262745],
[0.74902, 1., 0.247059],
[0.764706, 1., 0.231373],
[0.780392, 1., 0.215686],
[0.796078, 1., 0.2],
[0.811765, 1., 0.184314],
[0.827451, 1., 0.168627],
[0.843137, 1., 0.152941],
[0.858824, 1., 0.137255],
[0.87451, 1., 0.121569],
[0.890196, 1., 0.105882],
[0.905882, 1., 0.0901961],
[0.921569, 1., 0.0745098],
[0.937255, 1., 0.0588235],
[0.952941, 1., 0.0431373],
[0.968627, 1., 0.027451],
[0.984314, 1., 0.0117647],
[1., 1., 0.],
[1., 0.984314, 0.],
[1., 0.968627, 0.],
[1., 0.952941, 0.],
[1., 0.937255, 0.],
[1., 0.921569, 0.],
[1., 0.905882, 0.],
[1., 0.890196, 0.],
[1., 0.87451, 0.],
[1., 0.858824, 0.],
[1., 0.843137, 0.],
[1., 0.827451, 0.],
[1., 0.811765, 0.],
[1., 0.796078, 0.],
[1., 0.780392, 0.],
[1., 0.764706, 0.],
[1., 0.74902, 0.],
[1., 0.733333, 0.],
[1., 0.717647, 0.],
[1., 0.701961, 0.],
[1., 0.686275, 0.],
[1., 0.670588, 0.],
[1., 0.654902, 0.],
[1., 0.639216, 0.],
[1., 0.623529, 0.],
[1., 0.607843, 0.],
[1., 0.592157, 0.],
[1., 0.576471, 0.],
[1., 0.560784, 0.],
[1., 0.545098, 0.],
[1., 0.529412, 0.],
[1., 0.513725, 0.],
[1., 0.498039, 0.],
[1., 0.482353, 0.],
[1., 0.466667, 0.],
[1., 0.45098, 0.],
[1., 0.435294, 0.],
[1., 0.419608, 0.],
[1., 0.403922, 0.],
[1., 0.388235, 0.],
[1., 0.372549, 0.],
[1., 0.356863, 0.],
[1., 0.341176, 0.],
[1., 0.32549, 0.],
[1., 0.309804, 0.],
[1., 0.294118, 0.],
[1., 0.278431, 0.],
[1., 0.262745, 0.],
[1., 0.247059, 0.],
[1., 0.231373, 0.],
[1., 0.215686, 0.],
[1., 0.2, 0.],
[1., 0.184314, 0.],
[1., 0.168627, 0.],
[1., 0.152941, 0.],
[1., 0.137255, 0.],
[1., 0.121569, 0.],
[1., 0.105882, 0.],
[1., 0.0901961, 0.],
[1., 0.0745098, 0.],
[1., 0.0588235, 0.],
[1., 0.0431373, 0.],
[1., 0.027451, 0.],
[1., 0.0117647, 0.],
[1., 0., 0.],
[0.980392, 0., 0.],
[0.964706, 0., 0.],
[0.945098, 0., 0.],
[0.929412, 0., 0.],
[0.913725, 0., 0.],
[0.894118, 0., 0.],
[0.878431, 0., 0.],
[0.858824, 0., 0.],
[0.843137, 0., 0.],
[0.827451, 0., 0.],
[0.807843, 0., 0.],
[0.792157, 0., 0.],
[0.772549, 0., 0.],
[0.756863, 0., 0.],
[0.741176, 0., 0.],
[0.721569, 0., 0.],
[0.705882, 0., 0.],
[0.686275, 0., 0.],
[0.670588, 0., 0.],
[0.654902, 0., 0.],
[0.635294, 0., 0.],
[0.619608, 0., 0.],
[0.6, 0., 0.],
[0.584314, 0., 0.],
[0.568627, 0., 0.],
[0.54902, 0., 0.],
[0.533333, 0., 0.],
[0.513725, 0., 0.],
[0.513725, 0., 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 | 427,885,498,026,020,350 | 20.79562 | 69 | 0.496651 | false |
alexholcombe/spatiotopic-motion | plotHelpers.py | 1 | 12084 | from psychopy import visual, data, logging
from psychopy.misc import fromFile
import itertools
from math import log
from copy import deepcopy
import pandas as pd
from pandas import DataFrame
import pylab, scipy
import numpy as np
from calcUnderOvercorrect import calcOverCorrected
def agrestiCoull95CI(x, nTrials, z=1.96):
    """Return the Agresti-Coull confidence interval for a binomial proportion.

    Estimates the interval for the underlying proportion after observing
    x successes in nTrials Bernoulli trials.
    http://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Agresti-Coull_Interval

    Parameters:
        x       -- number of successes observed
        nTrials -- total number of trials
        z       -- critical value of the standard normal distribution.
                   Default 1.96 gives the conventional 95% interval, so
                   existing two-argument callers behave exactly as before.

    Returns:
        (low, high) tuple bounding the estimated proportion. The lower
        bound can be slightly negative (and upper slightly above 1) for
        extreme counts; that is a known property of this interval.
    """
    nTilde = nTrials + z**2            #adjusted trial count
    pTilde = 1/nTilde*(x + 0.5*z**2)   #adjusted proportion, shrunk toward 0.5
    plusMinus = z*np.sqrt(1/nTilde*pTilde*(1-pTilde))  #half-width of the interval
    return pTilde - plusMinus, pTilde + plusMinus
def plotDataAndPsychometricCurve(df, dataFileName):
    """
    Plot data, and fit and plot psychometric curve
    If df is not None then get data from dataFileName

    Scatter-plots the proportion of 'right' responses against tilt,
    color-coded by saccade direction (startLeft), fits a logistic
    psychometric function to each direction with scipy's curve_fit,
    and annotates the figure with over/under-correction statistics
    (including an Agresti-Coull 95% CI from agrestiCoull95CI).

    Parameters:
        df -- pandas DataFrame of trial data, or None to load it from
              dataFileName. Expected columns include 'tilt', 'startLeft',
              'upDown', and 'respLeftRight'.
        dataFileName -- path ending in '.pickle' (psychopy fromFile) or
              '.txt' (tab-delimited); only consulted when df is None.
              '.psydat' files are recognized but deliberately rejected.

    Returns:
        the current pylab figure (all drawing uses pylab's implicit
        current-figure state, so this should be called with the intended
        figure active).

    Side effect: when any tilt==0 trials exist, adds an 'overCorrected'
    column to df (NaN on non-neutral trials).
    """
    #Load the data from file only when a DataFrame wasn't passed in directly.
    if df is None:
        if type(dataFileName) is not str:
            print 'dataFileName = ', dataFileName
            raise Exception("No df supplied and no string dataFileName supplied")
        if dataFileName.endswith('.pickle'):
            df = fromFile(dataFileName) #psychopy's fromFile unpickles the saved object
        elif dataFileName.endswith('.txt'):
            df = pd.read_csv(dataFileName, delimiter='\t')
        elif dataFileName.endswith('.psydat'):
            trialHandler = fromFile(dataFileName)
            raise Exception('Cant handle .psydat file, because first Alex has to write a toDataFrame function for experimentHandler, so that its data can be analyzed.')
            #or would have to save to save trialHandler saveAsWideText to dummy file, and use returned dataframe
        #dat = tools.filetools.fromFile(dataFileName) #<class 'psychopy.data.DataHandler'>
    #Validate that we ended up with a DataFrame containing numeric data.
    if not isinstance(df, pd.core.frame.DataFrame):
        raise Exception("Don't have viable DataFrame still")
    if np.all(df.dtypes==object):
        raise Exception("I thought you'd give me some numbers to work with, but everything in this dataframe is an object")
    #Need to convert_
    #add overcorrect to cases where tilt==0
    tilt = df.loc[:,'tilt']
    neutralStimIdxs = (tilt==0) #boolean Series marking zero-tilt (neutral) trials
    #print('neutralStimIdxs=\n',neutralStimIdxs)
    if len(neutralStimIdxs)>1:
        if neutralStimIdxs.any(): #Calculate over/under-correction, which is only interpretable when tilt=0
            forCalculatn = df.loc[neutralStimIdxs, ['tilt','startLeft','upDown','respLeftRight']]
            overCorrected = calcOverCorrected( forCalculatn )
            df['overCorrected']= np.nan
            df.loc[neutralStimIdxs, 'overCorrected'] = overCorrected
    #NOTE(review): if there are no tilt==0 trials the 'overCorrected' column is
    #never created, so the df['overCorrected'] uses further down would raise
    #KeyError -- confirm callers always include neutral trials.
    #test plotting of data
    usePsychopy_ext = False
    if usePsychopy_ext:
        #have to use psychopy_ext to aggregate
        #NOTE(review): psychopy_ext is never imported in this file, so enabling
        #this branch would raise NameError as written.
        ag = psychopy_ext.stats.aggregate(df, values="respLeftRight", cols="tilt") #, values=None, subplots=None, yerr=None, aggfunc='mean', order='natural')
        print "ag = \n", ag
        plt = psychopy_ext.plot.Plot()
        plt.plot(ag, kind='line')
        print "Showing plot with psychopy_ext.stats.aggregate"
        plt.show()
    #dataframe aggregate
    #Mean of respLeftRight per (startLeft, tilt) cell = proportion responding 'right'.
    grouped = df.groupby(['startLeft','tilt'])
    dirTilt = grouped.mean() #this is a dataframe, not a DataFrameGroupBy
    print "mean at each dir, tilt =\n", dirTilt
    #print "dirTilt.index = ", dirTilt.index #there is no column called 'tilt', instead it's the actual index, kinda like row names
    # MultiIndex [(False, -0.4), (False, 0.0), (False, 0.4), (True, -0.4), (True, 0.0), (True, 0.4)]
    #dirTilt.groups no groups, maybe because dataframe?
    dirTilt = dirTilt.reset_index() #flatten MultiIndex back into columns with rows (simple dataframe)
    #Split the cell means by saccade direction for separate plotting/fitting.
    leftwardM = dirTilt[ dirTilt['startLeft']==False ]
    rightwardM = dirTilt[ dirTilt['startLeft']==True ]
    #Raw proportions: red = leftward-saccade trials, green = rightward.
    ax1 = pylab.subplot(121)
    pylab.scatter(leftwardM['tilt'], leftwardM['respLeftRight'],
        edgecolors=(1,0,0), facecolor=(1,0,0), label='leftward saccade')
    pylab.scatter(rightwardM['tilt'], rightwardM['respLeftRight'],
        edgecolors=(0,1,0), facecolor=(0,1,0), label='rightward saccade')
    pylab.legend()
    #Annotate the percentage of neutral trials that were overcorrected,
    #plus its Agresti-Coull 95% confidence interval.
    print str( round( 100*df['overCorrected'].mean(), 2) )
    msg = 'proportn overCorrected at 0 tilt = ' + str( round( 100*df['overCorrected'].mean(), 2) ) + \
        '% of ' + str( df['overCorrected'].count() ) + ' trials'
    msg2= ' 95% Agresti-Coull CI = ' + \
        str( np.round( agrestiCoull95CI(df['overCorrected'].sum(), df['overCorrected'].count()), 2) )
    pylab.text(0.52, 0.85, msg, horizontalalignment='left', fontsize=12)
    pylab.text(0.52,0.75, msg2, horizontalalignment='left', fontsize=12)
    #pylab.ylim([-0.01,1.01])
    pylab.xlabel("tilt")
    pylab.ylabel("proportion respond 'right'")
    #psychometric curve basics
    tiltMin = min( df['tilt'] )
    tiltMax = max( df['tilt'] )
    x = np.linspace(tiltMin, tiltMax, 50) #tilt values at which to draw the fitted curves
    #test function fitting
    #fit curve
    def logistic(x, x0, k):
        #Standard logistic: x0 = midpoint (PSE), k = slope.
        y = 1 / (1 + np.exp(-k*(x-x0)))
        return y
    def inverseLogistic(y, x0, k):
        #Invert the logistic to find the tilt giving proportion y.
        linear = np.log ( y / (1-y) )
        #linear = -k*(x-x0)
        #x-x0 = linear/-k
        #x= linear/-k + x0
        x = linear/-k + x0
        return x
    #scipy.stats.logistic.fit
    #Fit each saccade direction separately; a failed fit is tolerated and
    #simply leaves that curve un-plotted.
    #NOTE(review): this relies on scipy.optimize being reachable via the bare
    #'import scipy' at the top of the file; newer SciPy requires an explicit
    #'import scipy.optimize' -- verify against the installed version.
    paramsLeft = None; paramsRight = None
    try:
        paramsLeft, pcov = scipy.optimize.curve_fit(logistic, leftwardM['tilt'], leftwardM['respLeftRight'], p0 = [0, 6])
    except Exception as e:
        print 'leftward fit failed ', e
    try:
        paramsRight, pcov = scipy.optimize.curve_fit(logistic, rightwardM['tilt'], rightwardM['respLeftRight'], p0 = [0, 6])
    except Exception as e:
        print 'rightward fit failed ', e
    threshVal = 0.5 #criterion proportion defining the PSE / threshold
    pylab.plot([tiltMin, tiltMax],[threshVal,threshVal],'k--') #horizontal dashed line
    #Tilt needed to null the overcorrection, one entry per successful fit;
    #rightward thresholds are sign-flipped so both directions are comparable.
    overCorrectAmts = list()
    if paramsLeft is not None:
        pylab.plot(x, logistic(x, *paramsLeft) , 'r-')
        threshL = inverseLogistic(threshVal, paramsLeft[0], paramsLeft[1])
        print 'threshL = ', np.round(threshL, 2)
        overCorrectAmts.append(threshL)
        pylab.plot([threshL, threshL],[0,threshVal],'g--') #vertical dashed line
    if paramsRight is not None:
        pylab.plot(x, logistic(x, *paramsRight) , 'g-')
        threshR = inverseLogistic(threshVal, paramsRight[0], paramsRight[1])
        print 'threshR = ', np.round(threshR, 2)
        overCorrectAmts.append(-1*threshR)
        pylab.plot([threshR, threshR],[0,threshVal],'g--') #vertical dashed line
        pylab.title('threshold (%.2f) = %0.3f' %(threshVal, threshR))
    #If both fits succeeded, the title reports both PSEs (overwrites the
    #single-threshold title set just above).
    if (paramsLeft is not None) and (paramsRight is not None):
        pylab.title('PSE (%.2f) = %0.3f & %0.3f' %(threshVal, threshL, threshR))
    if len(overCorrectAmts)==0:
        msg3= 'Failed both fits so cant tell you average over/under correction amount'
    else:
        msg3= 'Average tilt needed to compensate overcorrection\n (negative indicates undercorrection)\n = ' + str( np.round( np.mean(overCorrectAmts), 2) )
    pylab.text(0.52,0.45, msg3, horizontalalignment='left', fontsize=12, linespacing=2.0)
    #pylab.savefig('figures/Alex.png') #, bbox_inches='tight')
    return pylab.gcf() #return current figure
def plotStaircaseDataAndPsychometricCurve(fit,IV_name,DV_name,intensities,resps,descendingPsycho,threshCriterion):
    """Plot staircase data (left panel) and psychometric function (right panel), linear axes.

    Parameters:
      fit              -- psychopy data fit object, or None if fitting failed.
                          If descendingPsycho, the fit was done on (100 - intensity)
                          and its outputs must be mapped back to intensity space.
      IV_name, DV_name -- independent / dependent variable names (used as column labels).
      intensities      -- list of tested intensities (one per trial).
      resps            -- list of responses (one per trial; 0/1 assumed -- TODO confirm).
      descendingPsycho -- True if the psychometric function descends with intensity.
      threshCriterion  -- proportion-correct criterion defining the threshold.

    Returns nothing; draws into the current pylab figure.
    """
    intensLinear= intensities
    thresh = None
    if fit is not None:
        #generate psychometric curve
        intensitiesForCurve = np.arange(min(intensLinear), max(intensLinear), 0.01)
        # BUGFIX: intensitiesForFit was previously undefined when descendingPsycho
        # was False, causing a NameError at fit.eval() below.
        intensitiesForFit = intensitiesForCurve
        thresh = fit.inverse(threshCriterion)
        if descendingPsycho:
            # Fit was computed on (100 - intensity): evaluate it in fit space and
            # express the threshold back in original intensity space.
            intensitiesForFit = 100-intensitiesForCurve
            thresh = 100 - thresh
        ysForCurve = fit.eval(intensitiesForFit)
        # NOTE(review): the original code inverted `thresh` a second time here for
        # descendingPsycho, cancelling the inversion above (double negation).
        # That second inversion was removed -- confirm against intended behavior.
        #print('intensitiesForCurve=',intensitiesForCurve)
        #print('ysForCurve=',ysForCurve) #debug
    #plot staircase in left hand panel
    pylab.subplot(121)
    #plot psychometric function on the right.
    ax1 = pylab.subplot(122)
    figure_title = "threshold "
    if fit is None:
        figure_title += "unknown because fit was not provided"
    else:
        figure_title += 'threshold (%.2f) = %0.2f' %(threshCriterion, thresh) + '%'
        # BUGFIX: these three plot calls used to run unconditionally, raising a
        # NameError on ysForCurve/thresh when fit was None; and the horizontal
        # dashed line referenced an undefined name `threshVal`.
        pylab.plot(intensitiesForCurve, ysForCurve, 'k-') #fitted curve
        pylab.plot([thresh, thresh],[0,threshCriterion],'k--') #vertical dashed line
        pylab.plot([0, thresh],[threshCriterion,threshCriterion],'k--') #horizontal dashed line
    #print thresh proportion top of plot
    pylab.text(0, 1.11, figure_title, horizontalalignment='center', fontsize=12)
    if fit is None:
        pylab.title('Fit failed')
    #Use pandas to calculate proportion correct at each level
    df= DataFrame({IV_name: intensLinear, DV_name: resps})
    #print('df='); print(df) #debug
    grouped = df.groupby(IV_name)
    groupMeans= grouped.mean() #a groupBy object, kind of like a DataFrame but without column names, only an index?
    intensitiesTested = list(groupMeans.index)
    pCorrect = list(groupMeans[DV_name]) #x.iloc[:]
    # Sum of responses per intensity; with 0/1 responses this is the number of
    # correct trials, used only to scale the data point sizes below -- TODO confirm
    ns = grouped.sum() #want n per trial to scale data point size
    ns = list(ns[DV_name])
    print('df mean at each intensity\n'); print( DataFrame({IV_name: intensitiesTested, 'pCorr': pCorrect, 'n': ns }) )
    #data point sizes. One entry in array for each datapoint
    pointSizes = 5+ 40 * np.array(ns) / max(ns) #the more trials, the bigger the datapoint size for maximum of 6
    #print('pointSizes = ',pointSizes)
    points = pylab.scatter(intensitiesTested, pCorrect, s=pointSizes,
                           edgecolors=(0,0,0), facecolors= 'none', linewidths=1,
                           zorder=10, #make sure the points plot on top of the line
                           )
    pylab.ylim([-0.01,1.01])
    pylab.xlim([-2,102])
    pylab.xlabel("%noise")
    pylab.ylabel("proportion correct")
    #save a vector-graphics format for future
    #outputFile = os.path.join(dataFolder, 'last.pdf')
    #pylab.savefig(outputFile)
    createSecondAxis = False
    if createSecondAxis: #presently not used, if fit to log would need this to also show linear scale
        #create second x-axis to show linear percentNoise instead of log
        ax2 = ax1.twiny()
        ax2.set(xlabel='%noise', xlim=[2, 102]) #not quite right but if go to 0, end up with -infinity? and have error
        ax2.set_xscale('log')
        ax2.tick_params(axis='x',which='minor',bottom='off')
if __name__=='__main__': #Running this helper file, must want to test functions in this file
    # Example data file used for the smoke test; alternates kept for reference.
    #dataFileName="data/raw/Alex/Alex_spatiotopicMotion_15Dec2014_16-25_DataFrame.pickle"
    dataFileName="data/raw/Alex/Alex_spatiotopicMotion_15Dec2014_16-25_PSYCHOPY.txt"
    #dataFileName='data/raw/LK/LK100_spatiotopicMotion_02Jan2015_15-46.txt'
    #dataFileName='data/raw/LK/LK100_spatiotopicMotion_02Jan2015_15-46.psydat'
    # plotDataAndPsychometricCurve is defined earlier in this file; first
    # argument is the fit object (None here) -- presumably it then works
    # directly from the data file; verify against its definition.
    fig = plotDataAndPsychometricCurve(None, dataFileName)
    pylab.savefig('figures/examples/AlexResults.png') #, bbox_inches='tight')
    print('The plot has been saved, as figures/examples/AlexResults.png')
    pylab.show() #pauses until window manually closed. Have to save before calling this, because closing the window loses the figure
# | mit | -7,298,490,796,033,904,000 | 49.35 | 168 | 0.66708 | false |
# cchayward/ALMAHerschelDSFG | Code/dnda.py | 1 | 3270
"""
Shane Bussmann
2014 August 20
Plot dN/dA as a function of angular separation from the center of light. dN =
number of objects between radius 1 and radius 2. dA = area between radius 1
and radius 2.
"""
from astropy.table import Table
import matplotlib
import matplotlib.pyplot as plt
from pylab import savefig
import numpy
import sep_util
# --- Global plot appearance ------------------------------------------------
# set font properties
font = {'family' : 'Arial',
        'weight' : 'normal',
        'size'   : 12}
matplotlib.rc('font', **font)
matplotlib.rcParams['axes.linewidth'] = 1.2

fig = plt.figure(figsize=(5.0, 4.5))

# --- Hodge+13 sample -------------------------------------------------------
# Plotting parameters
hodgecolor = 'LightPink'
hodgesimcolor = 'LightPink'
hodgems = 4
hodgefmt = 'D'
nbins = 15
binwidth = 1.0
bin_edges = numpy.arange(0, nbins + 1, binwidth)

# Load the data
fluxcomponent_file = 'hodge2013.dat'
fluxcomponent = Table.read(fluxcomponent_file, format='ascii')

# filter out single source systems
fluxcomponent = sep_util.rmSingles(fluxcomponent, targetstring='lessid')
nmultiples = len(fluxcomponent)

avgsep_hodge, wmeansep_hodge = sep_util.getSeparation(fluxcomponent,
        targetstring='lessid')
# deltasep only feeds the (commented-out) adaptive binning below.
deltasep = avgsep_hodge.max() - avgsep_hodge.min()
#nbins = deltasep / binwidth
sep_util.histArea(avgsep_hodge, nbins, color=hodgecolor, fmt=hodgefmt,
        ms=hodgems, norm=nmultiples)

# plot simulated positions (uniform distribution, nsim realizations)
nsim = 100
sep_util.simArea(fluxcomponent, nsim, bin_edges, targetstring='lessid',
        edgecolor=hodgesimcolor, facecolor='none', hatch='//', norm=nmultiples)

# ***********
# ALMA sample
# ***********

# plotting parameters
acolor = 'green'
asimcolor = 'green'
ams = 5
afmt = 's'

fluxcomponent_file = 'table_positions.dat'
fluxcomponent = Table.read(fluxcomponent_file, format='ascii')

# filter out single source systems
fluxcomponent = sep_util.rmSingles(fluxcomponent, targetstring='target')
nmultiples = len(fluxcomponent)

avgsep_alma, wmeansep_alma = sep_util.getSeparation(fluxcomponent,
        fluxstring='S_870_observed')
sep_util.histArea(avgsep_alma, nbins, color=acolor, fmt=afmt, ms=ams,
        norm=nmultiples)

sep_util.simArea(fluxcomponent, nsim, bin_edges, fluxstring='S_870_observed',
        edgecolor=asimcolor, facecolor='none', hatch='\\', norm=nmultiples)

# --- Axes, labels, legend, output ------------------------------------------
xmin = 0
ymin = 0
xmax = 6
ymax = 0.25
plt.axis([xmin, xmax, ymin, ymax])
plt.xlabel(r'${\rm Angular\,Separation\,from\,Centroid\,(arcsec)}$', fontsize='large')
plt.ylabel(r'$dN/dA \, ({\rm arcsec}^{-2}$)', fontsize='large')

plt.minorticks_on()
plt.tick_params(width=1.2, which='both')
plt.tick_params(length=2, which='minor')
plt.tick_params(length=4, which='major')

# Off-screen dummy points: give the legend one entry per dataset/style.
fake = numpy.arange(2) + 1e5
plt.plot(fake, color=hodgecolor, label='Hodge+13 Observed')
plt.plot(fake, color=hodgesimcolor, linestyle='--',
        label='Hodge+13 Uniform Distribution')
plt.plot(fake, color=acolor, label='ALMA Sample Observed')
plt.plot(fake, color=asimcolor, linestyle='--',
        label='ALMA Sample Uniform Distribution')
plt.legend(loc='upper right', numpoints=1, handletextpad=0.35, borderpad=0.4,
        labelspacing=0.18, handlelength=1.0)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
#plt.setp(ltext, fontsize='medium')

plt.subplots_adjust(left=0.14, right=0.95, top=0.97, bottom=0.13, wspace=0.39)
savefig('dNdA.pdf')

# BUGFIX: removed leftover debugger trap -- it halted every non-interactive run
# of this script after saving the figure.
#import pdb; pdb.set_trace()
# | mit | 1,041,237,110,525,958,500 | 27.434783 | 86 | 0.710703 | false |
# walkerps/ICGPM | model.py | 1 | 3369
import pandas as pd
import numpy as np
import re
from nltk import word_tokenize
from nltk.corpus import wordnet
import pickle
def feature_extraction_approach_2(name):
    """Extract 8 numeric features from a first name for gender classification.

    Characters outside a-z (uppercase, digits, punctuation) are ignored by the
    per-character counts.  Returns a list of 8 ints:
      [consonant count, vowel count,
       "bouba" consonant count (b/l/m/n), "bouba" vowel count (u/o),
       "kiki" consonant count (k/p/t), "kiki" vowel count (i/e),
       1 if the last character is a vowel else 0, len(name)]

    Fixes over the original: the loop no longer shadows the `name` parameter,
    the unused `featuress` accumulator was removed, and an empty string now
    yields all-zero features instead of raising IndexError.
    """
    consonants = ['b','c','d','f','g','h','j','k','l','m','n','p','q','r','s','t','v','w','x','y','z']
    vowels = ['a','e','i','o','u']
    bobua_consonants = ['b','l','m','n']
    bobua_vowels = ['u','o']
    kiki_consonants = ['k','p','t']
    kiki_vowels = ['i','e']
    number_of_consonants = 0
    number_of_vowels = 0
    number_of_bobua_consonants = 0
    number_of_bobua_vowels = 0
    number_of_kiki_consonants = 0
    number_of_kiki_vowels = 0
    for ch in name:
        if ch in consonants:
            number_of_consonants += 1
            if ch in bobua_consonants:
                number_of_bobua_consonants += 1
            elif ch in kiki_consonants:
                number_of_kiki_consonants += 1
        elif ch in vowels:
            number_of_vowels += 1
            if ch in bobua_vowels:
                number_of_bobua_vowels += 1
            elif ch in kiki_vowels:
                number_of_kiki_vowels += 1
    # Robustness: guard against the empty string before indexing [-1].
    last_character = 1 if name and name[-1] in vowels else 0
    len_of_name = len(name)
    features = [number_of_consonants,number_of_vowels,number_of_bobua_consonants,number_of_bobua_vowels,number_of_kiki_consonants,number_of_kiki_vowels,last_character,len_of_name]
    return features
def model(data):
    """Predict a 'Gender' column for a DataFrame with 'OrderId' and 'Firstname'.

    Computes the 8 name features from feature_extraction_approach_2 for each
    Firstname, feeds them (plus any other pre-existing columns except OrderId
    and Firstname) to the pickled classifier 'dataModel.sav', writes the
    predictions into data['Gender'], and returns the DataFrame without the
    temporary feature columns.  Note: `data` is mutated in place (the feature
    and Gender columns are added to the caller's object), matching the
    original behavior.

    Fixes over the original: the pickle file handle is now closed (was leaked
    via pickle.load(open(...))), and the redundant build-8-lists-then-copy
    plumbing was collapsed into direct column assignment.
    """
    feature_cols = ['noc', 'nov', 'nobc', 'nobv', 'nokc', 'nokv', 'last', 'len']
    # One feature row per name, in Firstname order.
    feature_rows = [feature_extraction_approach_2(name) for name in data.Firstname]
    for col_idx, col in enumerate(feature_cols):
        data[col] = [row[col_idx] for row in feature_rows]
    # Feature matrix: every column except the identifiers, in insertion order.
    feature_matrix = data.drop(['OrderId', 'Firstname'], axis=1).values
    # BUGFIX: use a context manager so the model file is always closed.
    with open('dataModel.sav', 'rb') as model_file:
        loaded_model = pickle.load(model_file)
    data['Gender'] = loaded_model.predict(feature_matrix)
    data = data.drop(feature_cols, axis=1)
    return data
# FedoraScientific/salome-gui | tools/CurvePlot/src/python/views/XYView.py | 1 | 25807
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from View import View
from CurveView import CurveView
from utils import Logger, trQ
from PlotWidget import PlotWidget
from PlotSettings import PlotSettings
from pyqtside import QtGui, QtCore
from pyqtside.QtCore import QObject
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg, NavigationToolbar2QT
class EventHandler(QObject):
  """ Qt event filter that swallows right-button presses so that only the
  contextual menu is triggered (matplotlib never sees the right click). """
  def __init__(self,parent=None):
    QObject.__init__(self, parent)

  def eventFilter(self, obj, event):
    right_press = (event.type() == QtCore.QEvent.MouseButtonPress
                   and event.button() == 2)
    if right_press:
      # Event handled here and hence not passed on to matplotlib.
      return True
    return QObject.eventFilter(self, obj, event)
class XYView(View):
  """Qt/matplotlib view of one plot set: owns the canvas, toolbar, legend and
  one CurveView per curve of the model."""
  # Extra vertical room (fraction of the data range) added by autoFit().
  AUTOFIT_MARGIN = 0.03 # 3%
  # Marker cycle assigned to successive curves (see getMarker()).
  # See http://matplotlib.org/api/markers_api.html:
  CURVE_MARKERS = [ "o" ,# circle
                    "*", # star
                    "+", # plus
                    "x", # x
                    "s", # square
                    "p", # pentagon
                    "h", # hexagon1
                    "8", # octagon
                    "D", # diamond
                    "^", # triangle_up
                    "<", # triangle_left
                    ">", # triangle_right
                    "1", # tri_down
                    "2", # tri_up
                    "3", # tri_left
                    "4", # tri_right
                    "v", # triangle_down
                    "H", # hexagon2
                    "d", # thin diamond
                    "", # NO MARKER
                   ]
  _DEFAULT_LEGEND_STATE = False # for test purposes mainly - initial status of the legend
  def __init__(self, controller):
    """Initialize the view; widgets are created lazily in createPlotWidget()."""
    View.__init__(self, controller)
    self._eventHandler = EventHandler()
    self._curveViews = {}  # key: curve (model) ID, value: CurveView
    self._salomeViewID = None       # SALOME view id, set on first update()
    # Matplotlib objects (created in createPlotWidget()):
    self._mplFigure = None
    self._mplAxes = None
    self._mplCanvas = None
    self._plotWidget = None
    self._sgPyQt = self._controller._sgPyQt
    self._toolbar = None
    self._mplNavigationActions = {} # mpl toolbar actions keyed by their text
    self._toobarMPL = None
    self._grid = None
    self._currCrv = None  # current curve selected in the view
    # Legend state:
    self._legend = None
    self._legendLoc = "right"  # "right" or "bottom"
    # Interaction state flags (several appear unused here -- kept for API parity):
    self._fitArea = False
    self._zoomPan = False
    self._dragOnDrop = False
    self._move = False
    self._patch = None
    self._xdata = None
    self._ydata = None
    self._defaultLineStyle = None
    self._last_point = None
    self._lastMarkerID = -1         # index into CURVE_MARKERS (see getMarker())
    self._blockLogSignal = False    # guards against log-scale signal recursion
    # Scientific-notation flags for the tick label formatting:
    self._axisXSciNotation = False
    self._axisYSciNotation = False
    self._prevTitle = None
def __repaintOK(self):
""" To be called inside XYView each time a low-level expansive matplotlib methods is to be invoked.
@return False if painting is currently locked, in which case it will also register the current XYView
as needing a refresh when unlocked
"""
ret = self._controller._plotManager.isRepaintLocked()
if ret:
self._controller._plotManager.registerRepaint(self._model)
return (not ret)
  def appendCurve(self, curveID):
    """Create and draw a CurveView for model curve `curveID`, assigning it
    the next marker of the cycle."""
    newC = CurveView(self._controller, self)
    newC.setModel(self._model._curves[curveID])
    newC.setMPLAxes(self._mplAxes)
    newC.draw()
    newC.setMarker(self.getMarker(go_next=True))
    self._curveViews[curveID] = newC

  def removeCurve(self, curveID):
    """Erase and forget the CurveView of `curveID`; reset the current-curve
    reference if it pointed at the removed curve."""
    v = self._curveViews.pop(curveID)
    v.erase()
    if self._currCrv is not None and self._currCrv.getID() == curveID:
      self._currCrv = None

  def cleanBeforeClose(self):
    """ Clean some items to avoid accumulating stuff in memory """
    self._mplFigure.clear()
    plt.close(self._mplFigure)
    self._plotWidget.clearAll()
    # For memory debugging only:
    import gc
    gc.collect()

  def repaint(self):
    """Redraw the canvas, unless repainting is currently locked."""
    if self.__repaintOK():
      Logger.Debug("XYView::draw")
      self._mplCanvas.draw()
  def onXLabelChange(self):
    """Model callback: push the new X label to the axes and repaint."""
    if self.__repaintOK():
      self._mplAxes.set_xlabel(self._model._xlabel)
      self.repaint()

  def onYLabelChange(self):
    """Model callback: push the new Y label to the axes and repaint."""
    if self.__repaintOK():
      self._mplAxes.set_ylabel(self._model._ylabel)
      self.repaint()

  def onTitleChange(self):
    """Model callback: update axes title and the SALOME view title."""
    if self.__repaintOK():
      self._mplAxes.set_title(self._model._title)
      self.updateViewTitle()
      self.repaint()

  def onCurveTitleChange(self):
    # Updating the legend should suffice
    self.showHideLegend()

  def onClearAll(self):
    """ Just does an update with a reset of the marker cycle. """
    if self.__repaintOK():
      self._lastMarkerID = -1
      self.update()

  def onPick(self, event):
    """ MPL pick callback: on left click, find which curve's line artist was
    picked and make it the current curve (-1 if none matched). """
    if event.mouseevent.button == 1:
      selected_id = -1
      a = event.artist
      for crv_id, cv in self._curveViews.items():
        if cv._mplLines[0] is a:
          selected_id = crv_id
      # Use the plotmanager so that other plot sets get their current reset:
      self._controller._plotManager.setCurrentCurve(selected_id)
  def createAndAddLocalAction(self, icon_file, short_name):
    """Add a toolbar action with a CURVEPLOT icon; return the QAction."""
    return self._toolbar.addAction(self._sgPyQt.loadIcon("CURVEPLOT", icon_file), short_name)

  def createPlotWidget(self):
    """Build the matplotlib figure/canvas, the Qt plot widget, the toolbar and
    the contextual menu, and wire up all event connections."""
    self._mplFigure = Figure((8.0,5.0), dpi=100)
    self._mplCanvas = FigureCanvasQTAgg(self._mplFigure)
    self._mplCanvas.installEventFilter(self._eventHandler)
    self._mplCanvas.mpl_connect('pick_event', self.onPick)
    self._mplAxes = self._mplFigure.add_subplot(1, 1, 1)
    self._plotWidget = PlotWidget()
    # Hidden mpl navigation toolbar: we only reuse its Zoom/Pan actions.
    self._toobarMPL = NavigationToolbar2QT(self._mplCanvas, None)
    for act in self._toobarMPL.actions():
      actionName = str(act.text()).strip()
      self._mplNavigationActions[actionName] = act
    self._plotWidget.setCentralWidget(self._mplCanvas)
    self._toolbar = self._plotWidget.toolBar
    self.populateToolbar()
    self._popupMenu = QtGui.QMenu()
    self._popupMenu.addAction(self._actionLegend)
    # Connect events for the graphic scene (context menu, scroll zoom, press):
    self._mplCanvas.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    self._mplCanvas.customContextMenuRequested.connect(self.onContextMenu)
    self._mplCanvas.mpl_connect('scroll_event', self.onScroll)
    self._mplCanvas.mpl_connect('button_press_event', self.onMousePress)
  def populateToolbar(self):
    """Create all toolbar actions (dump, fit, zoom/pan, curve mode, lin/log
    axes, legend, settings) and connect them to their handlers."""
    # Action to dump view in a file
    a = self.createAndAddLocalAction("dump_view.png", trQ("DUMP_VIEW_TXT"))
    a.triggered.connect(self.dumpView)
    self._toolbar.addSeparator()
    # Actions to manipulate the scene
    a = self.createAndAddLocalAction("fit_all.png", trQ("FIT_ALL_TXT"))
    a.triggered.connect(self.autoFit)
    # Zoom and pan are mutually exclusive but can be both de-activated:
    self._zoomAction = self.createAndAddLocalAction("fit_area.png", trQ("FIT_AREA_TXT"))
    self._zoomAction.triggered.connect(self.zoomArea)
    self._zoomAction.setCheckable(True)
    self._panAction = self.createAndAddLocalAction("zoom_pan.png", trQ("ZOOM_PAN_TXT"))
    self._panAction.triggered.connect(self.pan)
    self._panAction.setCheckable(True)
    self._toolbar.addSeparator()
    # Actions to change the representation of curves
    self._curveActionGroup = QtGui.QActionGroup(self._plotWidget)
    self._pointsAction = self.createAndAddLocalAction("draw_points.png", trQ("DRAW_POINTS_TXT"))
    self._pointsAction.setCheckable(True)
    self._linesAction = self.createAndAddLocalAction("draw_lines.png", trQ("DRAW_LINES_TXT"))
    self._linesAction.setCheckable(True)
    self._curveActionGroup.addAction(self._pointsAction)
    self._curveActionGroup.addAction(self._linesAction)
    self._linesAction.setChecked(True)
    self._curveActionGroup.triggered.connect(self.changeModeCurve)
    self._curveActionGroup.setExclusive(True)
    self._toolbar.addSeparator()
    # Actions to draw horizontal curves as linear or logarithmic
    self._horActionGroup = QtGui.QActionGroup(self._plotWidget)
    self._horLinearAction = self.createAndAddLocalAction("hor_linear.png", trQ("HOR_LINEAR_TXT"))
    self._horLinearAction.setCheckable(True)
    self._horLogarithmicAction = self.createAndAddLocalAction("hor_logarithmic.png", trQ("HOR_LOGARITHMIC_TXT"))
    self._horLogarithmicAction.setCheckable(True)
    self._horActionGroup.addAction(self._horLinearAction)
    self._horActionGroup.addAction(self._horLogarithmicAction)
    self._horLinearAction.setChecked(True)
    # NOTE(review): unlike the other groups this one never calls
    # setExclusive(True); QActionGroup is exclusive by default, so behavior
    # is the same -- the explicit calls elsewhere are redundant.
    self._horActionGroup.triggered.connect(self.onViewHorizontalMode)
    self._toolbar.addSeparator()
    # Actions to draw vertical curves as linear or logarithmic
    self._verActionGroup = QtGui.QActionGroup(self._plotWidget)
    self._verLinearAction = self.createAndAddLocalAction("ver_linear.png", trQ("VER_LINEAR_TXT"))
    self._verLinearAction.setCheckable(True)
    self._verLogarithmicAction = self.createAndAddLocalAction("ver_logarithmic.png", trQ("VER_LOGARITHMIC_TXT"))
    self._verLogarithmicAction.setCheckable(True)
    self._verActionGroup.addAction(self._verLinearAction)
    self._verActionGroup.addAction(self._verLogarithmicAction)
    self._verLinearAction.setChecked(True)
    self._verActionGroup.triggered.connect(self.onViewVerticalMode)
    self._verActionGroup.setExclusive(True)
    self._toolbar.addSeparator()
    # Action to show or hide the legend
    self._actionLegend = self.createAndAddLocalAction("legend.png", trQ("SHOW_LEGEND_TXT"))
    self._actionLegend.setCheckable(True)
    self._actionLegend.triggered.connect(self.showHideLegend)
    if self._DEFAULT_LEGEND_STATE:
      self._actionLegend.setChecked(True)
    self._toolbar.addSeparator()
    # Action to set the preferences
    a = self.createAndAddLocalAction("settings.png", trQ("SETTINGS_TXT"))
    a.triggered.connect(self.onSettings)
    pass
  def dumpView(self):
    """Ask the user for a file name and save the figure there (format is
    inferred from the extension by matplotlib)."""
    # Choice of the view backup file
    filters = []
    for form in ["IMAGES_FILES", "PDF_FILES", "POSTSCRIPT_FILES", "ENCAPSULATED_POSTSCRIPT_FILES"]:
      filters.append(trQ(form))
    fileName = self._sgPyQt.getFileName(self._sgPyQt.getDesktop(),
                                        "",
                                        filters,
                                        trQ("DUMP_VIEW_FILE"),
                                        False )
    # NOTE(review): isEmpty() assumes a QString return value -- would break if
    # the binding returned a plain str; confirm against the pyqtside wrapper.
    if not fileName.isEmpty():
      name = str(fileName)
      self._mplAxes.figure.savefig(name)
    pass
  def autoFit(self, check=True, repaint=True):
    """Fit the axes to the data, with AUTOFIT_MARGIN extra vertical room.
    `check` is unused (kept for Qt signal signature compatibility)."""
    if self.__repaintOK():
      self._mplAxes.relim()
      xm, xM = self._mplAxes.xaxis.get_data_interval()
      ym, yM = self._mplAxes.yaxis.get_data_interval()
      i = yM-ym
      self._mplAxes.axis([xm, xM, ym-i*self.AUTOFIT_MARGIN, yM+i*self.AUTOFIT_MARGIN])
      if repaint:
        self.repaint()

  def zoomArea(self):
    """Toggle rubber-band zoom; pan and zoom are mutually exclusive."""
    if self._panAction.isChecked() and self._zoomAction.isChecked():
      self._panAction.setChecked(False)
    # Trigger underlying matplotlib action:
    self._mplNavigationActions["Zoom"].trigger()

  def pan(self):
    """Toggle pan mode; pan and zoom are mutually exclusive."""
    if self._panAction.isChecked() and self._zoomAction.isChecked():
      self._zoomAction.setChecked(False)
    # Trigger underlying matplotlib action:
    self._mplNavigationActions["Pan"].trigger()

  def getMarker(self, go_next=False):
    """Return the current marker; if go_next, first advance the cycle
    (wrapping around CURVE_MARKERS)."""
    if go_next:
      self._lastMarkerID = (self._lastMarkerID+1) % len(self.CURVE_MARKERS)
    return self.CURVE_MARKERS[self._lastMarkerID]
def changeModeCurve(self, repaint=True):
if not self.__repaintOK():
return
action = self._curveActionGroup.checkedAction()
if action is self._pointsAction :
for crv_view in self._curveViews.values():
crv_view.setLineStyle("None")
elif action is self._linesAction :
for crv_view in self._curveViews.values():
crv_view.setLineStyle("-")
else :
raise NotImplementedError
if repaint:
self.repaint()
  def setXLog(self, log, repaint=True):
    """Switch the X axis to log (True) or linear (False) scale, syncing the
    toolbar actions; _blockLogSignal prevents re-entrant signal handling."""
    if not self.__repaintOK():
      return
    self._blockLogSignal = True
    if log:
      self._mplAxes.set_xscale('log')
      self._horLogarithmicAction.setChecked(True)
    else:
      self._mplAxes.set_xscale('linear')
      self._horLinearAction.setChecked(True)
    if repaint:
      self.autoFit()
      self.repaint()
    self._blockLogSignal = False

  def setYLog(self, log, repaint=True):
    """Y-axis counterpart of setXLog()."""
    if not self.__repaintOK():
      return
    self._blockLogSignal = True
    if log:
      self._mplAxes.set_yscale('log')
      self._verLogarithmicAction.setChecked(True)
    else:
      self._mplAxes.set_yscale('linear')
      self._verLinearAction.setChecked(True)
    if repaint:
      self.autoFit()
      self.repaint()
    self._blockLogSignal = False

  def setXSciNotation(self, sciNotation, repaint=True):
    """Enable/disable scientific notation for X tick labels."""
    self._axisXSciNotation = sciNotation
    self.changeFormatAxis()
    if repaint:
      self.repaint()

  def setYSciNotation(self, sciNotation, repaint=True):
    """Enable/disable scientific notation for Y tick labels."""
    self._axisYSciNotation = sciNotation
    self.changeFormatAxis()
    if repaint:
      self.repaint()
  def onViewHorizontalMode(self, checked=True, repaint=True):
    """Toolbar callback: apply the checked horizontal lin/log action.
    Ignored while _blockLogSignal is set (avoids signal recursion)."""
    if self._blockLogSignal:
      return
    action = self._horActionGroup.checkedAction()
    if action is self._horLinearAction:
      self.setXLog(False, repaint)
    elif action is self._horLogarithmicAction:
      self.setXLog(True, repaint)
    else:
      raise NotImplementedError

  def onViewVerticalMode(self, checked=True, repaint=True):
    """Toolbar callback: apply the checked vertical lin/log action.
    NOTE(review): unlike the horizontal variant this also repaints at the
    end, making a second repaint after setYLog -- looks redundant; confirm."""
    if self._blockLogSignal:
      return
    action = self._verActionGroup.checkedAction()
    if action is self._verLinearAction:
      self.setYLog(False, repaint)
    elif action is self._verLogarithmicAction:
      self.setYLog(True, repaint)
    else:
      raise NotImplementedError
    if repaint:
      self.repaint()
  def __adjustFigureMargins(self, withLegend):
    """ Adjust figure margins to make room for the legend.
    Requires self._legend to have been drawn already (its window extent is
    read); with withLegend=False the margins are reset to defaults. """
    if withLegend:
      leg = self._legend
      bbox = leg.get_window_extent()
      # In axes coordinates:
      bbox2 = bbox.transformed(leg.figure.transFigure.inverted())
      if self._legendLoc == "right":
        self._mplFigure.subplots_adjust(right=1.0-(bbox2.width+0.02))
      elif self._legendLoc == "bottom":
        self._mplFigure.subplots_adjust(bottom=bbox2.height+0.1)
    else:
      # Reset to default (rc) values
      self._mplFigure.subplots_adjust(bottom=0.1, right=0.9)
def setLegendVisible(self, visible, repaint=True):
if visible and not self._actionLegend.isChecked():
self._actionLegend.setChecked(True)
self.showHideLegend(repaint=repaint)
if not visible and self._actionLegend.isChecked():
self._actionLegend.setChecked(False)
self.showHideLegend(repaint=repaint)
  def showHideLegend(self, actionChecked=None, repaint=True):
    """Rebuild or remove the legend according to the toolbar action state,
    adjust figure margins, and highlight the current curve's legend entry.
    `actionChecked` is unused (Qt signal signature compatibility)."""
    if not self.__repaintOK():  # Show/hide legend is extremely costly
      return
    show = self._actionLegend.isChecked()
    nCurves = len(self._curveViews)
    if nCurves > 10: fontSize = 'x-small'
    else:            fontSize = None
    if nCurves == 0:
      # Remove legend
      leg = self._mplAxes.legend()
      if leg is not None: leg.remove()
    if show and nCurves > 0:
      # Recreate legend from scratch
      if self._legend is not None:
        self._legend = None
        self._mplAxes._legend = None
      if self._legendLoc == "bottom":
        self._legend = self._mplAxes.legend(loc="upper left", bbox_to_anchor=(0.0, -0.05, 1.0, -0.05),
                                            borderaxespad=0.0, mode="expand", fancybox=True,
                                            shadow=True, ncol=3, prop={'size':fontSize, 'style': 'italic'})
      elif self._legendLoc == "right":
        self._legend = self._mplAxes.legend(loc="upper left", bbox_to_anchor=(1.02,1.0), borderaxespad=0.0,
                                            ncol=1, fancybox=True, shadow=True, prop={'size':fontSize, 'style': 'italic'})
      else:
        raise Exception("Invalid legend placement! Must be 'bottom' or 'right'")
      # Canvas must be drawn so we can adjust the figure placement:
      self._mplCanvas.draw()
      self.__adjustFigureMargins(withLegend=True)
    else:
      if self._legend is None:
        # Nothing to do
        return
      else:
        self._legend.set_visible(False)
        self._legend = None
        self._mplAxes._legend = None
        self._mplCanvas.draw()
        self.__adjustFigureMargins(withLegend=False)
    # Grey out the background of the current curve's legend label:
    curr_crv = self._model._currentCurve
    if curr_crv is None: curr_title = None
    else:                curr_title = curr_crv.getTitle()
    if self._legend is not None:
      for label in self._legend.get_texts() :
        text = label.get_text()
        if (text == curr_title):
          label.set_backgroundcolor('0.85')
        else :
          label.set_backgroundcolor('white')
    if repaint:
      self.repaint()
  def onSettings(self, trigger=False, dlg_test=None):
    """Open the plot-settings dialog pre-filled with the current state, and
    apply the user's choices on accept.  `dlg_test` lets tests inject a
    dialog double; `trigger` is unused (Qt signal signature compatibility)."""
    dlg = dlg_test or PlotSettings()
    # --- Fill the dialog with the current state: ---
    dlg.titleEdit.setText(self._mplAxes.get_title())
    dlg.axisXTitleEdit.setText(self._mplAxes.get_xlabel())
    dlg.axisYTitleEdit.setText(self._mplAxes.get_ylabel())
    dlg.gridCheckBox.setChecked(self._mplAxes.xaxis._gridOnMajor)  # could not find a relevant API to check this
    dlg.axisXSciCheckBox.setChecked(self._axisXSciNotation)
    dlg.axisYSciCheckBox.setChecked(self._axisYSciNotation)
    xmin, xmax = self._mplAxes.get_xlim()
    ymin, ymax = self._mplAxes.get_ylim()
    xminText = "%g" %xmin
    xmaxText = "%g" %xmax
    yminText = "%g" %ymin
    ymaxText = "%g" %ymax
    dlg.axisXMinEdit.setText(xminText)
    dlg.axisXMaxEdit.setText(xmaxText)
    dlg.axisYMinEdit.setText(yminText)
    dlg.axisYMaxEdit.setText(ymaxText)
    # List of markers
    dlg.markerCurve.clear()
    for marker in self.CURVE_MARKERS :
      dlg.markerCurve.addItem(marker)
    curr_crv = self._model.getCurrentCurve()
    if not curr_crv is None:
      # A curve is selected: enable per-curve color/marker settings.
      dlg.colorCurve.setEnabled(True)
      dlg.markerCurve.setEnabled(True)
      name = curr_crv.getTitle()
      dlg.nameCurve.setText(name)
      view = self._curveViews[curr_crv.getID()]
      marker = view.getMarker()
      color = view.getColor()
      index = dlg.markerCurve.findText(marker)
      dlg.markerCurve.setCurrentIndex(index)
      rgb = colors.colorConverter.to_rgb(color)
      dlg.setRGB(rgb[0],rgb[1],rgb[2])
    else :
      dlg.colorCurve.setEnabled(False)
      dlg.markerCurve.setEnabled(False)
      dlg.nameCurve.setText("")
      view = None
    # Legend state -> dialog:
    if self._legend is None:
      dlg.showLegendCheckBox.setChecked(False)
      dlg.legendPositionComboBox.setEnabled(False)
    else :
      if self._legend.get_visible():
        dlg.showLegendCheckBox.setChecked(True)
        dlg.legendPositionComboBox.setEnabled(True)
        if self._legendLoc == "bottom":
          dlg.legendPositionComboBox.setCurrentIndex(0)
        elif self._legendLoc == "right" :
          dlg.legendPositionComboBox.setCurrentIndex(1)
      else :
        dlg.showLegendCheckBox.setChecked(False)
        dlg.legendPositionComboBox.setEnabled(False)
    # --- Apply the dialog's choices on accept: ---
    if dlg.exec_():
      # Title
      self._model.setTitle(dlg.titleEdit.text())
      # Axis
      self._model.setXLabel(dlg.axisXTitleEdit.text())
      self._model.setYLabel(dlg.axisYTitleEdit.text())
      # Grid
      if dlg.gridCheckBox.isChecked() :
        self._mplAxes.grid(True)
      else :
        self._mplAxes.grid(False)
      # Legend
      if dlg.showLegendCheckBox.isChecked():
        self._actionLegend.setChecked(True)
        if dlg.legendPositionComboBox.currentIndex() == 0 :
          self._legendLoc = "bottom"
        elif dlg.legendPositionComboBox.currentIndex() == 1 :
          self._legendLoc = "right"
      else :
        self._actionLegend.setChecked(False)
      # Axis ranges (ValueError propagates if the text is not a number):
      xminText = dlg.axisXMinEdit.text()
      xmaxText = dlg.axisXMaxEdit.text()
      yminText = dlg.axisYMinEdit.text()
      ymaxText = dlg.axisYMaxEdit.text()
      self._mplAxes.axis([float(xminText), float(xmaxText), float(yminText), float(ymaxText)] )
      self._axisXSciNotation = dlg.axisXSciCheckBox.isChecked()
      self._axisYSciNotation = dlg.axisYSciCheckBox.isChecked()
      self.changeFormatAxis()
      # Color and marker of the curve
      if view:
        view.setColor(dlg.getRGB())
        view.setMarker(self.CURVE_MARKERS[dlg.markerCurve.currentIndex()])
      self.showHideLegend(repaint=True)
      self._mplCanvas.draw()
    pass
  def updateViewTitle(self):
    """Set the SALOME view title to 'CurvePlot (<id>) - <model title>'."""
    s = ""
    if self._model._title != "":
      s = " - %s" % self._model._title
    title = "CurvePlot (%d)%s" % (self._model.getID(), s)
    self._sgPyQt.setViewTitle(self._salomeViewID, title)

  def onCurrentPlotSetChange(self):
    """ Avoid a unnecessary call to update() when just switching current plot set! """
    pass

  def onCurrentCurveChange(self):
    """Model callback: move the highlight from the previous current curve to
    the new one, then rebuild the legend and repaint."""
    curr_crv2 = self._model.getCurrentCurve()
    if curr_crv2 != self._currCrv:
      if self._currCrv is not None:
        view = self._curveViews[self._currCrv.getID()]
        view.toggleHighlight(False)
      if not curr_crv2 is None:
        view = self._curveViews[curr_crv2.getID()]
        view.toggleHighlight(True)
      self._currCrv = curr_crv2
      self.showHideLegend(repaint=False) # redo legend
      self.repaint()
  def changeFormatAxis(self) :
    """Apply the sci-notation flags to each axis, but only while the axis is
    in linear mode (the ScalarFormatter is not used on log axes)."""
    if not self.__repaintOK():
      return
    # don't try to switch to sci notation if we are not using the
    # matplotlib.ticker.ScalarFormatter (i.e. if in Log for ex.)
    if self._horLinearAction.isChecked():
      if self._axisXSciNotation :
        self._mplAxes.ticklabel_format(style='sci',scilimits=(0,0), axis='x')
      else :
        self._mplAxes.ticklabel_format(style='plain',axis='x')
    if self._verLinearAction.isChecked():
      if self._axisYSciNotation :
        self._mplAxes.ticklabel_format(style='sci',scilimits=(0,0), axis='y')
      else :
        self._mplAxes.ticklabel_format(style='plain',axis='y')
  def update(self):
    """Full resynchronization of the view with the model: create the SALOME
    view on first call, reconcile the curve views with the model's curves,
    then refresh labels, scales, modes, legend, formatting and fit."""
    if self._salomeViewID is None:
      self.createPlotWidget()
      self._salomeViewID = self._sgPyQt.createView("CurvePlot", self._plotWidget)
      Logger.Debug("Creating SALOME view ID=%d" % self._salomeViewID)
      self._sgPyQt.setViewVisible(self._salomeViewID, True)
    self.updateViewTitle()
    # Check list of curve views:
    set_mod = set(self._model._curves.keys())
    set_view = set(self._curveViews.keys())
    # Deleted/Added curves:
    dels = set_view - set_mod
    added = set_mod - set_view
    for d in dels:
      self.removeCurve(d)
    if not len(self._curveViews):
      # Reset color cycle
      self._mplAxes.set_color_cycle(None)
    for a in added:
      self.appendCurve(a)
    # Axes labels and title
    self._mplAxes.set_xlabel(self._model._xlabel)
    self._mplAxes.set_ylabel(self._model._ylabel)
    self._mplAxes.set_title(self._model._title)
    self.onViewHorizontalMode(repaint=False)
    self.onViewVerticalMode(repaint=False)
    self.changeModeCurve(repaint=False)
    self.showHideLegend(repaint=False) # The canvas is repainted anyway (needed to get legend bounding box)
    self.changeFormatAxis()
    # Redo auto-fit
    self.autoFit(repaint=False)
    self.repaint()
  def onDataChange(self):
    """Model callback on data change; curve redraw is done in CurveView."""
    # the rest is done in the CurveView:
    self.autoFit(repaint=True)

  def onMousePress(self, event):
    """MPL callback: a right click (button 3) cancels any active pan/zoom
    mode so that only the context menu reacts."""
    if event.button == 3 :
      if self._panAction.isChecked():
        self._panAction.setChecked(False)
      if self._zoomAction.isChecked():
        self._zoomAction.setChecked(False)

  def onContextMenu(self, position):
    """Qt callback: pop up the contextual menu at the clicked position."""
    pos = self._mplCanvas.mapToGlobal(QtCore.QPoint(position.x(),position.y()))
    self._popupMenu.exec_(pos)
def onScroll(self, event):
# Event location (x and y)
xdata = event.xdata
ydata = event.ydata
cur_xlim = self._mplAxes.get_xlim()
cur_ylim = self._mplAxes.get_ylim()
base_scale = 2.
if event.button == 'down':
# deal with zoom in
scale_factor = 1 / base_scale
elif event.button == 'up':
# deal with zoom out
scale_factor = base_scale
else:
# deal with something that should never happen
scale_factor = 1
new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor
relx = (cur_xlim[1] - xdata)/(cur_xlim[1] - cur_xlim[0])
rely = (cur_ylim[1] - ydata)/(cur_ylim[1] - cur_ylim[0])
self._mplAxes.set_xlim([xdata - new_width * (1-relx), xdata + new_width * (relx)])
self._mplAxes.set_ylim([ydata - new_height * (1-rely), ydata + new_height * (rely)])
self.repaint()
pass
  def onPressEvent(self, event):
    """MPL press callback (legacy): show the popup menu on right click.
    NOTE(review): the print below and the commented-out variants look like
    debugging leftovers; this handler appears superseded by onMousePress/
    onContextMenu -- confirm whether it is still connected anywhere."""
    if event.button == 3 :
      #self._mplCanvas.emit(QtCore.SIGNAL("button_release_event()"))
      canvasSize = event.canvas.geometry()
      point = event.canvas.mapToGlobal(QtCore.QPoint(event.x,canvasSize.height()-event.y))
      self._popupMenu.exec_(point)
    else :
      print "Press event on the other button"
    #if event.button == 3 :
    #  canvasSize = event.canvas.geometry()
    #  point = event.canvas.mapToGlobal(QtCore.QPoint(event.x,canvasSize.height()-event.y))
    #  self._popupMenu.move(point)
    #  self._popupMenu.show()

  def onMotionEvent(self, event):
    """Debug-only motion handler (prints the mouse button)."""
    print "OnMotionEvent ",event.button
    #if event.button == 3 :
    #  event.button = None
    #  return True

  def onReleaseEvent(self, event):
    """Debug-only release handler (prints the mouse button)."""
    print "OnReleaseEvent ",event.button
    #if event.button == 3 :
    #  event.button = None
    #  return False
# | lgpl-2.1 | 6,267,137,297,581,451,000 | 35.762108 | 122 | 0.648119 | false |
# mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part04-e16_split_date/test/test_split_date.py | 1 | 2526
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
import numpy as np
import pandas as pd
from tmc import points
from tmc.utils import load, get_out, patch_helper
module_name="src.split_date"
split_date = load(module_name, "split_date")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p04-16.1')
class SplitDate(unittest.TestCase):
    """TMC-graded checks for the student-implemented ``split_date``.

    ``split_date`` (loaded from ``src.split_date``) must return a DataFrame
    with columns Weekday/Day/Month/Year/Hour parsed from the source data.
    """
    # @classmethod
    # def setUpClass(cls):
    #     cls.df = split_date()
    def setUp(self):
        # Re-run the student's function before every test so checks are
        # independent of each other.
        self.df = split_date()
    def test_shape(self):
        # 37128 rows x 5 split columns is the expected result shape.
        self.assertEqual(self.df.shape, (37128, 5),
            msg="The DataFrame has incorrect shape!")
    def test_columns(self):
        # Column names and their order must match exactly.
        np.testing.assert_array_equal(self.df.columns,
            ["Weekday", "Day", "Month", "Year", "Hour"],
            err_msg="Incorrect column names!")
    def test_dtypes(self):
        # Weekday is a string column (object dtype); the rest are integers.
        correct_types = [object, np.integer, np.integer, np.integer, np.integer]
        for i, (result, correct) in enumerate(zip(self.df.dtypes, correct_types)):
            self.assertTrue(np.issubdtype(result, correct),
                msg="Types don't match on column %i! Expected %s got %s." % (i, correct, result))
        # np.testing.assert_array_equal(self.df.dtypes,
        #                   [object, int, np.integer, int, int], err_msg="Incorrect column types")
    def test_called(self):
        # main() must actually read a CSV and call split_date.
        with patch(ph("pd.read_csv"), wraps=pd.read_csv) as prc,\
             patch(ph("split_date"), wraps=split_date) as psd:
            main()
            psd.assert_called()
    def test_content(self):
        # Every value must fall in its plausible calendar range.
        weekdays = "Mon Tue Wed Thu Fri Sat Sun".split()
        for elem in self.df["Weekday"]:
            self.assertIn(elem, weekdays, msg="Incorrect value '%s' in column Weekday!" % elem)
        for index in self.df.index:
            weekday, day, month, year, hour = self.df.loc[index]
            self.assertIn(weekday, weekdays, msg="Incorrect value '%s' in column Weekday!" % weekday)
            self.assertIn(day, range(1,32), msg="Incorrect value '%s' in column Day!" % day)
            self.assertIn(month, range(1,13), msg="Incorrect value '%s' in column Month!" % month)
            self.assertIn(year, range(2014,2019), msg="Incorrect value '%s' in column Year!" % year)
            self.assertIn(hour, range(0,24), msg="Incorrect value '%s' in column Hour!" % hour)
# Allow running this test module directly (outside the TMC runner).
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 | -8,878,842,569,421,881,000 | 37.861538 | 110 | 0.583531 | false |
priyesh16/thesis | scratch/runall.py | 1 | 3159 | #!/usr/bin/python
import sys
import os
import getopt
import subprocess
from subprocess import call
import re
import numpy as np
import matplotlib.pyplot as plt
from operator import itemgetter
# Module-level accumulators shared by the helpers below.
nodelist = []
dijhops = []
airhops = []
files = []
#comblist = []
def sortlist(comblist):
    """Return *comblist* sorted ascending by each row's second element."""
    return sorted(comblist, key=itemgetter(1))
# Per-node mean/variance of hop counts for the AIR and Dijkstra runs,
# filled by main() from the whitespace-separated stats file below
# (columns: node air_mean air_var dij_mean dij_var -- see main()).
airmean = []
airvariance = []
dijmean = []
dijvariance = []
statfile = "scratch/mean_variance.txt"
def main():
    # Parse ``statfile`` into a row-per-node matrix of floats and load the
    # mean/variance columns into the module-level accumulators used by
    # creategraph().  The commented-out code re-ran the simulations first.
    #p = subprocess.Popen("rm scratch/mean_variance.txt", shell=True)
    #p.wait();
    #for i in range(0, total):
    #runsingle(i)
    matrix = []
    with open(statfile, "r") as f:
        for line in f:
            # Each line: node air_mean air_var dij_mean dij_var
            words = line.split()
            values = [float(words[0]), float(words[1]),
                float(words[2]), float(words[3]), float(words[4])]
            matrix.append(values)
            '''
            airmean.append(float(words[1]))
            airvariance.append(float(words[2]))
            dijmean.append(float(words[3]))
            dijvariance.append(float(words[4]))
            '''
    # Debug dump of the parsed matrix (Python 2 print statements).
    for row in matrix:
        print row
    sort = matrix
    #sort = sortlist(matrix)
    for row in sort:
        print row
        # Column layout: [node, air_mean, air_var, dij_mean, dij_var]
        airmean.append(row[1])
        airvariance.append(row[2])
        dijmean.append(row[3])
        dijvariance.append(row[4])
def creategraph(total):
    # Plot mean hop count with standard-deviation error bars for both
    # routing schemes and save the figure.  NOTE(review): the ``total``
    # parameter is unused here -- the x axis comes from len(airmean).
    index = np.arange(len(airmean))
    fig, ax = plt.subplots()
    # bar_width/opacity/error_config are only used by the commented-out
    # bar-chart variant below.
    bar_width = 0.35
    opacity = 0.4
    error_config = {'ecolor': '0.3'}
    '''
    rects1 = plt.bar(index, dijhops, bar_width,
        alpha=opacity, color='b', yerr=0, error_kw=error_config, label='Dijkstra\'s')
    rects2 = plt.bar(index + bar_width, airhops, bar_width,
        alpha=opacity, color='r', yerr=0, error_kw=error_config, label='Air')
    '''
    # Error bars show one standard deviation (sqrt of the variance).
    dijsd = np.sqrt(dijvariance)
    airsd = np.sqrt(airvariance)
    axes = plt.gca()
    axes.set_ylim([0,10])
    print len(airmean)
    print len(index)
    plt.errorbar(index, airmean, xerr=0, yerr=airsd)
    plt.errorbar(index, dijmean, xerr=0, yerr=dijsd)
    #plt.errorbar(index, dijmean, xerr=0, yerr=dijvariance)
    plt.xlabel('Nodes')
    plt.ylabel('Hop Counts')
    plt.title('Mean and Standard Deviation wrt Hop Counts')
    '''hide the x axix labels'''
    #frame1 = plt.gca()
    #frame1.axes.get_xaxis().set_ticks([])
    #ax.set_xticks(nodeids)
    #ax.set_xticklabels(nodeids)
    #rects = ax.patches
    #for rect, label in zip(rects, nodeids):
    #    height = rect.get_height()
    #    ax.text(rect.get_x() + rect.get_width()/2, height + 1, label, ha='center', va='bottom')
    plt.legend()
    #plt.tight_layout()
    #fig = plt.figure()
    plt.savefig("scratch/mean_variance.png", bbox_inches='tight')
    plt.show()
    #plt.close(fig)
def runsingle(dest):
    # Launch the per-destination simulation script and block until it exits.
    # NOTE(review): shell=True with string concatenation is shell-injection
    # prone; harmless for the integer ``dest`` used by this script, but
    # prefer subprocess with an argument list if inputs ever vary.
    command = "scratch/nodes_graph.py " + str(dest)
    p = subprocess.Popen(command, shell=True)
    p.wait();
def getfiles(dest):
    """Append the Dijkstra and AIR stats file paths for *dest* to ``files``."""
    base = "scratch/subdir/statfiles/"
    files.append(base + "dij" + dest)
    files.append(base + "air" + dest)
if __name__ == "__main__":
total = int(sys.argv[1])
main();
creategraph(total)
| gpl-2.0 | 3,099,603,084,124,216,000 | 23.679688 | 96 | 0.60114 | false |
VicenteYanez/GFA | gfa/field_analysis/fun_fig.py | 1 | 5557 | #! /usr/bin/env python3
"Function to plot the examples script"
import pdb
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
def greater_absolute(value1, value2):
    """Build a zero-symmetric pair from the larger-magnitude argument.

    Returns ``np.array([-value1, value1])`` when ``|value1| >= |value2|``,
    otherwise ``np.array([value2, -value2])``.  Used to center a colorbar
    scale on zero.
    """
    use_first = abs(value1) >= abs(value2)
    pair = [-value1, value1] if use_first else [value2, -value2]
    return np.array(pair)
def vorticity_figure(x, y, z):
    """
    Figure of example_vorticity.py.

    Interpolates the scattered vorticity samples onto a regular grid and
    draws them on a PlateCarree map with a zero-centred seismic colormap.

    Parameters
    ----------
    x, y : numpy arrays of longitudes / latitudes of the sample points.
    z : numpy array of vorticity values; scaled by 360 below so the
        colorbar reads degrees/year (assumes z is degrees/day -- TODO
        confirm units with the caller).

    Returns
    -------
    The matplotlib Figure holding the rendered map.
    """
    fig = plt.figure(figsize=(6, 8))
    ax = plt.axes(projection=ccrs.PlateCarree())
    paralelos = np.arange(-46, -35, 2)
    meridianos = np.arange(-76, -70, 2)
    # map extent: half a degree of margin around the data
    latmin_m = y.min() - 0.5
    latmax_m = y.max() + 0.5
    lonmin_m = x.min() - 0.5
    lonmax_m = x.max() + 0.5
    # interpolation zone: exactly the data bounding box
    latmin = y.min()
    latmax = y.max()
    lonmin = x.min()
    lonmax = x.max()
    ax.set_extent([lonmin_m, lonmax_m, latmin_m, latmax_m])
    ax.set_xticks(meridianos, crs=ccrs.PlateCarree())
    ax.set_yticks(paralelos, crs=ccrs.PlateCarree())
    # interpolation grid (resolucion x resolucion nodes)
    resolucion = 400
    lats = np.linspace(latmin, latmax, resolucion)
    lons = np.linspace(lonmin, lonmax, resolucion)
    y_mapa, x_mapa = np.meshgrid(lats, lons)
    z_mapa = griddata((x, y), z, (x_mapa, y_mapa), method='linear')
    # mask grid nodes that fall outside the convex hull of the data
    z_mapa = np.ma.masked_array(z_mapa, mask=np.isnan(z_mapa))
    # plot grid
    cmap = plt.cm.seismic
    cmap.set_under(color="black", alpha="0")
    # symmetric colorbar scale centred on zero, in degrees/year
    z_mapa_esc = z_mapa*360
    vesc = greater_absolute(z_mapa_esc.min(), z_mapa_esc.max())
    im = ax.pcolormesh(x_mapa, y_mapa, z_mapa_esc,
                       cmap=cmap,
                       transform=ccrs.PlateCarree(),
                       vmin=vesc.min(), vmax=vesc.max(),
                       alpha=1, zorder=1)
    cb = plt.colorbar(im)
    cb.set_label('º/year')
    # final details
    ax.coastlines(resolution='10m')
    return fig
def cinematic_figure(x, y, z):
    """
    Build the kinematic-vorticity map.

    Interpolates the scattered samples onto a regular grid and draws them
    on a PlateCarree map with a jet colormap fixed to the [0, 1] range.

    Parameters
    ----------
    x, y : numpy arrays of longitudes / latitudes of the sample points.
    z : numpy array of kinematic vorticity values (expected within
        [0, 1], matching the fixed color scale -- TODO confirm).

    Returns
    -------
    The matplotlib Figure holding the rendered map.
    """
    fig = plt.figure(figsize=(6, 8))
    ax = plt.axes(projection=ccrs.PlateCarree())
    paralelos = np.arange(-46, -35, 2)
    meridianos = np.arange(-76, -70, 2)
    # map extent: half a degree of margin around the data
    latmin_m = y.min() - 0.5
    latmax_m = y.max() + 0.5
    lonmin_m = x.min() - 0.5
    lonmax_m = x.max() + 0.5
    # interpolation zone: the data bounding box
    latmin = y.min()
    latmax = y.max()
    lonmin = x.min()
    lonmax = x.max()
    ax.set_extent([lonmin_m, lonmax_m, latmin_m, latmax_m])
    ax.set_xticks(meridianos, crs=ccrs.PlateCarree())
    ax.set_yticks(paralelos, crs=ccrs.PlateCarree())
    # interpolation grid
    resolucion = 1000
    lats = np.linspace(latmin, latmax, resolucion)
    lons = np.linspace(lonmin, lonmax, resolucion)
    y_mapa, x_mapa = np.meshgrid(lats, lons)
    z_mapa = griddata((x, y), z, (x_mapa, y_mapa), method='linear')
    # mask grid nodes outside the convex hull of the data
    z_mapa = np.ma.masked_array(z_mapa, mask=np.isnan(z_mapa))
    # plot grid
    cmap = plt.cm.jet
    cmap.set_under(color="black", alpha="0")
    im = ax.pcolormesh(x_mapa, y_mapa, z_mapa,
                       cmap=cmap,
                       transform=ccrs.PlateCarree(),
                       vmin=0, vmax=1,
                       alpha=1, zorder=1)
    cb = plt.colorbar(im)
    cb.set_label('Cinematic Vorticity')
    # final details
    ax.coastlines(resolution='10m')
    return fig
def add_vector(figure, axes, x, y, vx, vy):
    """Plot the values (x, y, vx, vy) on *figure* and return the
    (figure, axes) pair unchanged for chaining."""
    figure.plot(x, y, vx, vy)
    return figure, axes
def tensor_figure(x, y, evalue, evector):
    """
    Figure of example_principalstress.py: draws the principal-axis
    eigenvectors of the tensor field at each (x, y) location.

    Parameters
    ----------
    x, y : numpy arrays of longitudes / latitudes of the sample points.
    evalue, evector : eigen-values / eigen-vectors per point; evector is
        indexed as evector[:, 0, :] below -- TODO confirm the layout
        produced by the eigen-solver.

    Returns
    -------
    The matplotlib Figure holding the rendered map.
    """
    fig = plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    paralelos = np.arange(-46, -35, 2)
    meridianos = np.arange(-76, -70, 2)
    # map extent: half a degree of margin around the data
    latmin_m = y.min() - 0.5
    latmax_m = y.max() + 0.5
    lonmin_m = x.min() - 0.5
    lonmax_m = x.max() + 0.5
    ax.set_extent([lonmin_m, lonmax_m, latmin_m, latmax_m])
    ax.set_xticks(meridianos, crs=ccrs.PlateCarree())
    ax.set_yticks(paralelos, crs=ccrs.PlateCarree())
    ax.gridlines(crs=ccrs.PlateCarree())
    # Eigen-vector x-components with a non-zero imaginary part.
    # NOTE(review): intent unclear -- verify against the eigen-solver output.
    xf = [i for i in np.array(evector)[:, 0, 0] if i.imag != 0]
    print(xf)
    # plot tensor axes as arrow pairs
    # TODO(review): the two-argument quiver calls below pass no (u, v)
    # components for the blue arrows; matplotlib then treats (x, y) as the
    # vector components themselves -- check the intended arguments.
    plt.quiver(x, y, evector.T[0], color='red')  # first vector
    plt.quiver(x, y, color='blue')  # second vector
    plt.quiver(x, y, evector.T[0], color='red')  # oppose first vector
    plt.quiver(x, y, color='blue')  # oppose second vector
    # final details
    ax.coastlines(resolution='10m')
    return fig
| gpl-3.0 | 6,889,460,577,200,876,000 | 26.102439 | 70 | 0.566955 | false |
sahilTakiar/spark | python/pyspark/sql/tests.py | 1 | 276859 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import warnings
import py4j
from contextlib import contextmanager
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.util import _exception_message
_pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
_pandas_requirement_message = _exception_message(e)
_pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
_pyarrow_requirement_message = _exception_message(e)
_test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
_test_not_compiled_message = _exception_message(e)
_have_pandas = _pandas_requirement_message is None
_have_pyarrow = _pyarrow_requirement_message is None
_test_compiled = _test_not_compiled_message is None
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.sql.types import _merge_type
from pyspark.tests import QuietTest, ReusedPySparkTestCase, PySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
    """
    A fixed tzinfo whose UTC offset is a constant number of hours.
    """
    def __init__(self, offset=0):
        # Both utcoffset() and dst() report this same fixed delta.
        self.ZERO = datetime.timedelta(hours=offset)
    def utcoffset(self, dt):
        # Constant offset, independent of the datetime passed in.
        return self.ZERO
    def dst(self, dt):
        # No daylight-saving adjustment beyond the fixed delta.
        return self.ZERO
class ExamplePointUDT(UserDefinedType):
    """
    User-defined type (UDT) for ExamplePoint, backed by a Scala UDT.
    """
    @classmethod
    def sqlType(self):
        # Stored in SQL as a fixed pair of non-null doubles.
        return ArrayType(DoubleType(), False)
    @classmethod
    def module(cls):
        return 'pyspark.sql.tests'
    @classmethod
    def scalaUDT(cls):
        # Fully-qualified name of the JVM-side counterpart.
        return 'org.apache.spark.sql.test.ExamplePointUDT'
    def serialize(self, obj):
        # Python object -> SQL array representation.
        return [obj.x, obj.y]
    def deserialize(self, datum):
        # SQL array -> Python object.
        return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
    """
    An example class to demonstrate UDT in Scala, Java, and Python.
    """
    __UDT__ = ExamplePointUDT()
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        return "ExamplePoint(%s,%s)" % (self.x, self.y)
    def __str__(self):
        return "(%s,%s)" % (self.x, self.y)
    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
            other.x == self.x and other.y == self.y
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep inequality consistent on both versions.
        return not self.__eq__(other)
    def __hash__(self):
        # Defined alongside __eq__ so equal points hash equally and the
        # class stays hashable under Python 3.
        return hash((self.x, self.y))
class PythonOnlyUDT(UserDefinedType):
    """
    User-defined type (UDT) that exists only on the Python side
    (no scalaUDT counterpart is declared).
    """
    @classmethod
    def sqlType(self):
        # Stored in SQL as a fixed pair of non-null doubles.
        return ArrayType(DoubleType(), False)
    @classmethod
    def module(cls):
        return '__main__'
    def serialize(self, obj):
        # Python object -> SQL array representation.
        return [obj.x, obj.y]
    def deserialize(self, datum):
        # SQL array -> Python object.
        return PythonOnlyPoint(datum[0], datum[1])
    @staticmethod
    def foo():
        # Placeholder member; exercises attribute access on UDTs in tests.
        pass
    @property
    def props(self):
        # Placeholder property; exercises attribute access on UDTs in tests.
        return {}
class PythonOnlyPoint(ExamplePoint):
    """
    An example class to demonstrate a UDT available only in Python,
    unlike its parent ExamplePoint which has a Scala counterpart.
    """
    # Overrides the parent's UDT with the Python-only variant.
    __UDT__ = PythonOnlyUDT()
class MyObject(object):
    """Simple key/value holder used as a generic test fixture."""
    def __init__(self, key, value):
        self.key = key
        self.value = value
class SQLTestUtils(object):
    """
    Mixin of helpers that assume the instance has a 'spark' attribute holding
    a SparkSession. Usually combined with 'ReusedSQLTestCase', but usable with
    any class that provides 'spark'.
    """
    @contextmanager
    def sql_conf(self, pairs):
        """
        A convenient context manager to test some configuration specific logic. This sets
        `value` to the configuration `key` and then restores it back when it exits.
        """
        assert isinstance(pairs, dict), "pairs should be a dictionary."
        assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
        keys = pairs.keys()
        new_values = pairs.values()
        # Snapshot current values (None for keys that were unset).
        old_values = [self.spark.conf.get(key, None) for key in keys]
        for key, new_value in zip(keys, new_values):
            self.spark.conf.set(key, new_value)
        try:
            yield
        finally:
            # Restore previous values; keys that had none are unset again.
            for key, old_value in zip(keys, old_values):
                if old_value is None:
                    self.spark.conf.unset(key)
                else:
                    self.spark.conf.set(key, old_value)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
    """Test base that shares a single SparkSession across the whole class."""
    @classmethod
    def setUpClass(cls):
        ReusedPySparkTestCase.setUpClass()
        cls.spark = SparkSession(cls.sc)
    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        cls.spark.stop()
    def assertPandasEqual(self, expected, result):
        # Compare two pandas DataFrames, showing both frames (with dtypes)
        # in the failure message.
        msg = ("DataFrames are not equal: " +
               "\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) +
               "\n\nResult:\n%s\n%s" % (result, result.dtypes))
        self.assertTrue(expected.equals(result), msg=msg)
class DataTypeTests(unittest.TestCase):
    """Tests for pyspark.sql.types that need no SparkContext."""
    # regression test for SPARK-6055
    def test_data_type_eq(self):
        # Equality must survive a pickle round-trip.
        lt = LongType()
        lt2 = pickle.loads(pickle.dumps(LongType()))
        self.assertEqual(lt, lt2)
    # regression test for SPARK-7978
    def test_decimal_type(self):
        # Different precision/scale parameters yield distinct, unequal types.
        t1 = DecimalType()
        t2 = DecimalType(10, 2)
        self.assertTrue(t2 is not t1)
        self.assertNotEqual(t1, t2)
        t3 = DecimalType(8)
        self.assertNotEqual(t2, t3)
    # regression test for SPARK-10392
    def test_datetype_equal_zero(self):
        # Internal value 0 maps to the epoch date.
        dt = DateType()
        self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
    # regression test for SPARK-17035
    def test_timestamp_microsecond(self):
        # Microsecond precision must not be truncated at datetime.max.
        tst = TimestampType()
        self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
    def test_empty_row(self):
        row = Row()
        self.assertEqual(len(row), 0)
    def test_struct_field_type_name(self):
        # typeName() is intentionally unsupported on StructField.
        struct_field = StructField("a", IntegerType())
        self.assertRaises(TypeError, struct_field.typeName)
class SQLTests(ReusedSQLTestCase):
    @classmethod
    def setUpClass(cls):
        # Shared fixtures for the whole class: a 100-row (key, value)
        # DataFrame, plus a unique scratch path obtained by creating a
        # temporary file and immediately unlinking it (only the name is
        # kept and reused as an output directory by the tests).
        ReusedSQLTestCase.setUpClass()
        cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(cls.tempdir.name)
        cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
        cls.df = cls.spark.createDataFrame(cls.testData)
    @classmethod
    def tearDownClass(cls):
        # Stop Spark first, then remove the scratch directory (best-effort).
        ReusedSQLTestCase.tearDownClass()
        shutil.rmtree(cls.tempdir.name, ignore_errors=True)
    def test_sqlcontext_reuses_sparksession(self):
        # Two SQLContexts built over the same SparkContext must share one
        # underlying SparkSession (identity, not mere equality).
        sqlContext1 = SQLContext(self.sc)
        sqlContext2 = SQLContext(self.sc)
        self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
    def tearDown(self):
        # Per-test cleanup on top of the base class's teardown.
        super(SQLTests, self).tearDown()
        # tear down test_bucketed_write state
        self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
    def test_row_should_be_read_only(self):
        # Rows are immutable: attribute assignment must raise, both for a
        # locally constructed Row and for one returned by Spark.
        row = Row(a=1, b=2)
        self.assertEqual(1, row.a)
        def foo():
            row.a = 3
        self.assertRaises(Exception, foo)
        row2 = self.spark.range(10).first()
        self.assertEqual(0, row2.id)
        def foo2():
            row2.id = 2
        self.assertRaises(Exception, foo2)
    def test_range(self):
        # spark.range semantics: empty when start == end, supports a
        # negative step, and handles large long-valued steps (bit shifts).
        self.assertEqual(self.spark.range(1, 1).count(), 0)
        self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
        self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
        self.assertEqual(self.spark.range(-2).count(), 0)
        self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
# This is to check if a deprecated 'SQLContext.registerFunction' can call its alias.
sqlContext = self.spark._wrapped
sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType())
[row] = sqlContext.sql("SELECT oneArg('test')").collect()
self.assertEqual(row[0], 4)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_udf3(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y))
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], u'5')
def test_udf_registration_return_type_none(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None)
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf_registration_return_type_not_none(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, "Invalid returnType"):
self.spark.catalog.registerFunction(
"f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType())
def test_nondeterministic_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf
import random
udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic()
self.assertEqual(udf_random_col.deterministic, False)
df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND'))
udf_add_ten = udf(lambda rand: rand + 10, IntegerType())
[row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect()
self.assertEqual(row[0] + 10, row[1])
def test_nondeterministic_udf2(self):
import random
from pyspark.sql.functions import udf
random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic()
self.assertEqual(random_udf.deterministic, False)
random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf)
self.assertEqual(random_udf1.deterministic, False)
[row] = self.spark.sql("SELECT randInt()").collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf1()).collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf()).collect()
self.assertEqual(row[0], 6)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType()))
pydoc.render_doc(random_udf)
pydoc.render_doc(random_udf1)
pydoc.render_doc(udf(lambda x: x).asNondeterministic)
def test_nondeterministic_udf3(self):
# regression test for SPARK-23233
from pyspark.sql.functions import udf
f = udf(lambda x: x)
# Here we cache the JVM UDF instance.
self.spark.range(1).select(f("id"))
# This should reset the cache to set the deterministic status correctly.
f = f.asNondeterministic()
# Check the deterministic status of udf.
df = self.spark.range(1).select(f("id"))
deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
self.assertFalse(deterministic)
def test_nondeterministic_udf_in_aggregate(self):
from pyspark.sql.functions import udf, sum
import random
udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
df = self.spark.range(10)
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.groupby('id').agg(sum(udf_random_col())).collect()
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.agg(sum(udf_random_col())).collect()
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
# This is to check if a 'SQLContext.udf' can call its alias.
sqlContext = self.spark._wrapped
add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())
self.assertListEqual(
df.selectExpr("add_four(id) AS plus_four").collect(),
df.select(add_four("id").alias("plus_four")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
# This is to check if a deprecated 'SQLContext.registerJavaFunction' can call its alias.
sqlContext = spark._wrapped
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_linesep_text(self):
    """lineSep controls record splitting on text read and the separator on text write."""
    df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
    expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
                Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
                Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
                Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
    self.assertEqual(df.collect(), expected)
    # Reserve a fresh, unused temp path for the writer.
    tpath = tempfile.mkdtemp()
    shutil.rmtree(tpath)
    try:
        df.write.text(tpath, lineSep="!")
        expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
                    Row(value=u'Tom!30!"My name is Tom"'),
                    Row(value=u'Hyukjin!25!"I am Hyukjin'),
                    Row(value=u''), Row(value=u'I love Spark!"'),
                    Row(value=u'!')]
        readback = self.spark.read.text(tpath)
        self.assertEqual(readback.collect(), expected)
    finally:
        shutil.rmtree(tpath)
def test_multiline_json(self):
    """multiLine=True must parse a JSON-array file into the same rows as line-delimited JSON."""
    people1 = self.spark.read.json("python/test_support/sql/people.json")
    people_array = self.spark.read.json("python/test_support/sql/people_array.json",
                                        multiLine=True)
    self.assertEqual(people1.collect(), people_array.collect())
def test_encoding_json(self):
    """The JSON reader honours an explicit UTF-16LE encoding for multiline input."""
    people_array = self.spark.read\
        .json("python/test_support/sql/people_array_utf16le.json",
              multiLine=True, encoding="UTF-16LE")
    expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
    self.assertEqual(people_array.collect(), expected)
def test_linesep_json(self):
    """Custom lineSep drives both JSON record splitting on read and separation on write."""
    df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
    expected = [Row(_corrupt_record=None, name=u'Michael'),
                Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
                Row(_corrupt_record=u' "age":19}\n', name=None)]
    self.assertEqual(df.collect(), expected)
    # Reserve a fresh, unused temp path for the writer.
    tpath = tempfile.mkdtemp()
    shutil.rmtree(tpath)
    try:
        df = self.spark.read.json("python/test_support/sql/people.json")
        df.write.json(tpath, lineSep="!!")
        readback = self.spark.read.json(tpath, lineSep="!!")
        self.assertEqual(readback.collect(), df.collect())
    finally:
        shutil.rmtree(tpath)
def test_multiline_csv(self):
    """multiLine=True lets CSV fields contain embedded newlines."""
    ages_newlines = self.spark.read.csv(
        "python/test_support/sql/ages_newlines.csv", multiLine=True)
    expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
                Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
                Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
    self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
    """With whitespace trimming disabled, leading/trailing spaces survive a CSV write."""
    # Reserve a fresh, unused temp path for the writer.
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
        tmpPath,
        ignoreLeadingWhiteSpace=False,
        ignoreTrailingWhiteSpace=False)
    expected = [Row(value=u' a,b , c ')]
    readback = self.spark.read.text(tmpPath)
    self.assertEqual(readback.collect(), expected)
    shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
    """The ORC reader accepts a list of paths and unions their rows."""
    df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
                              "python/test_support/sql/orc_partitioned/b=1/c=1"])
    self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
    """input_file_name() must be visible to Python UDFs for file-based sources."""
    from pyspark.sql.functions import udf, input_file_name
    sourceFile = udf(lambda path: path, StringType())
    filePath = "python/test_support/sql/people1.json"
    row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
    self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
    """input_file_name() also works when the source is an RDD from textFile/newAPIHadoopFile."""
    from pyspark.sql.functions import udf, input_file_name

    def filename(path):
        # Identity function: returns the path string it is given.
        return path

    sameText = udf(filename, StringType())
    rdd = self.sc.textFile('python/test_support/sql/people.json')
    df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
    row = df.select(sameText(df['file'])).first()
    self.assertTrue(row[0].find("people.json") != -1)
    rdd2 = self.sc.newAPIHadoopFile(
        'python/test_support/sql/people.json',
        'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
        'org.apache.hadoop.io.LongWritable',
        'org.apache.hadoop.io.Text')
    df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
    row2 = df2.select(sameText(df2['file'])).first()
    self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initialization(self):
    """UserDefinedFunction must not initialize its JVM-side handle until first called."""
    # This is separate of UDFInitializationTests
    # to avoid context initialization
    # when udf is called
    from pyspark.sql.functions import UserDefinedFunction
    f = UserDefinedFunction(lambda x: x, StringType())
    self.assertIsNone(
        f._judf_placeholder,
        "judf should not be initialized before the first call."
    )
    self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
    self.assertIsNotNone(
        f._judf_placeholder,
        "judf should be initialized after UDF has been called."
    )
def test_udf_with_string_return_type(self):
    """UDF return types may be given as DDL strings (scalar, struct, and array forms)."""
    from pyspark.sql.functions import UserDefinedFunction
    add_one = UserDefinedFunction(lambda x: x + 1, "integer")
    make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
    make_array = UserDefinedFunction(
        lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
    expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
    actual = (self.spark.range(1, 2).toDF("x")
              .select(add_one("x"), make_pair("x"), make_array("x"))
              .first())
    self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
    """Constructing a UDF from a non-callable must raise TypeError."""
    from pyspark.sql.functions import UserDefinedFunction
    non_callable = None
    self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
    """@udf works bare, with parens, with a DataType, and with a DDL-string return type."""
    from pyspark.sql.functions import lit, udf
    from pyspark.sql.types import IntegerType, DoubleType

    @udf(IntegerType())
    def add_one(x):
        if x is not None:
            return x + 1

    @udf(returnType=DoubleType())
    def add_two(x):
        if x is not None:
            return float(x + 2)

    @udf
    def to_upper(x):
        if x is not None:
            return x.upper()

    @udf()
    def to_lower(x):
        if x is not None:
            return x.lower()

    @udf
    def substr(x, start, end):
        if x is not None:
            return x[start:end]

    @udf("long")
    def trunc(x):
        return int(x)

    @udf(returnType="double")
    def as_double(x):
        return float(x)

    df = (
        self.spark
            .createDataFrame(
                [(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
            .select(
                add_one("one"), add_two("one"),
                to_upper("Foo"), to_lower("Foo"),
                substr("foobar", lit(0), lit(3)),
                trunc("float"), as_double("one")))
    self.assertListEqual(
        [tpe for _, tpe in df.dtypes],
        ["int", "double", "string", "string", "string", "bigint", "double"]
    )
    self.assertListEqual(
        list(df.first()),
        [2, 3.0, "FOO", "foo", "foo", 3, 1.0]
    )
def test_udf_wrapper(self):
    """udf() must preserve the wrapped callable's docstring, .func, and .returnType."""
    from pyspark.sql.functions import udf
    from pyspark.sql.types import IntegerType

    def f(x):
        """Identity"""
        return x

    return_type = IntegerType()
    f_ = udf(f, return_type)
    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)

    # Same contract for a callable class instance ...
    class F(object):
        """Identity"""
        def __call__(self, x):
            return x

    f = F()
    return_type = IntegerType()
    f_ = udf(f, return_type)
    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)

    # ... and for a functools.partial wrapper.
    f = functools.partial(f, x=1)
    return_type = IntegerType()
    f_ = udf(f, return_type)
    self.assertTrue(f.__doc__ in f_.__doc__)
    self.assertEqual(f, f_.func)
    self.assertEqual(return_type, f_.returnType)
def test_validate_column_types(self):
    """_to_java_column accepts strings and Columns and rejects other argument types."""
    from pyspark.sql.functions import udf, to_json
    from pyspark.sql.column import _to_java_column

    self.assertTrue("Column" in _to_java_column("a").getClass().toString())
    self.assertTrue("Column" in _to_java_column(u"a").getClass().toString())
    self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
    self.assertRaisesRegexp(
        TypeError,
        "Invalid argument, not a string or column",
        lambda: _to_java_column(1))

    class A():
        pass

    self.assertRaises(TypeError, lambda: _to_java_column(A()))
    self.assertRaises(TypeError, lambda: _to_java_column([]))
    self.assertRaisesRegexp(
        TypeError,
        "Invalid argument, not a string or column",
        lambda: udf(lambda x: x)(None))
    self.assertRaises(TypeError, lambda: to_json(1))
def test_basic_functions(self):
    """Smoke test: count/collect/schema, cache flags, and temp-view SQL on a JSON DataFrame."""
    rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
    df = self.spark.read.json(rdd)
    df.count()
    df.collect()
    df.schema
    # cache and checkpoint
    self.assertFalse(df.is_cached)
    df.persist()
    df.unpersist(True)
    df.cache()
    self.assertTrue(df.is_cached)
    self.assertEqual(2, df.count())
    df.createOrReplaceTempView("temp")
    df = self.spark.sql("select foo from temp")
    df.count()
    df.collect()
def test_apply_schema_to_row(self):
    """A schema inferred from JSON can be re-applied to RDDs of Rows."""
    df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
    df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
    self.assertEqual(df.collect(), df2.collect())
    rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
    df3 = self.spark.createDataFrame(rdd, df.schema)
    self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
    """Schema inference from a local list matches inference from an RDD of the same dicts."""
    input = [{"a": 1}, {"b": "coffee"}]
    rdd = self.sc.parallelize(input)
    df = self.spark.createDataFrame(input)
    df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
    self.assertEqual(df.schema, df2.schema)
    rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
    df3 = self.spark.createDataFrame(rdd, df.schema)
    self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
    """An explicit schema applies to dicts, RDDs and Rows, with and without verification."""
    schema = StructType().add("b", StringType()).add("a", IntegerType())
    input = [{"a": 1}, {"b": "coffee"}]
    rdd = self.sc.parallelize(input)
    for verify in [False, True]:
        df = self.spark.createDataFrame(input, schema, verifySchema=verify)
        df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
        self.assertEqual(df.schema, df2.schema)
        # Note: rdd/input are rebound here, so the second loop pass uses the new data.
        rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
        df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
        self.assertEqual(10, df3.count())
        input = [Row(a=x, b=str(x)) for x in range(10)]
        df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
        self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
    """A schema wider than the data should fail when an action forces evaluation."""
    input = [Row(a=1)]
    rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
    schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
    df = self.spark.createDataFrame(rdd, schema)
    self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
    """Rows nested inside arrays and map values survive (de)serialization."""
    d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
    rdd = self.sc.parallelize(d)
    df = self.spark.createDataFrame(rdd)
    row = df.head()
    self.assertEqual(1, len(row.l))
    self.assertEqual(1, row.l[0].a)
    self.assertEqual("2", row.d["key"].d)
    l = df.rdd.map(lambda x: x.l).first()
    self.assertEqual(1, len(l))
    self.assertEqual('s', l[0].b)
    d = df.rdd.map(lambda x: x.d).first()
    self.assertEqual(1, len(d))
    self.assertEqual(1.0, d["key"].c)
    row = df.rdd.map(lambda x: x.d["key"]).first()
    self.assertEqual(1.0, row.c)
    self.assertEqual("2", row.d)
def test_infer_schema(self):
    """Schema inference copes with empty containers, None, and empty strings across rows."""
    d = [Row(l=[], d={}, s=None),
         Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
    rdd = self.sc.parallelize(d)
    df = self.spark.createDataFrame(rdd)
    self.assertEqual([], df.rdd.map(lambda r: r.l).first())
    self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
    df.createOrReplaceTempView("test")
    result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
    self.assertEqual(1, result.head()[0])
    df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
    self.assertEqual(df.schema, df2.schema)
    self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
    self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
    df2.createOrReplaceTempView("test2")
    result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
    self.assertEqual(1, result.head()[0])
def test_infer_schema_not_enough_names(self):
    """Columns beyond the supplied names get positional defaults (_2, _3, ...)."""
    frame = self.spark.createDataFrame([["a", "b"]], ["col1"])
    self.assertEqual(frame.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
    """Conflicting types in a fully-sampled column surface as a TypeError naming the field."""
    with self.assertRaisesRegexp(TypeError, 'field a'):
        self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
                                   schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
    """Schema inference works for nested lists/maps in Rows and for namedtuples."""
    NestedRow = Row("f1", "f2")
    nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
                                      NestedRow([2, 3], {"row2": 2.0})])
    df = self.spark.createDataFrame(nestedRdd1)
    self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
    nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
                                      NestedRow([[2, 3], [3, 4]], [2, 3])])
    df = self.spark.createDataFrame(nestedRdd2)
    self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
    from collections import namedtuple
    CustomRow = namedtuple('CustomRow', 'field1 field2')
    rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
                               CustomRow(field1=2, field2="row2"),
                               CustomRow(field1=3, field2="row3")])
    df = self.spark.createDataFrame(rdd)
    self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
    """Explicitly supplied column names take precedence over dict keys."""
    frame = self.spark.createDataFrame([{'a': 1}], ["b"])
    self.assertEqual(frame.columns, ['b'])
def test_create_dataframe_from_objects(self):
    """Plain Python objects map their attributes to columns."""
    data = [MyObject(1, "1"), MyObject(2, "2")]
    df = self.spark.createDataFrame(data)
    self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
    self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
    """A SQL null literal comes back to Python as None."""
    result = self.spark.sql("select null as col")
    self.assertEqual(Row(col=None), result.first())
def test_apply_schema(self):
    """A wide mix of atomic and complex field types round-trips through an explicit schema and SQL."""
    from datetime import date, datetime
    rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
                                date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
                                {"a": 1}, (2,), [1, 2, 3], None)])
    schema = StructType([
        StructField("byte1", ByteType(), False),
        StructField("byte2", ByteType(), False),
        StructField("short1", ShortType(), False),
        StructField("short2", ShortType(), False),
        StructField("int1", IntegerType(), False),
        StructField("float1", FloatType(), False),
        StructField("date1", DateType(), False),
        StructField("time1", TimestampType(), False),
        StructField("map1", MapType(StringType(), IntegerType(), False), False),
        StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
        StructField("list1", ArrayType(ByteType(), False), False),
        StructField("null1", DoubleType(), True)])
    df = self.spark.createDataFrame(rdd, schema)
    results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
                                    x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
    r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
         datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
    self.assertEqual(r, results.first())
    df.createOrReplaceTempView("table2")
    r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
                       "short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
                       "float1 + 1.5 as float1 FROM table2").first()
    self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_struct_in_map(self):
    """Rows can serve as both map keys and map values."""
    d = [Row(m={Row(i=1): Row(s="")})]
    df = self.sc.parallelize(d).toDF()
    k, v = list(df.head().m.items())[0]
    self.assertEqual(1, k.i)
    self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
    """Row.asDict preserves nested Rows inside lists and maps, locally and after SQL."""
    row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
    self.assertEqual(1, row.asDict()['l'][0].a)
    df = self.sc.parallelize([row]).toDF()
    df.createOrReplaceTempView("test")
    row = self.spark.sql("select l, d from test").head()
    self.assertEqual(1, row.asDict()["l"][0].a)
    self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
    """UDTs round-trip through pickling, a JVM JSON parse, type inference, and verification."""
    from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
    from pyspark.sql.tests import ExamplePointUDT, ExamplePoint

    def check_datatype(datatype):
        # A datatype must survive pickling and a Python -> JVM -> Python JSON round trip.
        pickled = pickle.loads(pickle.dumps(datatype))
        assert datatype == pickled
        scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
        python_datatype = _parse_datatype_json_string(scala_datatype.json())
        assert datatype == python_datatype

    check_datatype(ExamplePointUDT())
    structtype_with_udt = StructType([StructField("label", DoubleType(), False),
                                      StructField("point", ExamplePointUDT(), False)])
    check_datatype(structtype_with_udt)
    p = ExamplePoint(1.0, 2.0)
    self.assertEqual(_infer_type(p), ExamplePointUDT())
    _make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
    self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
    check_datatype(PythonOnlyUDT())
    structtype_with_udt = StructType([StructField("label", DoubleType(), False),
                                      StructField("point", PythonOnlyUDT(), False)])
    check_datatype(structtype_with_udt)
    p = PythonOnlyPoint(1.0, 2.0)
    self.assertEqual(_infer_type(p), PythonOnlyUDT())
    _make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
    self.assertRaises(
        ValueError,
        lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
    """A Python-only UDT column can be created and shown without error."""
    schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
    df = self.spark.createDataFrame(
        [(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
        schema=schema)
    df.show()
def test_nested_udt_in_df(self):
    """UDTs work as array elements and as map values."""
    schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
    df = self.spark.createDataFrame(
        [(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
        schema=schema)
    df.collect()
    schema = StructType().add("key", LongType()).add("val",
                                                     MapType(LongType(), PythonOnlyUDT()))
    df = self.spark.createDataFrame(
        [(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
        schema=schema)
    df.collect()
def test_complex_nested_udt_in_df(self):
    """UDTs survive aggregation (collect_list) and a UDF returning an array of structs."""
    from pyspark.sql.functions import udf
    schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
    df = self.spark.createDataFrame(
        [(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
        schema=schema)
    df.collect()
    gd = df.groupby("key").agg({"val": "collect_list"})
    gd.collect()
    udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
    gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
    """A registered UDF with a UDT return type may return None (here, for id == 0)."""
    df = self.spark.range(0, 10, 1, 1)

    def myudf(x):
        # Falls through (returns None) when x == 0.
        if x > 0:
            return PythonOnlyPoint(float(x), float(x))

    self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
    rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
    self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_nonparam_udf_with_aggregate(self):
    """A zero-argument UDF can be applied after distinct()."""
    import pyspark.sql.functions as f
    df = self.spark.createDataFrame([(1, 2), (1, 2)])
    f_udf = f.udf(lambda: "const_str")
    rows = df.distinct().withColumn("a", f_udf()).collect()
    self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])
def test_infer_schema_with_udt(self):
    """Schema inference recognizes UDT instances (both Scala-backed and Python-only)."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    schema = df.schema
    field = [f for f in schema.fields if f.name == "point"][0]
    self.assertEqual(type(field.dataType), ExamplePointUDT)
    df.createOrReplaceTempView("labeled_point")
    point = self.spark.sql("SELECT point FROM labeled_point").head().point
    self.assertEqual(point, ExamplePoint(1.0, 2.0))
    row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    schema = df.schema
    field = [f for f in schema.fields if f.name == "point"][0]
    self.assertEqual(type(field.dataType), PythonOnlyUDT)
    df.createOrReplaceTempView("labeled_point")
    point = self.spark.sql("SELECT point FROM labeled_point").head().point
    self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
    """An explicit schema containing a UDT converts raw tuples to UDT values."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = (1.0, ExamplePoint(1.0, 2.0))
    schema = StructType([StructField("label", DoubleType(), False),
                         StructField("point", ExamplePointUDT(), False)])
    df = self.spark.createDataFrame([row], schema)
    point = df.head().point
    self.assertEqual(point, ExamplePoint(1.0, 2.0))
    row = (1.0, PythonOnlyPoint(1.0, 2.0))
    schema = StructType([StructField("label", DoubleType(), False),
                         StructField("point", PythonOnlyUDT(), False)])
    df = self.spark.createDataFrame([row], schema)
    point = df.head().point
    self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
    """UDFs can both consume and produce UDT values."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
    udf = UserDefinedFunction(lambda p: p.y, DoubleType())
    self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
    udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
    self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
    row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
    df = self.spark.createDataFrame([row])
    self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
    udf = UserDefinedFunction(lambda p: p.y, DoubleType())
    self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
    udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
    self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
    """UDT columns round-trip through Parquet, including overwrite mode."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
    df0 = self.spark.createDataFrame([row])
    output_dir = os.path.join(self.tempdir.name, "labeled_point")
    df0.write.parquet(output_dir)
    df1 = self.spark.read.parquet(output_dir)
    point = df1.head().point
    self.assertEqual(point, ExamplePoint(1.0, 2.0))
    row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
    df0 = self.spark.createDataFrame([row])
    df0.write.parquet(output_dir, mode='overwrite')
    df1 = self.spark.read.parquet(output_dir)
    point = df1.head().point
    self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
    """union() preserves UDT column values from both inputs."""
    from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
    row1 = (1.0, ExamplePoint(1.0, 2.0))
    row2 = (2.0, ExamplePoint(3.0, 4.0))
    schema = StructType([StructField("label", DoubleType(), False),
                         StructField("point", ExamplePointUDT(), False)])
    df1 = self.spark.createDataFrame([row1], schema)
    df2 = self.spark.createDataFrame([row2], schema)
    result = df1.union(df2).orderBy("label").collect()
    self.assertEqual(
        result,
        [
            Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
            Row(label=2.0, point=ExamplePoint(3.0, 4.0))
        ]
    )
def test_cast_to_string_with_udt(self):
    """Casting UDT columns to string uses each UDT's own string representation."""
    from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
    from pyspark.sql.functions import col
    row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
    schema = StructType([StructField("point", ExamplePointUDT(), False),
                         StructField("pypoint", PythonOnlyUDT(), False)])
    df = self.spark.createDataFrame([row], schema)
    result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
    self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_column_operators(self):
    """Arithmetic, comparison, boolean and string operators on Columns all yield Columns."""
    ci = self.df.key
    cs = self.df.value
    c = ci == cs
    self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
    # Reflected (right-hand) arithmetic operators.
    rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
    self.assertTrue(all(isinstance(c, Column) for c in rcc))
    cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
    self.assertTrue(all(isinstance(c, Column) for c in cb))
    cbool = (ci & ci), (ci | ci), (~ci)
    self.assertTrue(all(isinstance(c, Column) for c in cbool))
    css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
        cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
    self.assertTrue(all(isinstance(c, Column) for c in css))
    self.assertTrue(isinstance(ci.cast(LongType()), Column))
    # Python's 'in' cannot be overloaded to return a Column, so it must raise.
    self.assertRaisesRegexp(ValueError,
                            "Cannot apply 'in' operator against a column",
                            lambda: 1 in cs)
def test_column_getitem(self):
    """Indexing, slicing and key access on a Column yield Columns; stepped slices are rejected."""
    from pyspark.sql.functions import col
    for derived in (col("foo")[1:3], col("foo")[0], col("foo")["bar"]):
        self.assertIsInstance(derived, Column)
    self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
    """select accepts '*' and Column objects, and composes with where()."""
    df = self.df
    self.assertEqual(self.testData, df.select("*").collect())
    self.assertEqual(self.testData, df.select(df.key, df.value).collect())
    self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
    """freqItems on both columns must report the dominant pair (1, -2.0)."""
    rows = [Row(a=1, b=-2.0) if k % 2 == 0 else Row(a=k, b=k * 1.0) for k in range(100)]
    frame = self.sc.parallelize(rows).toDF()
    frequent = frame.stat.freqItems(("a", "b"), 0.4).collect()[0]
    self.assertTrue(1 in frequent[0])
    self.assertTrue(-2.0 in frequent[1])
def test_aggregator(self):
    """Global aggregation via dict spec, mean(), and pyspark.sql.functions aggregates."""
    df = self.df
    g = df.groupBy()
    self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
    self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
    from pyspark.sql import functions
    self.assertEqual((0, u'99'),
                     tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
    self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
    self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
    """first()/last() honour their ignorenulls flag."""
    from pyspark.sql import functions
    df = self.spark.range(0, 100)
    # Every id divisible by 3 is replaced by null.
    df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
    df3 = df2.select(functions.first(df2.id, False).alias('a'),
                     functions.first(df2.id, True).alias('b'),
                     functions.last(df2.id, False).alias('c'),
                     functions.last(df2.id, True).alias('d'))
    self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
    """approxQuantile accepts str/unicode column names, lists and tuples, and rejects non-strings."""
    df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
    for f in ["a", u"a"]:
        aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
        self.assertTrue(isinstance(aq, list))
        self.assertEqual(len(aq), 3)
        self.assertTrue(all(isinstance(q, float) for q in aq))
    # A list of columns yields one quantile list per column.
    aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
    self.assertTrue(isinstance(aqs, list))
    self.assertEqual(len(aqs), 2)
    self.assertTrue(isinstance(aqs[0], list))
    self.assertEqual(len(aqs[0]), 3)
    self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
    self.assertTrue(isinstance(aqs[1], list))
    self.assertEqual(len(aqs[1]), 3)
    self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
    # A tuple of columns behaves the same as a list.
    aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
    self.assertTrue(isinstance(aqt, list))
    self.assertEqual(len(aqt), 2)
    self.assertTrue(isinstance(aqt[0], list))
    self.assertEqual(len(aqt[0]), 3)
    self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
    self.assertTrue(isinstance(aqt[1], list))
    self.assertEqual(len(aqt[1]), 3)
    self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
    self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
    self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
    self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
    """Correlation of a and sqrt(a) over 0..9 matches the known reference value."""
    import math
    rows = [Row(a=n, b=math.sqrt(n)) for n in range(10)]
    frame = self.sc.parallelize(rows).toDF()
    measured = frame.stat.corr(u"a", "b")
    self.assertTrue(abs(measured - 0.95734012) < 1e-6)
def test_sampleby(self):
    """Stratified sampling with a fixed seed yields a deterministic row count."""
    frame = self.sc.parallelize([Row(a=n, b=(n % 3)) for n in range(10)]).toDF()
    picked = frame.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
    self.assertTrue(picked.count() == 3)
def test_cov(self):
    """Sample covariance of a and 2a over 0..9 equals 55/3."""
    frame = self.sc.parallelize([Row(a=n, b=2 * n) for n in range(10)]).toDF()
    measured = frame.stat.cov(u"a", "b")
    self.assertTrue(abs(measured - 55.0 / 3) < 1e-6)
def test_crosstab(self):
    """crosstab yields one row per 'a' value; every (a, b) cell count is 1 for this data."""
    df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
    ct = df.stat.crosstab(u"a", "b").collect()
    ct = sorted(ct, key=lambda x: x[0])
    for i, row in enumerate(ct):
        self.assertEqual(row[0], str(i))
        # Bug fix: the original used assertTrue(row[1], 1), which treats the second
        # argument as a failure *message* and passes for any truthy value — the
        # counts were never actually checked. assertEqual verifies them.
        self.assertEqual(row[1], 1)
        self.assertEqual(row[2], 1)
def test_math_functions(self):
    """Spark SQL math functions must agree element-wise with Python's math module (within 1e-6)."""
    df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
    from pyspark.sql import functions
    import math

    def get_values(l):
        # Each collected row holds a single column; unwrap it.
        return [j[0] for j in l]

    def assert_close(a, b):
        c = get_values(b)
        diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
        # Bug fix: the original *returned* this comparison instead of asserting it,
        # so a mismatch could never fail the test. Assert it explicitly.
        assert sum(diff) == len(a), "%s != %s" % (a, c)

    assert_close([math.cos(i) for i in range(10)],
                 df.select(functions.cos(df.a)).collect())
    assert_close([math.cos(i) for i in range(10)],
                 df.select(functions.cos("a")).collect())
    assert_close([math.sin(i) for i in range(10)],
                 df.select(functions.sin(df.a)).collect())
    assert_close([math.sin(i) for i in range(10)],
                 df.select(functions.sin(df['a'])).collect())
    assert_close([math.pow(i, 2 * i) for i in range(10)],
                 df.select(functions.pow(df.a, df.b)).collect())
    assert_close([math.pow(i, 2) for i in range(10)],
                 df.select(functions.pow(df.a, 2)).collect())
    assert_close([math.pow(i, 2) for i in range(10)],
                 df.select(functions.pow(df.a, 2.0)).collect())
    assert_close([math.hypot(i, 2 * i) for i in range(10)],
                 df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
    """rand()/randn() values stay in expected ranges and are reproducible for a fixed seed."""
    df = self.df
    from pyspark.sql import functions
    rnd = df.select('key', functions.rand()).collect()
    for row in rnd:
        assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
    rndn = df.select('key', functions.randn(5)).collect()
    for row in rndn:
        assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
    # If the specified seed is 0, we should use it.
    # https://issues.apache.org/jira/browse/SPARK-9691
    rnd1 = df.select('key', functions.rand(0)).collect()
    rnd2 = df.select('key', functions.rand(0)).collect()
    self.assertEqual(sorted(rnd1), sorted(rnd2))
    rndn1 = df.select('key', functions.randn(0)).collect()
    rndn2 = df.select('key', functions.randn(0)).collect()
    self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
    """Column.substr rejects mixed int/Column arguments (and long vs Column on Python 2)."""
    from pyspark.sql.functions import col, lit
    df = self.spark.createDataFrame([['nick']], schema=['name'])
    self.assertRaisesRegexp(
        TypeError,
        "must be the same type",
        lambda: df.select(col('name').substr(0, lit(1))))
    if sys.version_info.major == 2:
        self.assertRaises(
            TypeError,
            lambda: df.select(col('name').substr(long(0), long(1))))
def test_array_contains_function(self):
    """array_contains matches against string arrays; an empty array yields False."""
    from pyspark.sql.functions import array_contains
    frame = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
    # The value argument can be implicitly castable to the element's type of the array.
    found = frame.select(array_contains(frame.data, 1).alias('b')).collect()
    self.assertEqual([Row(b=True), Row(b=False)], found)
def test_between_function(self):
    """Column.between is inclusive on both bounds."""
    frame = self.sc.parallelize([
        Row(a=1, b=2, c=3),
        Row(a=2, b=1, c=3),
        Row(a=4, b=1, c=4)]).toDF()
    matched = frame.filter(frame.a.between(frame.b, frame.c)).collect()
    self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)], matched)
def test_struct_type(self):
    """StructType construction styles agree; iteration, len and indexing behave like a sequence."""
    # add(name, type, ...) versus a list of StructFields must build equal types.
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    struct2 = StructType([StructField("f1", StringType(), True),
                          StructField("f2", StringType(), True, None)])
    self.assertEqual(struct1.fieldNames(), struct2.names)
    self.assertEqual(struct1, struct2)
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    struct2 = StructType([StructField("f1", StringType(), True)])
    self.assertNotEqual(struct1.fieldNames(), struct2.names)
    self.assertNotEqual(struct1, struct2)
    # add(StructField) form must match the list form too.
    struct1 = (StructType().add(StructField("f1", StringType(), True))
               .add(StructField("f2", StringType(), True, None)))
    struct2 = StructType([StructField("f1", StringType(), True),
                          StructField("f2", StringType(), True, None)])
    self.assertEqual(struct1.fieldNames(), struct2.names)
    self.assertEqual(struct1, struct2)
    struct1 = (StructType().add(StructField("f1", StringType(), True))
               .add(StructField("f2", StringType(), True, None)))
    struct2 = StructType([StructField("f1", StringType(), True)])
    self.assertNotEqual(struct1.fieldNames(), struct2.names)
    self.assertNotEqual(struct1, struct2)
    # Catch exception raised during improper construction
    self.assertRaises(ValueError, lambda: StructType().add("name"))
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    for field in struct1:
        self.assertIsInstance(field, StructField)
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    self.assertEqual(len(struct1), 2)
    struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    self.assertIs(struct1["f1"], struct1.fields[0])
    self.assertIs(struct1[0], struct1.fields[0])
    self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
    self.assertRaises(KeyError, lambda: struct1["f9"])
    self.assertRaises(IndexError, lambda: struct1[9])
    self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
    """_parse_datatype_string accepts atomic names, decimal precision, and nested/bare struct DDL."""
    from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
    for k, t in _all_atomic_types.items():
        if t != NullType:
            self.assertEqual(t(), _parse_datatype_string(k))
    self.assertEqual(IntegerType(), _parse_datatype_string("int"))
    # Whitespace inside the type string must be tolerated.
    self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
    self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
    self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
    self.assertEqual(
        ArrayType(IntegerType()),
        _parse_datatype_string("array<int >"))
    self.assertEqual(
        MapType(IntegerType(), DoubleType()),
        _parse_datatype_string("map< int, double >"))
    self.assertEqual(
        StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
        _parse_datatype_string("struct<a:int, c:double >"))
    # Bare field lists (no struct<...> wrapper) are accepted in both : and DDL styles.
    self.assertEqual(
        StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
        _parse_datatype_string("a:int, c:double"))
    self.assertEqual(
        StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
        _parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
    """createDataFrame must accept field metadata that is None or contains None values."""
    schema = StructType([StructField("f1", StringType(), True, None),
                         StructField("f2", StringType(), True, {'a': None})])
    rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
    # Success is simply not raising during schema conversion.
    self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
    """DataFrame round-trips via json read/write, generic save/load, the SQL
    default data source, and a csv write with quote=None.

    Fixes two issues in the original: the csv temp directory created with
    tempfile.mkdtemp() was never removed (leak), and the default-data-source
    conf was not restored if an assertion in between failed.
    """
    df = self.df
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # let the writer create the directory itself
    df.write.json(tmpPath)
    actual = self.spark.read.json(tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

    # Reading with an explicit schema restricts the materialized columns.
    schema = StructType([StructField("value", StringType(), True)])
    actual = self.spark.read.json(tmpPath, schema)
    self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))

    df.write.json(tmpPath, "overwrite")
    actual = self.spark.read.json(tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

    # Unknown options must be ignored by both save() and load().
    df.write.save(format="json", mode="overwrite", path=tmpPath,
                  noUse="this options will not be used in save.")
    actual = self.spark.read.load(format="json", path=tmpPath,
                                  noUse="this options will not be used in load.")
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))

    # Temporarily switch the default data source; restore it even on failure.
    defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
                                                "org.apache.spark.sql.parquet")
    try:
        self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
        actual = self.spark.read.load(path=tmpPath)
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    finally:
        self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)

    # Writing csv with quote=None must not fail (option-removal code path).
    csvTmpDir = tempfile.mkdtemp()
    csvpath = os.path.join(csvTmpDir, 'data')
    df.write.option('quote', None).format('csv').save(csvpath)

    # BUGFIX: remove both temp trees; the csv directory used to leak.
    shutil.rmtree(tmpPath)
    shutil.rmtree(csvTmpDir)
def test_save_and_load_builder(self):
    """Same round-trips as test_save_and_load, driven via the builder-style API."""
    df = self.df
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # writer must create the directory itself
    df.write.json(tmpPath)
    actual = self.spark.read.json(tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    # Explicit schema limits the columns read back.
    schema = StructType([StructField("value", StringType(), True)])
    actual = self.spark.read.json(tmpPath, schema)
    self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
    df.write.mode("overwrite").json(tmpPath)
    actual = self.spark.read.json(tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    # Unknown options are accepted and silently ignored on both sides.
    df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
        .option("noUse", "this option will not be used in save.")\
        .format("json").save(path=tmpPath)
    actual =\
        self.spark.read.format("json")\
        .load(path=tmpPath, noUse="this options will not be used in load.")
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    # Swap the default data source to json, then restore the previous value.
    defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
                                                "org.apache.spark.sql.parquet")
    self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
    actual = self.spark.read.load(path=tmpPath)
    self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
    self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
    shutil.rmtree(tmpPath)
def test_stream_trigger(self):
    """trigger() argument validation: exactly one trigger, keyword-only.

    BUGFIX: the first three negative cases had no self.fail() after the call,
    so if trigger() wrongly accepted the arguments the test silently passed.
    """
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')

    # Should take at least one arg
    try:
        df.writeStream.trigger()
        self.fail("Should have thrown an exception")
    except ValueError:
        pass

    # Should not take multiple args
    try:
        df.writeStream.trigger(once=True, processingTime='5 seconds')
        self.fail("Should have thrown an exception")
    except ValueError:
        pass

    # Should not take multiple args
    try:
        df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
        self.fail("Should have thrown an exception")
    except ValueError:
        pass

    # Should take only keyword args
    try:
        df.writeStream.trigger('5 seconds')
        self.fail("Should have thrown an exception")
    except TypeError:
        pass
def test_stream_read_options(self):
    """readStream honors format/option/schema set through the builder."""
    schema = StructType([StructField("data", StringType(), False)])
    df = self.spark.readStream\
        .format('text')\
        .option('path', 'python/test_support/sql/streaming')\
        .schema(schema)\
        .load()
    self.assertTrue(df.isStreaming)
    self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
    """Arguments passed directly to load() override earlier builder settings."""
    bad_schema = StructType([StructField("test", IntegerType(), False)])
    schema = StructType([StructField("data", StringType(), False)])
    # path/schema/format given to load() win over the earlier option()/schema()/format().
    df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
        .schema(bad_schema)\
        .load(path='python/test_support/sql/streaming', schema=schema, format='text')
    self.assertTrue(df.isStreaming)
    self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
    """writeStream builder settings (checkpoint, partitionBy, mode) produce output files."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
        .withColumn('id', lit(1))
    # Stop leftover queries from earlier tests so query names do not clash.
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # the sink creates its own directories under tmpPath
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
        .format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
    try:
        self.assertEqual(q.name, 'this_query')
        self.assertTrue(q.isActive)
        q.processAllAvailable()
        output_files = []
        for _, _, files in os.walk(out):
            output_files.extend([f for f in files if not f.startswith('.')])
        # Both data files and checkpoint state must have been written.
        self.assertTrue(len(output_files) > 0)
        self.assertTrue(len(os.listdir(chk)) > 0)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
    """Keyword arguments to start() override earlier conflicting builder settings."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    fake1 = os.path.join(tmpPath, 'fake1')
    fake2 = os.path.join(tmpPath, 'fake2')
    # Every builder setting below is superseded by the start() keyword args.
    q = df.writeStream.option('checkpointLocation', fake1)\
        .format('memory').option('path', fake2) \
        .queryName('fake_query').outputMode('append') \
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertEqual(q.name, 'this_query')
        self.assertTrue(q.isActive)
        q.processAllAvailable()
        output_files = []
        for _, _, files in os.walk(out):
            output_files.extend([f for f in files if not f.startswith('.')])
        self.assertTrue(len(output_files) > 0)
        self.assertTrue(len(os.listdir(chk)) > 0)
        self.assertFalse(os.path.isdir(fake1))  # should not have been created
        self.assertFalse(os.path.isdir(fake2))  # should not have been created
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
    """status / lastProgress / recentProgress reflect the running query."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')

    def func(x):
        # Slow down each row so the first lastProgress poll races the batch.
        time.sleep(1)
        return x

    from pyspark.sql.functions import col, udf
    sleep_udf = udf(func)
    # Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
    # were no updates.
    q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        # "lastProgress" will return None in most cases. However, as it may be flaky when
        # Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
        # may throw error with a high chance and make this test flaky, so we should still be
        # able to detect broken codes.
        q.lastProgress
        q.processAllAvailable()
        lastProgress = q.lastProgress
        recentProgress = q.recentProgress
        status = q.status
        self.assertEqual(lastProgress['name'], q.name)
        self.assertEqual(lastProgress['id'], q.id)
        self.assertTrue(any(p == lastProgress for p in recentProgress))
        self.assertTrue(
            "message" in status and
            "isDataAvailable" in status and
            "isTriggerActive" in status)
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
    """awaitTermination rejects non-numeric timeouts and times out while active."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream\
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertTrue(q.isActive)
        try:
            q.awaitTermination("hello")  # non-numeric timeout must be rejected
            self.fail("Expected a value exception")
        except ValueError:
            pass
        now = time.time()
        # test should take at least 2 seconds
        res = q.awaitTermination(2.6)
        duration = time.time() - now
        self.assertTrue(duration >= 2)
        self.assertFalse(res)  # query is still active, so the wait timed out
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
def test_stream_exception(self):
    """exception() is None on a healthy query; after a UDF failure it returns
    a StreamingQueryException that still names the original Python error."""
    sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    sq = sdf.writeStream.format('memory').queryName('query_explain').start()
    try:
        sq.processAllAvailable()
        self.assertEqual(sq.exception(), None)
    finally:
        sq.stop()

    from pyspark.sql.functions import col, udf
    from pyspark.sql.utils import StreamingQueryException
    bad_udf = udf(lambda x: 1 / 0)  # deliberately failing UDF
    sq = sdf.select(bad_udf(col("value")))\
        .writeStream\
        .format('memory')\
        .queryName('this_query')\
        .start()
    try:
        # Process some data to fail the query
        sq.processAllAvailable()
        self.fail("bad udf should fail the query")
    except StreamingQueryException as e:
        # This is expected
        self.assertTrue("ZeroDivisionError" in e.desc)
    finally:
        sq.stop()
    # The failure stays observable on the stopped query object.
    self.assertTrue(type(sq.exception()) is StreamingQueryException)
    self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
    """StreamingQueryManager.awaitAnyTermination mirrors per-query awaitTermination."""
    df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
    for q in self.spark._wrapped.streams.active:
        q.stop()
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)
    self.assertTrue(df.isStreaming)
    out = os.path.join(tmpPath, 'out')
    chk = os.path.join(tmpPath, 'chk')
    q = df.writeStream\
        .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
    try:
        self.assertTrue(q.isActive)
        try:
            self.spark._wrapped.streams.awaitAnyTermination("hello")  # bad timeout type
            self.fail("Expected a value exception")
        except ValueError:
            pass
        now = time.time()
        # test should take at least 2 seconds
        res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
        duration = time.time() - now
        self.assertTrue(duration >= 2)
        self.assertFalse(res)  # nothing terminated within the timeout
    finally:
        q.stop()
        shutil.rmtree(tmpPath)
class ForeachWriterTester:
    """Harness for foreach() writer tests.

    Writer callbacks (open/process/close) record events as JSON files in temp
    directories, so tasks running in other processes can report back to the
    test, which then reads the events with Spark for assertions. Pickling is
    restricted to the three event directories (see __getstate__).
    """

    def __init__(self, spark):
        self.spark = spark

    def write_open_event(self, partitionId, epochId):
        self._write_event(
            self.open_events_dir,
            {'partition': partitionId, 'epoch': epochId})

    def write_process_event(self, row):
        # The row content is not inspected; only the fact that process ran.
        self._write_event(self.process_events_dir, {'value': 'text'})

    def write_close_event(self, error):
        self._write_event(self.close_events_dir, {'error': str(error)})

    def write_input_file(self):
        # Each new file dropped in input_dir becomes one streaming micro-batch.
        self._write_event(self.input_dir, "text")

    def open_events(self):
        return self._read_events(self.open_events_dir, 'partition INT, epoch INT')

    def process_events(self):
        return self._read_events(self.process_events_dir, 'value STRING')

    def close_events(self):
        return self._read_events(self.close_events_dir, 'error STRING')

    def run_streaming_query_on_writer(self, writer, num_files):
        # Feed num_files micro-batches through `writer`, then stop all queries.
        self._reset()
        try:
            sdf = self.spark.readStream.format('text').load(self.input_dir)
            sq = sdf.writeStream.foreach(writer).start()
            for i in range(num_files):
                self.write_input_file()
                sq.processAllAvailable()
        finally:
            self.stop_all()

    def assert_invalid_writer(self, writer, msg=None):
        # Starting a query with an invalid writer must raise; optionally the
        # exception text must contain `msg`.
        self._reset()
        try:
            sdf = self.spark.readStream.format('text').load(self.input_dir)
            sq = sdf.writeStream.foreach(writer).start()
            self.write_input_file()
            sq.processAllAvailable()
            # NOTE(review): this is a plain class, not a TestCase — self.fail
            # raises AttributeError here, which the except below catches;
            # confirm that is the intended failure path.
            self.fail("invalid writer %s did not fail the query" % str(writer))  # not expected
        except Exception as e:
            if msg:
                assert msg in str(e), "%s not in %s" % (msg, str(e))
        finally:
            self.stop_all()

    def stop_all(self):
        for q in self.spark._wrapped.streams.active:
            q.stop()

    def _reset(self):
        # Fresh temp directories for input and each event channel.
        self.input_dir = tempfile.mkdtemp()
        self.open_events_dir = tempfile.mkdtemp()
        self.process_events_dir = tempfile.mkdtemp()
        self.close_events_dir = tempfile.mkdtemp()

    def _read_events(self, dir, json):
        rows = self.spark.read.schema(json).json(dir).collect()
        dicts = [row.asDict() for row in rows]
        return dicts

    def _write_event(self, dir, event):
        import uuid
        # Unique filename per event to avoid clobbering across tasks.
        with open(os.path.join(dir, str(uuid.uuid4())), 'w') as f:
            f.write("%s\n" % str(event))

    def __getstate__(self):
        # Only the event directories travel to executors when pickled.
        return (self.open_events_dir, self.process_events_dir, self.close_events_dir)

    def __setstate__(self, state):
        self.open_events_dir, self.process_events_dir, self.close_events_dir = state
def test_streaming_foreach_with_simple_function(self):
    """A bare function (not a writer object) is accepted by foreach()."""
    harness = self.ForeachWriterTester(self.spark)

    def record(row):
        harness.write_process_event(row)

    num_batches = 2
    harness.run_streaming_query_on_writer(record, num_batches)
    self.assertEqual(len(harness.process_events()), num_batches)
def test_streaming_foreach_with_basic_open_process_close(self):
    """A full open/process/close writer receives all three callbacks per epoch."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def open(self, partitionId, epochId):
            tester.write_open_event(partitionId, epochId)
            return True  # accept the partition so process() runs

        def process(self, row):
            tester.write_process_event(row)

        def close(self, error):
            tester.write_close_event(error)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)

    open_events = tester.open_events()
    self.assertEqual(len(open_events), 2)
    # One epoch per input file: epochs 0 and 1.
    self.assertSetEqual(set([e['epoch'] for e in open_events]), {0, 1})

    self.assertEqual(len(tester.process_events()), 2)

    close_events = tester.close_events()
    self.assertEqual(len(close_events), 2)
    # close(None) stringifies to 'None' when no error occurred.
    self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_with_open_returning_false(self):
    """open() returning False skips process() for the partition, but close() still runs."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def open(self, partition_id, epoch_id):
            tester.write_open_event(partition_id, epoch_id)
            return False  # decline the partition

        def process(self, row):
            tester.write_process_event(row)

        def close(self, error):
            tester.write_close_event(error)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)

    self.assertEqual(len(tester.open_events()), 2)
    self.assertEqual(len(tester.process_events()), 0)  # no row was processed
    close_events = tester.close_events()
    self.assertEqual(len(close_events), 2)
    self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})
def test_streaming_foreach_without_open_method(self):
    """open() is optional; rows are still processed and close() still fires."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def process(self, row):
            tester.write_process_event(row)

        def close(self, error):
            tester.write_close_event(error)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)
    self.assertEqual(len(tester.open_events()), 0)  # no open events
    self.assertEqual(len(tester.process_events()), 2)
    self.assertEqual(len(tester.close_events()), 2)
def test_streaming_foreach_without_close_method(self):
    """close() is optional; open() and process() still run normally."""
    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def open(self, partition_id, epoch_id):
            tester.write_open_event(partition_id, epoch_id)
            return True

        def process(self, row):
            tester.write_process_event(row)

    tester.run_streaming_query_on_writer(ForeachWriter(), 2)
    self.assertEqual(len(tester.open_events()), 2)  # open fired once per epoch
    self.assertEqual(len(tester.process_events()), 2)
    self.assertEqual(len(tester.close_events()), 0)  # close was never defined
def test_streaming_foreach_without_open_and_close_methods(self):
    """A writer exposing only process() is valid; open/close are optional."""
    harness = self.ForeachWriterTester(self.spark)

    class ProcessOnlyWriter:
        def process(self, row):
            harness.write_process_event(row)

    harness.run_streaming_query_on_writer(ProcessOnlyWriter(), 2)
    self.assertEqual(len(harness.open_events()), 0)     # open never defined
    self.assertEqual(len(harness.process_events()), 2)  # every batch processed
    self.assertEqual(len(harness.close_events()), 0)    # close never defined
def test_streaming_foreach_with_process_throwing_error(self):
    """An exception in process() fails the query; close() still receives the error."""
    from pyspark.sql.utils import StreamingQueryException

    tester = self.ForeachWriterTester(self.spark)

    class ForeachWriter:
        def process(self, row):
            raise Exception("test error")

        def close(self, error):
            tester.write_close_event(error)

    try:
        tester.run_streaming_query_on_writer(ForeachWriter(), 1)
        self.fail("bad writer did not fail the query")  # this is not expected
    except StreamingQueryException as e:
        # TODO: Verify whether original error message is inside the exception
        pass
    self.assertEqual(len(tester.process_events()), 0)  # no row was processed
    close_events = tester.close_events()
    self.assertEqual(len(close_events), 1)
    # TODO: Verify whether original error message is inside the exception
def test_streaming_foreach_with_invalid_writers(self):
    """foreach() rejects writers with missing, non-callable or wrong-arity methods."""
    tester = self.ForeachWriterTester(self.spark)

    # NOTE(review): a function is expected to be rejected here — presumably
    # because it must accept a single row, not an iterator; confirm the
    # validation rule this exercises.
    def func_with_iterator_input(iter):
        for x in iter:
            print(x)

    tester.assert_invalid_writer(func_with_iterator_input)

    class WriterWithoutProcess:
        def open(self, partition):
            pass

    tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")

    class WriterWithNonCallableProcess():
        process = True

    tester.assert_invalid_writer(WriterWithNonCallableProcess(),
                                 "'process' in provided object is not callable")

    class WriterWithNoParamProcess():
        def process(self):
            pass

    tester.assert_invalid_writer(WriterWithNoParamProcess())

    # Abstract class for tests below
    class WithProcess():
        def process(self, row):
            pass

    class WriterWithNonCallableOpen(WithProcess):
        open = True

    tester.assert_invalid_writer(WriterWithNonCallableOpen(),
                                 "'open' in provided object is not callable")

    class WriterWithNoParamOpen(WithProcess):
        def open(self):
            pass

    tester.assert_invalid_writer(WriterWithNoParamOpen())

    class WriterWithNonCallableClose(WithProcess):
        close = True

    tester.assert_invalid_writer(WriterWithNonCallableClose(),
                                 "'close' in provided object is not callable")
def test_streaming_foreachBatch(self):
    """foreachBatch() hands each micro-batch DataFrame and its id to the callback."""
    q = None
    collected = dict()

    def collectBatch(batch_df, batch_id):
        collected[batch_id] = batch_df.collect()

    try:
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
        q = df.writeStream.foreachBatch(collectBatch).start()
        q.processAllAvailable()
        self.assertTrue(0 in collected)
        # BUGFIX: was assertTrue(len(collected[0]), 2), which treats 2 as the
        # failure *message* and never compares; assertEqual does the intended check.
        self.assertEqual(len(collected[0]), 2)
    finally:
        if q:
            q.stop()
def test_streaming_foreachBatch_propagates_python_errors(self):
    """An exception raised in the foreachBatch callback surfaces as StreamingQueryException."""
    from pyspark.sql.utils import StreamingQueryException

    q = None

    def collectBatch(df, id):
        raise Exception("this should fail the query")

    try:
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
        q = df.writeStream.foreachBatch(collectBatch).start()
        q.processAllAvailable()
        self.fail("Expected a failure")
    except StreamingQueryException as e:
        # The original Python message must survive the JVM round-trip.
        self.assertTrue("this should fail" in str(e))
    finally:
        if q:
            q.stop()
def test_help_command(self):
    """help()/pydoc must not raise on DataFrame objects and columns."""
    # Regression test for SPARK-5464
    rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
    df = self.spark.read.json(rdd)
    # render_doc() reproduces the help() exception without printing output
    pydoc.render_doc(df)
    pydoc.render_doc(df.foo)
    pydoc.render_doc(df.take(1))
def test_access_column(self):
    """Columns are reachable by attribute, name and position; bad keys raise typed errors."""
    df = self.df
    self.assertTrue(isinstance(df.key, Column))
    self.assertTrue(isinstance(df['key'], Column))
    self.assertTrue(isinstance(df[0], Column))
    self.assertRaises(IndexError, lambda: df[2])          # out-of-range position
    self.assertRaises(AnalysisException, lambda: df["bad_key"])  # unknown column name
    self.assertRaises(TypeError, lambda: df[{}])          # unsupported key type
def test_column_name_with_non_ascii(self):
    """Non-ASCII column names work across schema, repr, dtypes and select."""
    if sys.version >= '3':
        columnName = "数量"
        self.assertTrue(isinstance(columnName, str))
    else:
        # Python 2: exercise the unicode code path explicitly.
        columnName = unicode("数量", "utf-8")
        self.assertTrue(isinstance(columnName, unicode))
    schema = StructType([StructField(columnName, LongType(), True)])
    df = self.spark.createDataFrame([(1,)], schema)
    self.assertEqual(schema, df.schema)
    self.assertEqual("DataFrame[数量: bigint]", str(df))
    self.assertEqual([("数量", 'bigint')], df.dtypes)
    self.assertEqual(1, df.select("数量").first()[0])
    self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
    """Array, struct and map columns support both operator and method access."""
    nested = Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})
    df = self.sc.parallelize([nested]).toDF()

    def first_value(col):
        return df.select(col).first()[0]

    # Array element: [] operator and getItem() are equivalent.
    self.assertEqual(1, first_value(df.l[0]))
    self.assertEqual(1, first_value(df.l.getItem(0)))
    # Struct field: attribute access and getField() are equivalent.
    self.assertEqual(1, first_value(df.r.a))
    self.assertEqual("b", first_value(df.r.getField("b")))
    # Map value: [] operator and getItem() are equivalent.
    self.assertEqual("v", first_value(df.d["k"]))
    self.assertEqual("v", first_value(df.d.getItem("k")))
def test_field_accessor(self):
    """Struct and map fields are reachable via [], dotted names and attributes."""
    sample = Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})
    df = self.sc.parallelize([sample]).toDF()
    cases = [
        (1, df.l[0]),        # array element by index
        (1, df.r["a"]),      # struct field via [] on the column
        (1, df["r.a"]),      # struct field via dotted name on the frame
        ("b", df.r["b"]),
        ("b", df["r.b"]),
        ("v", df.d["k"]),    # map value by key
    ]
    for expected, column in cases:
        self.assertEqual(expected, df.select(column).first()[0])
def test_infer_long_type(self):
    """Large Python ints infer as LongType and survive a parquet round-trip."""
    longrow = [Row(f1='a', f2=100000000000000)]
    df = self.sc.parallelize(longrow).toDF()
    self.assertEqual(df.schema.fields[1].dataType, LongType())

    # this saving as Parquet caused issues as well.
    output_dir = os.path.join(self.tempdir.name, "infer_long_type")
    df.write.parquet(output_dir)
    df1 = self.spark.read.parquet(output_dir)
    self.assertEqual('a', df1.first().f1)
    self.assertEqual(100000000000000, df1.first().f2)

    # _infer_type maps every int magnitude checked here to LongType,
    # including values beyond 2**63.
    self.assertEqual(_infer_type(1), LongType())
    self.assertEqual(_infer_type(2**10), LongType())
    self.assertEqual(_infer_type(2**20), LongType())
    self.assertEqual(_infer_type(2**31 - 1), LongType())
    self.assertEqual(_infer_type(2**31), LongType())
    self.assertEqual(_infer_type(2**61), LongType())
    self.assertEqual(_infer_type(2**71), LongType())
def test_merge_type(self):
    """_merge_type unifies two inferred types, or raises TypeError whose message
    names the path to the conflicting element (array element, map key/value,
    struct field, and their nestings)."""
    # NullType is absorbed by any concrete type, in either argument order.
    self.assertEqual(_merge_type(LongType(), NullType()), LongType())
    self.assertEqual(_merge_type(NullType(), LongType()), LongType())

    self.assertEqual(_merge_type(LongType(), LongType()), LongType())

    self.assertEqual(_merge_type(
        ArrayType(LongType()),
        ArrayType(LongType())
    ), ArrayType(LongType()))
    with self.assertRaisesRegexp(TypeError, 'element in array'):
        _merge_type(ArrayType(LongType()), ArrayType(DoubleType()))

    self.assertEqual(_merge_type(
        MapType(StringType(), LongType()),
        MapType(StringType(), LongType())
    ), MapType(StringType(), LongType()))
    with self.assertRaisesRegexp(TypeError, 'key of map'):
        _merge_type(
            MapType(StringType(), LongType()),
            MapType(DoubleType(), LongType()))
    with self.assertRaisesRegexp(TypeError, 'value of map'):
        _merge_type(
            MapType(StringType(), LongType()),
            MapType(StringType(), DoubleType()))

    self.assertEqual(_merge_type(
        StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
        StructType([StructField("f1", LongType()), StructField("f2", StringType())])
    ), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
    with self.assertRaisesRegexp(TypeError, 'field f1'):
        _merge_type(
            StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
            StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))

    # Nested structs: the error message spells out the full field path.
    self.assertEqual(_merge_type(
        StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
        StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
    ), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
    with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
        _merge_type(
            StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
            StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))

    self.assertEqual(_merge_type(
        StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
        StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
    ), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
    with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
        _merge_type(
            StructType([
                StructField("f1", ArrayType(LongType())),
                StructField("f2", StringType())]),
            StructType([
                StructField("f1", ArrayType(DoubleType())),
                StructField("f2", StringType())]))

    self.assertEqual(_merge_type(
        StructType([
            StructField("f1", MapType(StringType(), LongType())),
            StructField("f2", StringType())]),
        StructType([
            StructField("f1", MapType(StringType(), LongType())),
            StructField("f2", StringType())])
    ), StructType([
        StructField("f1", MapType(StringType(), LongType())),
        StructField("f2", StringType())]))
    with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
        _merge_type(
            StructType([
                StructField("f1", MapType(StringType(), LongType())),
                StructField("f2", StringType())]),
            StructType([
                StructField("f1", MapType(StringType(), DoubleType())),
                StructField("f2", StringType())]))

    self.assertEqual(_merge_type(
        StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
        StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
    ), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
    with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
        _merge_type(
            StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
            StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
        )
def test_filter_with_datetime(self):
    """Python datetime and date literals compare correctly inside filter()."""
    moment = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
    day = moment.date()
    df = self.spark.createDataFrame([Row(date=day, time=moment)])
    # Equality matches the stored values exactly...
    self.assertEqual(1, df.filter(df.date == day).count())
    self.assertEqual(1, df.filter(df.time == moment).count())
    # ...and strict > against the same value matches nothing.
    self.assertEqual(0, df.filter(df.date > day).count())
    self.assertEqual(0, df.filter(df.time > moment).count())
def test_filter_with_datetime_timezone(self):
    """Timezone-aware datetimes are compared by absolute instant in filters."""
    # Same wall-clock time, different offsets: dt1 (UTC+0) is one hour after dt2 (UTC+1).
    dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
    dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
    row = Row(date=dt1)
    df = self.spark.createDataFrame([row])
    self.assertEqual(0, df.filter(df.date == dt2).count())
    self.assertEqual(1, df.filter(df.date > dt2).count())
    self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
    """Naive and aware datetimes representing the same instant round-trip equal."""
    day = datetime.date.today()
    now = datetime.datetime.now()
    ts = time.mktime(now.timetuple())
    # class in __main__ is not serializable
    from pyspark.sql.tests import UTCOffsetTimezone
    utc = UTCOffsetTimezone()
    utcnow = datetime.datetime.utcfromtimestamp(ts)  # without microseconds
    # add microseconds to utcnow (keeping year,month,day,hour,minute,second)
    utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
    df = self.spark.createDataFrame([(day, now, utcnow)])
    day1, now1, utcnow1 = df.first()
    self.assertEqual(day1, day)
    self.assertEqual(now, now1)
    # The aware value comes back equal to the naive local time of the same instant.
    self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
    """A datetime at the Unix epoch round-trips both as data and as a lit() literal."""
    epoch = datetime.datetime.fromtimestamp(0)
    df = self.spark.createDataFrame([Row(date=epoch)])
    first = df.select('date', lit(epoch).alias('lit_date')).first()
    self.assertEqual(first['date'], epoch)
    self.assertEqual(first['lit_date'], epoch)
def test_dayofweek(self):
    """dayofweek(): 2017-11-06 is a Monday, which Spark encodes as 2."""
    from pyspark.sql.functions import dayofweek
    monday = datetime.datetime(2017, 11, 6)
    frame = self.spark.createDataFrame([Row(date=monday)])
    result = frame.select(dayofweek(frame.date)).first()
    self.assertEqual(result[0], 2)
def test_decimal(self):
    """Decimal columns survive arithmetic and a parquet round-trip.

    BUGFIX: the parquet temp directory was never removed after the test; it is
    now cleaned up in a finally block.
    """
    from decimal import Decimal
    schema = StructType([StructField("decimal", DecimalType(10, 5))])
    df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
    row = df.select(df.decimal + 1).first()
    self.assertEqual(row[0], Decimal("4.14159"))
    tmpPath = tempfile.mkdtemp()
    shutil.rmtree(tmpPath)  # parquet writer must create the directory itself
    try:
        df.write.parquet(tmpPath)
        df2 = self.spark.read.parquet(tmpPath)
        row = df2.first()
        self.assertEqual(row[0], Decimal("3.14159"))
    finally:
        shutil.rmtree(tmpPath, ignore_errors=True)
def test_dropna(self):
    """dropna(): how='any'/'all', thresh, subset semantics; thresh overrides how."""
    schema = StructType([
        StructField("name", StringType(), True),
        StructField("age", IntegerType(), True),
        StructField("height", DoubleType(), True)])

    # shouldn't drop a non-null row
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, 80.1)], schema).dropna().count(),
        1)

    # dropping rows with a single null value
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna().count(),
        0)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
        0)

    # if how = 'all', only drop rows if all values are null
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(None, None, None)], schema).dropna(how='all').count(),
        0)

    # how and subset
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
        0)

    # threshold
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, None)], schema).dropna(thresh=2).count(),
        0)

    # threshold and subset
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
        1)
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
        0)

    # thresh should take precedence over how
    self.assertEqual(self.spark.createDataFrame(
        [(u'Alice', 50, None)], schema).dropna(
        how='any', thresh=2, subset=['name', 'age']).count(),
        1)
def test_fillna(self):
    """fillna(): type-directed fills (int/double/bool/string), subset filtering,
    and the dict form; values of non-matching types are left untouched."""
    schema = StructType([
        StructField("name", StringType(), True),
        StructField("age", IntegerType(), True),
        StructField("height", DoubleType(), True),
        StructField("spy", BooleanType(), True)])

    # fillna shouldn't change non-null values
    row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
    self.assertEqual(row.age, 10)

    # fillna with int
    row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
    self.assertEqual(row.age, 50)
    self.assertEqual(row.height, 50.0)

    # fillna with double
    row = self.spark.createDataFrame(
        [(u'Alice', None, None, None)], schema).fillna(50.1).first()
    self.assertEqual(row.age, 50)
    self.assertEqual(row.height, 50.1)

    # fillna with bool
    row = self.spark.createDataFrame(
        [(u'Alice', None, None, None)], schema).fillna(True).first()
    self.assertEqual(row.age, None)
    self.assertEqual(row.spy, True)

    # fillna with string
    row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
    self.assertEqual(row.name, u"hello")
    self.assertEqual(row.age, None)

    # fillna with subset specified for numeric cols
    row = self.spark.createDataFrame(
        [(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
    self.assertEqual(row.name, None)
    self.assertEqual(row.age, 50)
    self.assertEqual(row.height, None)
    self.assertEqual(row.spy, None)

    # fillna with subset specified for string cols
    row = self.spark.createDataFrame(
        [(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
    self.assertEqual(row.name, "haha")
    self.assertEqual(row.age, None)
    self.assertEqual(row.height, None)
    self.assertEqual(row.spy, None)

    # fillna with subset specified for bool cols
    row = self.spark.createDataFrame(
        [(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
    self.assertEqual(row.name, None)
    self.assertEqual(row.age, None)
    self.assertEqual(row.height, None)
    self.assertEqual(row.spy, True)

    # fillna with dictionary for boolean types
    row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
    self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
    def test_replace(self):
        """DataFrame.replace: scalar/list/tuple/dict forms, subsets, and rejected inputs."""
        schema = StructType([
            StructField("name", StringType(), True),
            StructField("age", IntegerType(), True),
            StructField("height", DoubleType(), True)])
        # replace with int
        row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
        self.assertEqual(row.age, 20)
        self.assertEqual(row.height, 20.0)
        # replace with double
        row = self.spark.createDataFrame(
            [(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
        self.assertEqual(row.age, 82)
        self.assertEqual(row.height, 82.1)
        # replace with string
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
        self.assertEqual(row.name, u"Ann")
        self.assertEqual(row.age, 10)
        # replace with subset specified by a string of a column name w/ actual change
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
        self.assertEqual(row.age, 20)
        # replace with subset specified by a string of a column name w/o actual change
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
        self.assertEqual(row.age, 10)
        # replace with subset specified with one column replaced, another column not in subset
        # stays unchanged.
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
        self.assertEqual(row.name, u'Alice')
        self.assertEqual(row.age, 20)
        self.assertEqual(row.height, 10.0)
        # replace with subset specified but no column will be replaced
        row = self.spark.createDataFrame(
            [(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
        self.assertEqual(row.name, u'Alice')
        self.assertEqual(row.age, 10)
        self.assertEqual(row.height, None)
        # replace with lists
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
        self.assertTupleEqual(row, (u'Ann', 10, 80.1))
        # replace with dict
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
        self.assertTupleEqual(row, (u'Alice', 11, 80.1))
        # test backward compatibility with dummy value
        # (the positional `value` argument is ignored when to_replace is a dict)
        dummy_value = 1
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
        self.assertTupleEqual(row, (u'Bob', 10, 80.1))
        # test dict with mixed numerics
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
        self.assertTupleEqual(row, (u'Alice', -10, 90.5))
        # replace with tuples
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
        self.assertTupleEqual(row, (u'Bob', 10, 80.1))
        # replace multiple columns
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
        self.assertTupleEqual(row, (u'Alice', 20, 90.0))
        # test for mixed numerics
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
        self.assertTupleEqual(row, (u'Alice', 20, 90.5))
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
        self.assertTupleEqual(row, (u'Alice', 20, 90.5))
        # replace with boolean
        row = (self
               .spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
               .selectExpr("name = 'Bob'", 'age <= 15')
               .replace(False, True).first())
        self.assertTupleEqual(row, (True, True))
        # replace string with None and then drop None rows
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
        self.assertEqual(row.count(), 0)
        # replace with number and None
        row = self.spark.createDataFrame(
            [(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
        self.assertTupleEqual(row, (u'Alice', 20, None))
        # should fail if subset is not list, tuple or None
        with self.assertRaises(ValueError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
        # should fail if to_replace and value have different length
        with self.assertRaises(ValueError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail if when received unexpected type
        with self.assertRaises(ValueError):
            from datetime import datetime
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
        # should fail if provided mixed type replacements
        with self.assertRaises(ValueError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
        with self.assertRaises(ValueError):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
        with self.assertRaisesRegexp(
                TypeError,
                'value argument is required when to_replace is not a dictionary.'):
            self.spark.createDataFrame(
                [(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
    def test_capture_illegalargument_exception(self):
        """JVM IllegalArgumentException surfaces with its message and stack trace."""
        self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
                                lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
        df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
        self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
                                lambda: df.select(sha2(df.a, 1024)).collect())
        try:
            df.select(sha2(df.a, 1024)).collect()
        except IllegalArgumentException as e:
            # The captured exception exposes the JVM-side message (.desc)
            # and the originating Java stack trace (.stackTrace).
            self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
            self.assertRegexpMatches(e.stackTrace,
                                     "org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
    def test_toDF_with_schema_string(self):
        """RDD.toDF accepts a DDL schema string (or a DataType) and validates at runtime."""
        data = [Row(key=i, value=str(i)) for i in range(100)]
        rdd = self.sc.parallelize(data, 5)
        df = rdd.toDF("key: int, value: string")
        self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
        self.assertEqual(df.collect(), data)
        # different but compatible field types can be used.
        df = rdd.toDF("key: string, value: string")
        self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
        self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
        # field names can differ.
        df = rdd.toDF(" a: int, b: string ")
        self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
        self.assertEqual(df.collect(), data)
        # number of fields must match.
        self.assertRaisesRegexp(Exception, "Length of object",
                                lambda: rdd.toDF("key: int").collect())
        # field types mismatch will cause exception at runtime.
        self.assertRaisesRegexp(Exception, "FloatType can not accept",
                                lambda: rdd.toDF("key: float, value: string").collect())
        # flat schema values will be wrapped into row.
        df = rdd.map(lambda row: row.key).toDF("int")
        self.assertEqual(df.schema.simpleString(), "struct<value:int>")
        self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
        # users can use DataType directly instead of data type string.
        df = rdd.map(lambda row: row.key).toDF(IntegerType())
        self.assertEqual(df.schema.simpleString(), "struct<value:int>")
        self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
    def test_conf(self):
        """spark.conf set/get/unset semantics, including default-value handling."""
        spark = self.spark
        spark.conf.set("bogo", "sipeo")
        self.assertEqual(spark.conf.get("bogo"), "sipeo")
        spark.conf.set("bogo", "ta")
        self.assertEqual(spark.conf.get("bogo"), "ta")
        # The fallback default is ignored when the key is set.
        self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
        self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
        # Reading an unset key without a default raises.
        self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
        spark.conf.unset("bogo")
        self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
        self.assertEqual(spark.conf.get("hyukjin", None), None)
        # This returns 'STATIC' because it's the default value of
        # 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in
        # `spark.conf.get` is unset.
        self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC")
        # This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but
        # `defaultValue` in `spark.conf.get` is set to None.
        self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None)
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
    def test_list_tables(self):
        """listTables covers managed tables per database plus global temp views."""
        from pyspark.sql.catalog import Table
        spark = self.spark
        spark.catalog._reset()
        spark.sql("CREATE DATABASE some_db")
        self.assertEquals(spark.catalog.listTables(), [])
        self.assertEquals(spark.catalog.listTables("some_db"), [])
        spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
        spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
        spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
        tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
        tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
        tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
        # The no-argument form lists the current (default) database.
        self.assertEquals(tables, tablesDefault)
        self.assertEquals(len(tables), 2)
        # Temp views show up in every database listing (database=None).
        self.assertEquals(len(tablesSomeDb), 2)
        self.assertEquals(tables[0], Table(
            name="tab1",
            database="default",
            description=None,
            tableType="MANAGED",
            isTemporary=False))
        self.assertEquals(tables[1], Table(
            name="temp_tab",
            database=None,
            description=None,
            tableType="TEMPORARY",
            isTemporary=True))
        self.assertEquals(tablesSomeDb[0], Table(
            name="tab2",
            database="some_db",
            description=None,
            tableType="MANAGED",
            isTemporary=False))
        self.assertEquals(tablesSomeDb[1], Table(
            name="temp_tab",
            database=None,
            description=None,
            tableType="TEMPORARY",
            isTemporary=True))
        # Listing a nonexistent database is an analysis error.
        self.assertRaisesRegexp(
            AnalysisException,
            "does_not_exist",
            lambda: spark.catalog.listTables("does_not_exist"))
    def test_list_functions(self):
        """listFunctions includes built-ins, temp UDFs, and per-database functions."""
        from pyspark.sql.catalog import Function
        spark = self.spark
        spark.catalog._reset()
        spark.sql("CREATE DATABASE some_db")
        functions = dict((f.name, f) for f in spark.catalog.listFunctions())
        functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
        # Built-in functions are always present, whatever the database.
        self.assertTrue(len(functions) > 200)
        self.assertTrue("+" in functions)
        self.assertTrue("like" in functions)
        self.assertTrue("month" in functions)
        self.assertTrue("to_date" in functions)
        self.assertTrue("to_timestamp" in functions)
        self.assertTrue("to_unix_timestamp" in functions)
        self.assertTrue("current_database" in functions)
        self.assertEquals(functions["+"], Function(
            name="+",
            description=None,
            className="org.apache.spark.sql.catalyst.expressions.Add",
            isTemporary=True))
        self.assertEquals(functions, functionsDefault)
        spark.catalog.registerFunction("temp_func", lambda x: str(x))
        spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
        spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
        newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
        newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
        # Registering functions only adds to the listings, never removes.
        self.assertTrue(set(functions).issubset(set(newFunctions)))
        self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
        # Temp functions are visible everywhere; database functions only in theirs.
        self.assertTrue("temp_func" in newFunctions)
        self.assertTrue("func1" in newFunctions)
        self.assertTrue("func2" not in newFunctions)
        self.assertTrue("temp_func" in newFunctionsSomeDb)
        self.assertTrue("func1" not in newFunctionsSomeDb)
        self.assertTrue("func2" in newFunctionsSomeDb)
        self.assertRaisesRegexp(
            AnalysisException,
            "does_not_exist",
            lambda: spark.catalog.listFunctions("does_not_exist"))
    def test_list_columns(self):
        """listColumns reports name/type/nullable metadata per table and database."""
        from pyspark.sql.catalog import Column
        spark = self.spark
        spark.catalog._reset()
        spark.sql("CREATE DATABASE some_db")
        spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
        spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
        columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
        columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
        # The no-database form resolves against the current (default) database.
        self.assertEquals(columns, columnsDefault)
        self.assertEquals(len(columns), 2)
        self.assertEquals(columns[0], Column(
            name="age",
            description=None,
            dataType="int",
            nullable=True,
            isPartition=False,
            isBucket=False))
        self.assertEquals(columns[1], Column(
            name="name",
            description=None,
            dataType="string",
            nullable=True,
            isPartition=False,
            isBucket=False))
        columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
        self.assertEquals(len(columns2), 2)
        self.assertEquals(columns2[0], Column(
            name="nickname",
            description=None,
            dataType="string",
            nullable=True,
            isPartition=False,
            isBucket=False))
        self.assertEquals(columns2[1], Column(
            name="tolerance",
            description=None,
            dataType="float",
            nullable=True,
            isPartition=False,
            isBucket=False))
        # tab2 lives in some_db, so resolving it in the default database fails.
        self.assertRaisesRegexp(
            AnalysisException,
            "tab2",
            lambda: spark.catalog.listColumns("tab2"))
        self.assertRaisesRegexp(
            AnalysisException,
            "does_not_exist",
            lambda: spark.catalog.listColumns("does_not_exist"))
    def test_cache(self):
        """cacheTable/uncacheTable/clearCache toggle isCached per table."""
        spark = self.spark
        spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
        spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
        # Fresh views start uncached.
        self.assertFalse(spark.catalog.isCached("tab1"))
        self.assertFalse(spark.catalog.isCached("tab2"))
        spark.catalog.cacheTable("tab1")
        # Caching one table does not affect the other.
        self.assertTrue(spark.catalog.isCached("tab1"))
        self.assertFalse(spark.catalog.isCached("tab2"))
        spark.catalog.cacheTable("tab2")
        spark.catalog.uncacheTable("tab1")
        self.assertFalse(spark.catalog.isCached("tab1"))
        self.assertTrue(spark.catalog.isCached("tab2"))
        # clearCache drops every cached table at once.
        spark.catalog.clearCache()
        self.assertFalse(spark.catalog.isCached("tab1"))
        self.assertFalse(spark.catalog.isCached("tab2"))
        # All three operations reject unknown table names.
        self.assertRaisesRegexp(
            AnalysisException,
            "does_not_exist",
            lambda: spark.catalog.isCached("does_not_exist"))
        self.assertRaisesRegexp(
            AnalysisException,
            "does_not_exist",
            lambda: spark.catalog.cacheTable("does_not_exist"))
        self.assertRaisesRegexp(
            AnalysisException,
            "does_not_exist",
            lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
# The empty bytearray is test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
    def test_array_types(self):
        """Round-trip every supported array.array typecode through createDataFrame (SPARK-16542)."""
        # This test need to make sure that the Scala type selected is at least
        # as large as the python's types. This is necessary because python's
        # array types depend on C implementation on the machine. Therefore there
        # is no machine independent correspondence between python's array types
        # and Scala types.
        # See: https://docs.python.org/2/library/array.html

        def assertCollectSuccess(typecode, value):
            # A value survives the round trip iff the mapped Scala type can hold it.
            row = Row(myarray=array.array(typecode, [value]))
            df = self.spark.createDataFrame([row])
            self.assertEqual(df.first()["myarray"][0], value)

        # supported string types
        #
        # String types in python's array are "u" for Py_UNICODE and "c" for char.
        # "u" will be removed in python 4, and "c" is not supported in python 3.
        supported_string_types = []
        if sys.version_info[0] < 4:
            supported_string_types += ['u']
            # test unicode
            assertCollectSuccess('u', u'a')
        if sys.version_info[0] < 3:
            supported_string_types += ['c']
            # test string
            assertCollectSuccess('c', 'a')

        # supported float and double
        #
        # Test max, min, and precision for float and double, assuming IEEE 754
        # floating-point format.
        supported_fractional_types = ['f', 'd']
        assertCollectSuccess('f', ctypes.c_float(1e+38).value)
        assertCollectSuccess('f', ctypes.c_float(1e-38).value)
        assertCollectSuccess('f', ctypes.c_float(1.123456).value)
        assertCollectSuccess('d', sys.float_info.max)
        assertCollectSuccess('d', sys.float_info.min)
        assertCollectSuccess('d', sys.float_info.epsilon)

        # supported signed int types
        #
        # The size of C types changes with implementation, we need to make sure
        # that there is no overflow error on the platform running this test.
        supported_signed_int_types = list(
            set(_array_signed_int_typecode_ctype_mappings.keys())
            .intersection(set(_array_type_mappings.keys())))
        for t in supported_signed_int_types:
            ctype = _array_signed_int_typecode_ctype_mappings[t]
            max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
            assertCollectSuccess(t, max_val - 1)
            assertCollectSuccess(t, -max_val)

        # supported unsigned int types
        #
        # JVM does not have unsigned types. We need to be very careful to make
        # sure that there is no overflow error.
        supported_unsigned_int_types = list(
            set(_array_unsigned_int_typecode_ctype_mappings.keys())
            .intersection(set(_array_type_mappings.keys())))
        for t in supported_unsigned_int_types:
            ctype = _array_unsigned_int_typecode_ctype_mappings[t]
            assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)

        # all supported types
        #
        # Make sure the types tested above:
        # 1. are all supported types
        # 2. cover all supported types
        supported_types = (supported_string_types +
                           supported_fractional_types +
                           supported_signed_int_types +
                           supported_unsigned_int_types)
        self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))

        # all unsupported types
        #
        # Keys in _array_type_mappings is a complete list of all supported types,
        # and types not in _array_type_mappings are considered unsupported.
        # `array.typecodes` are not supported in python 2.
        if sys.version_info[0] < 3:
            all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
        else:
            all_types = set(array.typecodes)
        unsupported_types = all_types - set(supported_types)
        # test unsupported types
        for t in unsupported_types:
            with self.assertRaises(TypeError):
                a = array.array(t)
                self.spark.createDataFrame([Row(myarray=a)]).collect()
    def test_bucketed_write(self):
        """bucketBy/sortBy writer options register bucket columns in the catalog."""
        data = [
            (1, "foo", 3.0), (2, "foo", 5.0),
            (3, "bar", -1.0), (4, "bar", 6.0),
        ]
        df = self.spark.createDataFrame(data, ["x", "y", "z"])

        def count_bucketed_cols(names, table="pyspark_bucket"):
            """Given a sequence of column names and a table name
            query the catalog and return number o columns which are
            used for bucketing
            """
            cols = self.spark.catalog.listColumns(table)
            num = len([c for c in cols if c.name in names and c.isBucket])
            return num

        # Test write with one bucketing column
        df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
        self.assertEqual(count_bucketed_cols(["x"]), 1)
        self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))

        # Test write two bucketing columns
        df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
        self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
        self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))

        # Test write with bucket and sort
        df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
        self.assertEqual(count_bucketed_cols(["x"]), 1)
        self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))

        # Test write with a list of columns
        df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
        self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
        self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))

        # Test write with bucket and sort with a list of columns
        (df.write.bucketBy(2, "x")
            .sortBy(["y", "z"])
            .mode("overwrite").saveAsTable("pyspark_bucket"))
        self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))

        # Test write with bucket and sort with multiple columns
        (df.write.bucketBy(2, "x")
            .sortBy("y", "z")
            .mode("overwrite").saveAsTable("pyspark_bucket"))
        self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
    @unittest.skipIf(_have_pandas, "Required Pandas was found.")
    def test_to_pandas_required_pandas_not_found(self):
        """Without pandas installed, toPandas raises a descriptive ImportError."""
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
                self._to_pandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
    @unittest.skipIf(_have_pandas, "Required Pandas was found.")
    def test_create_dataframe_required_pandas_not_found(self):
        """createDataFrame(pandas_df) raises a descriptive ImportError without pandas."""
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(
                    ImportError,
                    "(Pandas >= .* must be installed|No module named '?pandas'?)"):
                import pandas as pd
                from datetime import datetime
                pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
                                    "d": [pd.Timestamp.now().date()]})
                self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
    @unittest.skipIf(not _have_pandas, _pandas_requirement_message)
    def test_create_dateframe_from_pandas_with_dst(self):
        """Timestamps around a DST boundary round-trip correctly (SPARK-23360)."""
        import pandas as pd
        from datetime import datetime
        # 2015-10-31 22:30 falls near a DST transition in America/Los_Angeles.
        pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})

        df = self.spark.createDataFrame(pdf)
        self.assertPandasEqual(pdf, df.toPandas())

        orig_env_tz = os.environ.get('TZ', None)
        try:
            # Repeat the round trip with both the process TZ and the session
            # time zone forced to a DST-observing zone.
            tz = 'America/Los_Angeles'
            os.environ['TZ'] = tz
            time.tzset()
            with self.sql_conf({'spark.sql.session.timeZone': tz}):
                df = self.spark.createDataFrame(pdf)
                self.assertPandasEqual(pdf, df.toPandas())
        finally:
            # Restore the original TZ environment so later tests are unaffected.
            del os.environ['TZ']
            if orig_env_tz is not None:
                os.environ['TZ'] = orig_env_tz
            time.tzset()
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_json_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x))
schema = self.spark.read.option('inferSchema', True) \
.option('samplingRatio', 0.5) \
.json(rdd).schema
self.assertEquals(schema, StructType([StructField("a", LongType(), True)]))
def test_csv_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '0.1' if x == 1 else str(x))
schema = self.spark.read.option('inferSchema', True)\
.csv(rdd, samplingRatio=0.5).schema
self.assertEquals(schema, StructType([StructField("_c0", IntegerType(), True)]))
    def test_checking_csv_header(self):
        """enforceSchema=False makes the reader validate the CSV header names."""
        path = tempfile.mkdtemp()
        shutil.rmtree(path)  # the writer needs a nonexistent target directory
        try:
            self.spark.createDataFrame([[1, 1000], [2000, 2]])\
                .toDF('f1', 'f2').write.option("header", "true").csv(path)
            # Schema declares the columns in the opposite order of the file header.
            schema = StructType([
                StructField('f2', IntegerType(), nullable=True),
                StructField('f1', IntegerType(), nullable=True)])
            df = self.spark.read.option('header', 'true').schema(schema)\
                .csv(path, enforceSchema=False)
            self.assertRaisesRegexp(
                Exception,
                "CSV header does not conform to the schema",
                lambda: df.collect())
        finally:
            shutil.rmtree(path)
    def test_ignore_column_of_all_nulls(self):
        """dropFieldIfAllNull=True omits JSON fields that are null in every record."""
        path = tempfile.mkdtemp()
        shutil.rmtree(path)  # the writer needs a nonexistent target directory
        try:
            # Field "a" is null in every record and should be dropped from the
            # inferred schema; "b" and "c" are non-null somewhere and kept.
            df = self.spark.createDataFrame([["""{"a":null, "b":1, "c":3.0}"""],
                                             ["""{"a":null, "b":null, "c":"string"}"""],
                                             ["""{"a":null, "b":null, "c":null}"""]])
            df.write.text(path)
            schema = StructType([
                StructField('b', LongType(), nullable=True),
                StructField('c', StringType(), nullable=True)])
            readback = self.spark.read.json(path, dropFieldIfAllNull=True)
            self.assertEquals(readback.schema, schema)
        finally:
            shutil.rmtree(path)
# SPARK-24721
    @unittest.skipIf(not _test_compiled, _test_not_compiled_message)
    def test_datasource_with_udf(self):
        """UDFs in select/filter work over file, DataSource V1 and V2 scans (SPARK-24721)."""
        from pyspark.sql.functions import udf, lit, col

        path = tempfile.mkdtemp()
        shutil.rmtree(path)  # the writer needs a nonexistent target directory

        try:
            # Three frames backed by different scan implementations.
            self.spark.range(1).write.mode("overwrite").format('csv').save(path)
            filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
            datasource_df = self.spark.read \
                .format("org.apache.spark.sql.sources.SimpleScanSource") \
                .option('from', 0).option('to', 1).load().toDF('i')
            datasource_v2_df = self.spark.read \
                .format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
                .load().toDF('i', 'j')

            c1 = udf(lambda x: x + 1, 'int')(lit(1))     # UDF over a literal
            c2 = udf(lambda x: x + 1, 'int')(col('i'))   # UDF over a scanned column
            f1 = udf(lambda x: False, 'boolean')(lit(1))   # constant-false filter
            f2 = udf(lambda x: False, 'boolean')(col('i'))  # column-based false filter

            for df in [filesource_df, datasource_df, datasource_v2_df]:
                result = df.withColumn('c', c1)
                expected = df.withColumn('c', lit(2))
                self.assertEquals(expected.collect(), result.collect())

            for df in [filesource_df, datasource_df, datasource_v2_df]:
                result = df.withColumn('c', c2)
                expected = df.withColumn('c', col('i') + 1)
                self.assertEquals(expected.collect(), result.collect())

            for df in [filesource_df, datasource_df, datasource_v2_df]:
                for f in [f1, f2]:
                    result = df.filter(f)
                    self.assertEquals(0, result.count())
        finally:
            shutil.rmtree(path)
    def test_repr_behaviors(self):
        """__repr__/_repr_html_ must honor the spark.sql.repl.eagerEval.* settings.

        The expected literals carry a leading "| " margin that the regex below
        strips, so the triple-quoted strings can stay indented with the code.
        """
        import re
        pattern = re.compile(r'^ *\|', re.MULTILINE)
        df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
        # test when eager evaluation is enabled and _repr_html_ will not be called
        with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
            expected1 = """+-----+-----+
                ||  key|value|
                |+-----+-----+
                ||    1|    1|
                ||22222|22222|
                |+-----+-----+
                |"""
            self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
            with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
                expected2 = """+---+-----+
                    ||key|value|
                    |+---+-----+
                    ||  1|    1|
                    ||222|  222|
                    |+---+-----+
                    |"""
                self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
                with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
                    expected3 = """+---+-----+
                        ||key|value|
                        |+---+-----+
                        ||  1|    1|
                        |+---+-----+
                        |only showing top 1 row
                        |"""
                    self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
        # test when eager evaluation is enabled and _repr_html_ will be called
        with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
            expected1 = """<table border='1'>
                |<tr><th>key</th><th>value</th></tr>
                |<tr><td>1</td><td>1</td></tr>
                |<tr><td>22222</td><td>22222</td></tr>
                |</table>
                |"""
            self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
            with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
                expected2 = """<table border='1'>
                    |<tr><th>key</th><th>value</th></tr>
                    |<tr><td>1</td><td>1</td></tr>
                    |<tr><td>222</td><td>222</td></tr>
                    |</table>
                    |"""
                self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
                with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
                    expected3 = """<table border='1'>
                        |<tr><th>key</th><th>value</th></tr>
                        |<tr><td>1</td><td>1</td></tr>
                        |</table>
                        |only showing top 1 row
                        |"""
                    self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
        # test when eager evaluation is disabled and _repr_html_ will be called
        with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
            expected = "DataFrame[key: bigint, value: string]"
            self.assertEquals(None, df._repr_html_())
            self.assertEquals(expected, df.__repr__())
            with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
                self.assertEquals(None, df._repr_html_())
                self.assertEquals(expected, df.__repr__())
                with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
                    self.assertEquals(None, df._repr_html_())
                    self.assertEquals(expected, df.__repr__())
class HiveSparkSubmitTests(SparkSubmitTests):
    """spark-submit tests that require Hive classes on the JVM classpath."""
    @classmethod
    def setUpClass(cls):
        # get a SparkContext to check for availability of Hive
        sc = SparkContext('local[4]', cls.__name__)
        cls.hive_available = True
        try:
            sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
        except py4j.protocol.Py4JError:
            cls.hive_available = False
        except TypeError:
            cls.hive_available = False
        finally:
            # we don't need this SparkContext for the test
            sc.stop()
    def setUp(self):
        super(HiveSparkSubmitTests, self).setUp()
        if not self.hive_available:
            self.skipTest("Hive is not available.")
    def test_hivecontext(self):
        # This test checks that HiveContext is using Hive metastore (SPARK-16224).
        # It sets a metastore url and checks if there is a derby dir created by
        # Hive metastore. If this derby dir exists, HiveContext is using
        # Hive metastore.
        metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
        metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
        hive_site_dir = os.path.join(self.programDir, "conf")
        # The "|" margins are stripped by createTempFile before writing the file.
        hive_site_file = self.createTempFile("hive-site.xml", ("""
            |<configuration>
            | <property>
            |  <name>javax.jdo.option.ConnectionURL</name>
            |  <value>%s</value>
            | </property>
            |</configuration>
            """ % metastore_URL).lstrip(), "conf")
        script = self.createTempFile("test.py", """
            |import os
            |
            |from pyspark.conf import SparkConf
            |from pyspark.context import SparkContext
            |from pyspark.sql import HiveContext
            |
            |conf = SparkConf()
            |sc = SparkContext(conf=conf)
            |hive_context = HiveContext(sc)
            |print(hive_context.sql("show databases").collect())
            """)
        proc = subprocess.Popen(
            self.sparkSubmit + ["--master", "local-cluster[1,1,1024]",
                                "--driver-class-path", hive_site_dir, script],
            stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("default", out.decode('utf-8'))
        # The derby dir appearing at metastore_path proves the metastore URL was used.
        self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedSQLTestCase):
    """Tests that stop the shared SparkContext and therefore cannot live in SQLTests."""

    # Stopping the class-level SparkContext here would break the remaining
    # tests in SQLTests, hence the dedicated test case class.
    def test_sparksession_with_stopped_sparkcontext(self):
        self.sc.stop()
        fresh_sc = SparkContext('local[4]', self.sc.appName)
        session = SparkSession.builder.getOrCreate()
        try:
            # Duplicate column names are legal; the collect() just has to succeed
            # against the freshly re-created context.
            session.createDataFrame([(1, 2)], ["c", "c"]).collect()
        finally:
            session.stop()
            fresh_sc.stop()
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
    """Checks that the registered QueryExecutionListener fires for collect/toPandas."""
    # These tests are separate because it uses 'spark.sql.queryExecutionListeners' which is
    # static and immutable. This can't be set or unset, for example, via `spark.conf`.
    @classmethod
    def setUpClass(cls):
        import glob
        from pyspark.find_spark_home import _find_spark_home
        SPARK_HOME = _find_spark_home()
        # The helper listener only exists when the Scala test classes were compiled.
        filename_pattern = (
            "sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
            "TestQueryExecutionListener.class")
        cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
        if cls.has_listener:
            # Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
            cls.spark = SparkSession.builder \
                .master("local[4]") \
                .appName(cls.__name__) \
                .config(
                    "spark.sql.queryExecutionListeners",
                    "org.apache.spark.sql.TestQueryExecutionListener") \
                .getOrCreate()
    def setUp(self):
        if not self.has_listener:
            raise self.skipTest(
                "'org.apache.spark.sql.TestQueryExecutionListener' is not "
                "available. Will skip the related tests.")
    @classmethod
    def tearDownClass(cls):
        if hasattr(cls, "spark"):
            cls.spark.stop()
    def tearDown(self):
        # Reset the JVM-side "was called" flag between tests.
        self.spark._jvm.OnSuccessCall.clear()
    def test_query_execution_listener_on_collect(self):
        self.assertFalse(
            self.spark._jvm.OnSuccessCall.isCalled(),
            "The callback from the query execution listener should not be called before 'collect'")
        self.spark.sql("SELECT * FROM range(1)").collect()
        self.assertTrue(
            self.spark._jvm.OnSuccessCall.isCalled(),
            "The callback from the query execution listener should be called after 'collect'")
    @unittest.skipIf(
        not _have_pandas or not _have_pyarrow,
        _pandas_requirement_message or _pyarrow_requirement_message)
    def test_query_execution_listener_on_collect_with_arrow(self):
        with self.sql_conf({"spark.sql.execution.arrow.enabled": True}):
            self.assertFalse(
                self.spark._jvm.OnSuccessCall.isCalled(),
                "The callback from the query execution listener should not be "
                "called before 'toPandas'")
            self.spark.sql("SELECT * FROM range(1)").toPandas()
            self.assertTrue(
                self.spark._jvm.OnSuccessCall.isCalled(),
                "The callback from the query execution listener should be called after 'toPandas'")
class SparkSessionTests(PySparkTestCase):
    """Python SparkSession <-> JVM default-session synchronization (SPARK-23228)."""
    # This test is separate because it's closely related with session's start and stop.
    # See SPARK-23228.
    def test_set_jvm_default_session(self):
        spark = SparkSession.builder.getOrCreate()
        try:
            self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
        finally:
            spark.stop()
            # Stopping the Python session must clear the JVM-side default as well.
            self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
    def test_jvm_default_session_already_set(self):
        # Here, we assume there is the default session already set in JVM.
        jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
        self.sc._jvm.SparkSession.setDefaultSession(jsession)
        spark = SparkSession.builder.getOrCreate()
        try:
            self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
            # The session should be the same with the exiting one.
            self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
        finally:
            spark.stop()
class UDFInitializationTests(unittest.TestCase):
    """Constructing a UDF wrapper must not implicitly start Spark."""

    def tearDown(self):
        # Stop anything a test may have started so later tests get a clean slate.
        session = SparkSession._instantiatedSession
        if session is not None:
            session.stop()
        context = SparkContext._active_spark_context
        if context is not None:
            context.stop()

    def test_udf_init_shouldnt_initialize_context(self):
        from pyspark.sql.functions import UserDefinedFunction
        # Building the wrapper alone should touch neither the context nor a session.
        UserDefinedFunction(lambda x: x, StringType())
        self.assertIsNone(
            SparkContext._active_spark_context,
            "SparkContext shouldn't be initialized when UserDefinedFunction is created."
        )
        self.assertIsNone(
            SparkSession._instantiatedSession,
            "SparkSession shouldn't be initialized when UserDefinedFunction is created."
        )
class HiveContextSQLTests(ReusedPySparkTestCase):
    """SQL tests that need Hive classes on the JVM classpath (HiveContext)."""
    @classmethod
    def setUpClass(cls):
        # Probe the JVM for HiveConf; setUp skips every test when it is absent.
        ReusedPySparkTestCase.setUpClass()
        cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
        cls.hive_available = True
        try:
            cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
        except py4j.protocol.Py4JError:
            cls.hive_available = False
        except TypeError:
            cls.hive_available = False
        os.unlink(cls.tempdir.name)
        if cls.hive_available:
            cls.spark = HiveContext._createForTesting(cls.sc)
            cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
            cls.df = cls.sc.parallelize(cls.testData).toDF()
    def setUp(self):
        if not self.hive_available:
            self.skipTest("Hive is not available.")
    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        shutil.rmtree(cls.tempdir.name, ignore_errors=True)
    def test_save_and_load_table(self):
        """saveAsTable/createExternalTable round-trips with explicit and default sources."""
        df = self.df
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        # Phase 1: positional source/mode arguments.
        df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
        actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        self.spark.sql("DROP TABLE externalJsonTable")
        # Phase 2: keyword arguments, a narrowing schema, and an ignored extra option.
        df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
        schema = StructType([StructField("value", StringType(), True)])
        actual = self.spark.createExternalTable("externalJsonTable", source="json",
                                                schema=schema, path=tmpPath,
                                                noUse="this options will not be used")
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
        self.assertEqual(sorted(df.select("value").collect()),
                         sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
        self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
        self.spark.sql("DROP TABLE savedJsonTable")
        self.spark.sql("DROP TABLE externalJsonTable")
        # Phase 3: rely on spark.sql.sources.default, restoring it afterwards.
        defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
                                                   "org.apache.spark.sql.parquet")
        self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
        df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
        actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
        self.assertEqual(sorted(df.collect()),
                         sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
        self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        self.spark.sql("DROP TABLE savedJsonTable")
        self.spark.sql("DROP TABLE externalJsonTable")
        self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
        shutil.rmtree(tmpPath)
    def test_window_functions(self):
        """Ranking/aggregate window functions over a partitioned, ordered window."""
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        w = Window.partitionBy("value").orderBy("key")
        from pyspark.sql import functions as F
        sel = df.select(df.value, df.key,
                        F.max("key").over(w.rowsBetween(0, 1)),
                        F.min("key").over(w.rowsBetween(0, 1)),
                        F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
                        F.row_number().over(w),
                        F.rank().over(w),
                        F.dense_rank().over(w),
                        F.ntile(2).over(w))
        rs = sorted(sel.collect())
        expected = [
            ("1", 1, 1, 1, 1, 1, 1, 1, 1),
            ("2", 1, 1, 1, 3, 1, 1, 1, 1),
            ("2", 1, 2, 1, 3, 2, 1, 1, 1),
            ("2", 2, 2, 2, 3, 3, 3, 2, 2)
        ]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])
    def test_window_functions_without_partitionBy(self):
        """Same window functions with ordering only (single global partition)."""
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        w = Window.orderBy("key", df.value)
        from pyspark.sql import functions as F
        sel = df.select(df.value, df.key,
                        F.max("key").over(w.rowsBetween(0, 1)),
                        F.min("key").over(w.rowsBetween(0, 1)),
                        F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
                        F.row_number().over(w),
                        F.rank().over(w),
                        F.dense_rank().over(w),
                        F.ntile(2).over(w))
        rs = sorted(sel.collect())
        expected = [
            ("1", 1, 1, 1, 4, 1, 1, 1, 1),
            ("2", 1, 1, 1, 4, 2, 2, 2, 1),
            ("2", 1, 2, 1, 4, 3, 2, 2, 2),
            ("2", 2, 2, 2, 4, 4, 4, 3, 2)
        ]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])
    def test_window_functions_cumulative_sum(self):
        """Cumulative sums, including frame bounds beyond the JVM Long range."""
        df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
        from pyspark.sql import functions as F
        # Test cumulative sum
        sel = df.select(
            df.key,
            F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
        rs = sorted(sel.collect())
        expected = [("one", 1), ("two", 3)]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])
        # Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
        sel = df.select(
            df.key,
            F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
        rs = sorted(sel.collect())
        expected = [("one", 1), ("two", 3)]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])
        # Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
        frame_end = Window.unboundedFollowing + 1
        sel = df.select(
            df.key,
            F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
        rs = sorted(sel.collect())
        expected = [("one", 3), ("two", 2)]
        for r, ex in zip(rs, expected):
            self.assertEqual(tuple(r), ex[:len(r)])
    def test_collect_functions(self):
        """collect_set drops duplicates; collect_list keeps them."""
        df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
        from pyspark.sql import functions
        self.assertEqual(
            sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
            [1, 2])
        self.assertEqual(
            sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
            [1, 1, 1, 2])
        self.assertEqual(
            sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
            ["1", "2"])
        self.assertEqual(
            sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
            ["1", "2", "2", "2"])
    def test_limit_and_take(self):
        """take(n) and limit(n).collect() must each run one job, stage and task."""
        df = self.spark.range(1, 1000, numPartitions=10)
        def assert_runs_only_one_job_stage_and_task(job_group_name, f):
            # Run f under a dedicated job group and inspect the scheduler's view of it.
            tracker = self.sc.statusTracker()
            self.sc.setJobGroup(job_group_name, description="")
            f()
            jobs = tracker.getJobIdsForGroup(job_group_name)
            self.assertEqual(1, len(jobs))
            stages = tracker.getJobInfo(jobs[0]).stageIds
            self.assertEqual(1, len(stages))
            self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
        # Regression test for SPARK-10731: take should delegate to Scala implementation
        assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
        # Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
        assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
    def test_datetime_functions(self):
        """to_date parses an ISO date string column."""
        from pyspark.sql import functions
        from datetime import date, datetime
        df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
        parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
        self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
    @unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
    def test_unbounded_frames(self):
        """-sys.maxsize/sys.maxsize map to UNBOUNDED frames at any word size."""
        from unittest.mock import patch
        from pyspark.sql import functions as F
        from pyspark.sql import window
        import importlib
        df = self.spark.range(0, 3)
        def rows_frame_match():
            return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
                F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
            ).columns[0]
        def range_frame_match():
            return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
                F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
            ).columns[0]
        with patch("sys.maxsize", 2 ** 31 - 1):
            importlib.reload(window)
            self.assertTrue(rows_frame_match())
            self.assertTrue(range_frame_match())
        with patch("sys.maxsize", 2 ** 63 - 1):
            importlib.reload(window)
            self.assertTrue(rows_frame_match())
            self.assertTrue(range_frame_match())
        with patch("sys.maxsize", 2 ** 127 - 1):
            importlib.reload(window)
            self.assertTrue(rows_frame_match())
            self.assertTrue(range_frame_match())
        # Restore the module under the real sys.maxsize.
        importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
    """Unit tests for _make_type_verifier, the Python-side schema validator."""
    def test_verify_type_exception_msg(self):
        """Errors must name the offending field, including nested paths."""
        self.assertRaisesRegexp(
            ValueError,
            "test_name",
            lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
        schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
        self.assertRaisesRegexp(
            TypeError,
            "field b in field a",
            lambda: _make_type_verifier(schema)([["data"]]))
    def test_verify_type_ok_nullable(self):
        """None must pass verification for any type when nullable=True."""
        obj = None
        types = [IntegerType(), FloatType(), StringType(), StructType([])]
        for data_type in types:
            try:
                _make_type_verifier(data_type, nullable=True)(obj)
            except Exception:
                self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
    def test_verify_type_not_nullable(self):
        """Table-driven accept/reject cases with nullable=False."""
        import array
        import datetime
        import decimal
        schema = StructType([
            StructField('s', StringType(), nullable=False),
            StructField('i', IntegerType(), nullable=True)])
        # Attribute-bag stand-in for arbitrary user objects verified against a struct.
        class MyObj:
            def __init__(self, **kwargs):
                for k, v in kwargs.items():
                    setattr(self, k, v)
        # obj, data_type
        success_spec = [
            # String
            ("", StringType()),
            (u"", StringType()),
            (1, StringType()),
            (1.0, StringType()),
            ([], StringType()),
            ({}, StringType()),
            # UDT
            (ExamplePoint(1.0, 2.0), ExamplePointUDT()),
            # Boolean
            (True, BooleanType()),
            # Byte
            (-(2**7), ByteType()),
            (2**7 - 1, ByteType()),
            # Short
            (-(2**15), ShortType()),
            (2**15 - 1, ShortType()),
            # Integer
            (-(2**31), IntegerType()),
            (2**31 - 1, IntegerType()),
            # Long
            (2**64, LongType()),
            # Float & Double
            (1.0, FloatType()),
            (1.0, DoubleType()),
            # Decimal
            (decimal.Decimal("1.0"), DecimalType()),
            # Binary
            (bytearray([1, 2]), BinaryType()),
            # Date/Timestamp
            (datetime.date(2000, 1, 2), DateType()),
            (datetime.datetime(2000, 1, 2, 3, 4), DateType()),
            (datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
            # Array
            ([], ArrayType(IntegerType())),
            (["1", None], ArrayType(StringType(), containsNull=True)),
            ([1, 2], ArrayType(IntegerType())),
            ((1, 2), ArrayType(IntegerType())),
            (array.array('h', [1, 2]), ArrayType(IntegerType())),
            # Map
            ({}, MapType(StringType(), IntegerType())),
            ({"a": 1}, MapType(StringType(), IntegerType())),
            ({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
            # Struct
            ({"s": "a", "i": 1}, schema),
            ({"s": "a", "i": None}, schema),
            ({"s": "a"}, schema),
            ({"s": "a", "f": 1.0}, schema),
            (Row(s="a", i=1), schema),
            (Row(s="a", i=None), schema),
            (Row(s="a", i=1, f=1.0), schema),
            (["a", 1], schema),
            (["a", None], schema),
            (("a", 1), schema),
            (MyObj(s="a", i=1), schema),
            (MyObj(s="a", i=None), schema),
            (MyObj(s="a"), schema),
        ]
        # obj, data_type, exception class
        failure_spec = [
            # String (match anything but None)
            (None, StringType(), ValueError),
            # UDT
            (ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
            # Boolean
            (1, BooleanType(), TypeError),
            ("True", BooleanType(), TypeError),
            ([1], BooleanType(), TypeError),
            # Byte
            (-(2**7) - 1, ByteType(), ValueError),
            (2**7, ByteType(), ValueError),
            ("1", ByteType(), TypeError),
            (1.0, ByteType(), TypeError),
            # Short
            (-(2**15) - 1, ShortType(), ValueError),
            (2**15, ShortType(), ValueError),
            # Integer
            (-(2**31) - 1, IntegerType(), ValueError),
            (2**31, IntegerType(), ValueError),
            # Float & Double
            (1, FloatType(), TypeError),
            (1, DoubleType(), TypeError),
            # Decimal
            (1.0, DecimalType(), TypeError),
            (1, DecimalType(), TypeError),
            ("1.0", DecimalType(), TypeError),
            # Binary
            (1, BinaryType(), TypeError),
            # Date/Timestamp
            ("2000-01-02", DateType(), TypeError),
            (946811040, TimestampType(), TypeError),
            # Array
            (["1", None], ArrayType(StringType(), containsNull=False), ValueError),
            ([1, "2"], ArrayType(IntegerType()), TypeError),
            # Map
            ({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
            ({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
            ({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
             ValueError),
            # Struct
            ({"s": "a", "i": "1"}, schema, TypeError),
            (Row(s="a"), schema, ValueError),  # Row can't have missing field
            (Row(s="a", i="1"), schema, TypeError),
            (["a"], schema, ValueError),
            (["a", "1"], schema, TypeError),
            (MyObj(s="a", i="1"), schema, TypeError),
            (MyObj(s=None, i="1"), schema, ValueError),
        ]
        # Check success cases
        for obj, data_type in success_spec:
            try:
                _make_type_verifier(data_type, nullable=False)(obj)
            except Exception:
                self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
        # Check failure cases
        for obj, data_type, exp in failure_spec:
            msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
            with self.assertRaises(exp, msg=msg):
                _make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
    @classmethod
    def setUpClass(cls):
        """Start the shared session with Arrow forced on and a fixed LA timezone."""
        from datetime import date, datetime
        from decimal import Decimal
        from distutils.version import LooseVersion
        import pyarrow as pa
        ReusedSQLTestCase.setUpClass()
        # Synchronize default timezone between Python and Java
        cls.tz_prev = os.environ.get("TZ", None)  # save current tz if set
        tz = "America/Los_Angeles"
        os.environ["TZ"] = tz
        time.tzset()
        cls.spark.conf.set("spark.sql.session.timeZone", tz)
        cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
        # Disable fallback by default to easily detect the failures.
        cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
        cls.schema = StructType([
            StructField("1_str_t", StringType(), True),
            StructField("2_int_t", IntegerType(), True),
            StructField("3_long_t", LongType(), True),
            StructField("4_float_t", FloatType(), True),
            StructField("5_double_t", DoubleType(), True),
            StructField("6_decimal_t", DecimalType(38, 18), True),
            StructField("7_date_t", DateType(), True),
            StructField("8_timestamp_t", TimestampType(), True)])
        cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
                     date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
                    (u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
                     date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
                    (u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
                     date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
        # TODO: remove version check once minimum pyarrow version is 0.10.0
        if LooseVersion("0.10.0") <= LooseVersion(pa.__version__):
            cls.schema.add(StructField("9_binary_t", BinaryType(), True))
            cls.data[0] = cls.data[0] + (bytearray(b"a"),)
            cls.data[1] = cls.data[1] + (bytearray(b"bb"),)
            cls.data[2] = cls.data[2] + (bytearray(b"ccc"),)
    @classmethod
    def tearDownClass(cls):
        """Restore the process timezone saved in setUpClass, then stop the session."""
        del os.environ["TZ"]
        if cls.tz_prev is not None:
            os.environ["TZ"] = cls.tz_prev
        time.tzset()
        ReusedSQLTestCase.tearDownClass()
def create_pandas_data_frame(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
    def test_toPandas_fallback_enabled(self):
        """With fallback on, an Arrow-unsupported MapType warns and still converts."""
        import pandas as pd
        with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
            schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
            df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
            with QuietTest(self.sc):
                with warnings.catch_warnings(record=True) as warns:
                    pdf = df.toPandas()
                    # Catch and check the last UserWarning.
                    user_warns = [
                        warn.message for warn in warns if isinstance(warn.message, UserWarning)]
                    self.assertTrue(len(user_warns) > 0)
                    self.assertTrue(
                        "Attempting non-optimization" in _exception_message(user_warns[-1]))
            self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
    def test_toPandas_fallback_disabled(self):
        """With fallback off (the class default), unsupported types must raise."""
        from distutils.version import LooseVersion
        import pyarrow as pa
        schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
        df = self.spark.createDataFrame([(None,)], schema=schema)
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(Exception, 'Unsupported type'):
                df.toPandas()
        # TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
        if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
            schema = StructType([StructField("binary", BinaryType(), True)])
            df = self.spark.createDataFrame([(None,)], schema=schema)
            with QuietTest(self.sc):
                with self.assertRaisesRegexp(Exception, 'Unsupported type.*BinaryType'):
                    df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
    def test_toPandas_respect_session_timezone(self):
        """Arrow and non-Arrow toPandas must agree under both timezone modes."""
        df = self.spark.createDataFrame(self.data, schema=self.schema)
        timezone = "America/New_York"
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": False,
                "spark.sql.session.timeZone": timezone}):
            pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
            self.assertPandasEqual(pdf_arrow_la, pdf_la)
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": True,
                "spark.sql.session.timeZone": timezone}):
            pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
            self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
            # Respecting the session timezone must actually change the timestamps.
            self.assertFalse(pdf_ny.equals(pdf_la))
            from pyspark.sql.types import _check_series_convert_timestamps_local_tz
            pdf_la_corrected = pdf_la.copy()
            for field in self.schema:
                if isinstance(field.dataType, TimestampType):
                    pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
                        pdf_la_corrected[field.name], timezone)
            self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
    def test_createDataFrame_respect_session_timezone(self):
        """Mirror of test_toPandas_respect_session_timezone for the ingest direction."""
        from datetime import timedelta
        pdf = self.create_pandas_data_frame()
        timezone = "America/New_York"
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": False,
                "spark.sql.session.timeZone": timezone}):
            df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
            result_la = df_no_arrow_la.collect()
            result_arrow_la = df_arrow_la.collect()
            self.assertEqual(result_la, result_arrow_la)
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": True,
                "spark.sql.session.timeZone": timezone}):
            df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
            result_ny = df_no_arrow_ny.collect()
            result_arrow_ny = df_arrow_ny.collect()
            self.assertEqual(result_ny, result_arrow_ny)
            self.assertNotEqual(result_ny, result_la)
            # Correct result_la by adjusting 3 hours difference between Los Angeles and New York
            result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
                                          for k, v in row.asDict().items()})
                                   for row in result_la]
            self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[0], fields[7] = fields[7], fields[0] # swap str with timestamp
wrong_schema = StructType(fields)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
# Some series get converted for Spark to consume, this makes sure input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
# Integers with nulls will get NaNs filled with 0 and will be casted
pdf.ix[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
    def test_createDataFrame_fallback_enabled(self):
        """With Arrow fallback on, an unsupported type warns and still succeeds."""
        import pandas as pd
        with QuietTest(self.sc):
            with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
                with warnings.catch_warnings(record=True) as warns:
                    # MapType is unsupported by Arrow, so this exercises the fallback.
                    df = self.spark.createDataFrame(
                        pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
                    # Catch and check the last UserWarning.
                    user_warns = [
                        warn.message for warn in warns if isinstance(warn.message, UserWarning)]
                    self.assertTrue(len(user_warns) > 0)
                    self.assertTrue(
                        "Attempting non-optimization" in _exception_message(user_warns[-1]))
                    self.assertEqual(df.collect(), [Row(a={u'a': 1})])
    def test_createDataFrame_fallback_disabled(self):
        """Without fallback, Arrow-unsupported input types raise instead of degrading."""
        from distutils.version import LooseVersion
        import pandas as pd
        import pyarrow as pa
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
                self.spark.createDataFrame(
                    pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")

        # TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
        if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
            with QuietTest(self.sc):
                with self.assertRaisesRegexp(TypeError, 'Unsupported type.*BinaryType'):
                    self.spark.createDataFrame(
                        pd.DataFrame([[{'a': b'aaa'}]]), "a: binary")
    # Regression test for SPARK-23314
    def test_timestamp_dst(self):
        """Timestamps in the DST fall-back window round-trip unchanged."""
        import pandas as pd
        # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
        dt = [datetime.datetime(2015, 11, 1, 0, 30),
              datetime.datetime(2015, 11, 1, 1, 30),
              datetime.datetime(2015, 11, 1, 2, 30)]
        pdf = pd.DataFrame({'time': dt})

        df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
        df_from_pandas = self.spark.createDataFrame(pdf)

        self.assertPandasEqual(pdf, df_from_python.toPandas())
        self.assertPandasEqual(pdf, df_from_pandas.toPandas())
@unittest.skipIf(
    not _have_pandas or not _have_pyarrow,
    _pandas_requirement_message or _pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
    """Declaration-time behavior of pandas_udf: return types, eval types, bad args."""

    def test_pandas_udf_basic(self):
        """Every call style yields the expected returnType and evalType."""
        from pyspark.rdd import PythonEvalType
        from pyspark.sql.functions import pandas_udf, PandasUDFType

        # Default function type is SCALAR.
        udf = pandas_udf(lambda x: x, DoubleType())
        self.assertEqual(udf.returnType, DoubleType())
        self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
        self.assertEqual(udf.returnType, DoubleType())
        self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        # Return type may also be given as a DDL string.
        udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
        self.assertEqual(udf.returnType, DoubleType())
        self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
                         PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, 'v double',
                         functionType=PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        udf = pandas_udf(lambda x: x, returnType='v double',
                         functionType=PandasUDFType.GROUPED_MAP)
        self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
        self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

    def test_pandas_udf_decorator(self):
        """Decorator form mirrors the call form for every argument combination."""
        from pyspark.rdd import PythonEvalType
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        from pyspark.sql.types import StructType, StructField, DoubleType

        @pandas_udf(DoubleType())
        def foo(x):
            return x
        self.assertEqual(foo.returnType, DoubleType())
        self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        @pandas_udf(returnType=DoubleType())
        def foo(x):
            return x
        self.assertEqual(foo.returnType, DoubleType())
        self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        schema = StructType([StructField("v", DoubleType())])

        @pandas_udf(schema, PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        @pandas_udf('v double', PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        @pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

        @pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, DoubleType())
        self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)

        @pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
        def foo(x):
            return x
        self.assertEqual(foo.returnType, schema)
        self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)

    def test_udf_wrong_arg(self):
        """Invalid schemas, function types and arities raise clear errors."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        with QuietTest(self.sc):
            # Unparseable DDL return type.
            with self.assertRaises(ParseException):
                @pandas_udf('blah')
                def foo(x):
                    return x
            # Missing return type.
            with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
                @pandas_udf(functionType=PandasUDFType.SCALAR)
                def foo(x):
                    return x
            # Unknown function type constant.
            with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
                @pandas_udf('double', 100)
                def foo(x):
                    return x

            # Scalar pandas UDFs must accept at least one argument.
            with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
                pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
            with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
                @pandas_udf(LongType(), PandasUDFType.SCALAR)
                def zero_with_type():
                    return 1

            # GROUPED_MAP needs a struct return type, not a function-type constant
            # or a scalar type.
            with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
                @pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
                def foo(df):
                    return df
            with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
                @pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
                def foo(df):
                    return df
            # GROUPED_MAP functions take one or two arguments only.
            with self.assertRaisesRegexp(ValueError, 'Invalid function'):
                @pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
                def foo(k, v, w):
                    return k

    def test_stopiteration_in_udf(self):
        """StopIteration escaping a UDF fails the task with a helpful message."""
        from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
        from py4j.protocol import Py4JJavaError

        def foo(x):
            raise StopIteration()

        def foofoo(x, y):
            raise StopIteration()

        exc_message = "Caught StopIteration thrown from user's code; failing the task"
        df = self.spark.range(0, 100)

        # plain udf (test for SPARK-23754)
        self.assertRaisesRegexp(
            Py4JJavaError,
            exc_message,
            df.withColumn('v', udf(foo)('id')).collect
        )

        # pandas scalar udf
        self.assertRaisesRegexp(
            Py4JJavaError,
            exc_message,
            df.withColumn(
                'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
            ).collect
        )

        # pandas grouped map
        self.assertRaisesRegexp(
            Py4JJavaError,
            exc_message,
            df.groupBy('id').apply(
                pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
            ).collect
        )

        self.assertRaisesRegexp(
            Py4JJavaError,
            exc_message,
            df.groupBy('id').apply(
                pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
            ).collect
        )

        # pandas grouped agg
        self.assertRaisesRegexp(
            Py4JJavaError,
            exc_message,
            df.groupBy('id').agg(
                pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
            ).collect
        )
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
    @classmethod
    def setUpClass(cls):
        """Pin the OS, JVM and session timezone so timestamp tests are deterministic."""
        ReusedSQLTestCase.setUpClass()

        # Synchronize default timezone between Python and Java
        cls.tz_prev = os.environ.get("TZ", None)  # save current tz if set
        tz = "America/Los_Angeles"
        os.environ["TZ"] = tz
        time.tzset()

        cls.sc.environment["TZ"] = tz
        cls.spark.conf.set("spark.sql.session.timeZone", tz)
    @classmethod
    def tearDownClass(cls):
        """Restore the timezone saved by setUpClass and tear down the session."""
        del os.environ["TZ"]
        if cls.tz_prev is not None:
            os.environ["TZ"] = cls.tz_prev
        time.tzset()
        ReusedSQLTestCase.tearDownClass()
    @property
    def nondeterministic_vectorized_udf(self):
        """A scalar pandas UDF returning random doubles, flagged nondeterministic."""
        from pyspark.sql.functions import pandas_udf

        @pandas_udf('double')
        def random_udf(v):
            import pandas as pd
            import numpy as np
            return pd.Series(np.random.random(len(v)))
        random_udf = random_udf.asNondeterministic()
        return random_udf
def test_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf, col, array
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
    def test_register_nondeterministic_vectorized_udf_basic(self):
        """Registering a nondeterministic pandas UDF preserves its flags."""
        from pyspark.sql.functions import pandas_udf
        from pyspark.rdd import PythonEvalType
        import random
        # randint(6, 6) always yields 6, but the UDF is still flagged nondeterministic.
        random_pandas_udf = pandas_udf(
            lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
        self.assertEqual(random_pandas_udf.deterministic, False)
        self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
        nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
            "randomPandasUDF", random_pandas_udf)
        self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
        self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
        [row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
        self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
from pyspark.sql.functions import pandas_udf, col
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
from decimal import Decimal
from pyspark.sql.functions import pandas_udf, col
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
from pyspark.sql.functions import pandas_udf, col
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
    def test_vectorized_udf_null_binary(self):
        """BinaryType round-trips on pyarrow >= 0.10.0; older versions must raise."""
        from distutils.version import LooseVersion
        import pyarrow as pa
        from pyspark.sql.functions import pandas_udf, col
        if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
            # Binary is unsupported on old pyarrow: declaring the UDF should fail.
            with QuietTest(self.sc):
                with self.assertRaisesRegexp(
                        NotImplementedError,
                        'Invalid returnType.*scalar Pandas UDF.*BinaryType'):
                    pandas_udf(lambda x: x, BinaryType())
        else:
            data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
            schema = StructType().add("binary", BinaryType())
            df = self.spark.createDataFrame(data, schema)
            str_f = pandas_udf(lambda x: x, BinaryType())
            res = df.select(str_f(col('binary')))
            self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_complex(self):
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, col
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_varargs(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
from pyspark.sql.functions import pandas_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
    def test_vectorized_udf_dates(self):
        """DateType values (including null) survive a pandas UDF round trip."""
        from pyspark.sql.functions import pandas_udf, col
        from datetime import date
        schema = StructType().add("idx", LongType()).add("date", DateType())
        data = [(0, date(1969, 1, 1),),
                (1, date(2012, 2, 2),),
                (2, None,),
                (3, date(2100, 4, 4),)]
        df = self.spark.createDataFrame(data, schema=schema)

        date_copy = pandas_udf(lambda t: t, returnType=DateType())
        df = df.withColumn("date_copy", date_copy(col("date")))

        @pandas_udf(returnType=StringType())
        def check_data(idx, date, date_copy):
            # Runs on executors: compare what the UDF observes with the source
            # `data` captured in the closure; emit a message per mismatch.
            import pandas as pd
            msgs = []
            is_equal = date.isnull()
            for i in range(len(idx)):
                if (is_equal[i] and data[idx[i]][1] is None) or \
                        date[i] == data[idx[i]][1]:
                    msgs.append(None)
                else:
                    msgs.append(
                        "date values are not equal (date='%s': data[%d][1]='%s')"
                        % (date[i], idx[i], data[idx[i]][1]))
            return pd.Series(msgs)

        result = df.withColumn("check_data",
                               check_data(col("idx"), col("date"), col("date_copy"))).collect()

        self.assertEquals(len(data), len(result))
        for i in range(len(result)):
            self.assertEquals(data[i][1], result[i][1])  # "date" col
            self.assertEquals(data[i][1], result[i][2])  # "date_copy" col
            self.assertIsNone(result[i][3])  # "check_data" col
    def test_vectorized_udf_timestamps(self):
        """TimestampType values (including null) survive a pandas UDF round trip."""
        from pyspark.sql.functions import pandas_udf, col
        from datetime import datetime
        schema = StructType([
            StructField("idx", LongType(), True),
            StructField("timestamp", TimestampType(), True)])
        data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
                (1, datetime(2012, 2, 2, 2, 2, 2)),
                (2, None),
                (3, datetime(2100, 3, 3, 3, 3, 3))]
        df = self.spark.createDataFrame(data, schema=schema)

        # Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
        f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
        df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))

        @pandas_udf(returnType=StringType())
        def check_data(idx, timestamp, timestamp_copy):
            # Runs on executors: compare the UDF's view with the closed-over `data`.
            import pandas as pd
            msgs = []
            is_equal = timestamp.isnull()  # use this array to check values are equal
            for i in range(len(idx)):
                # Check that timestamps are as expected in the UDF
                if (is_equal[i] and data[idx[i]][1] is None) or \
                        timestamp[i].to_pydatetime() == data[idx[i]][1]:
                    msgs.append(None)
                else:
                    msgs.append(
                        "timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
                        % (timestamp[i], idx[i], data[idx[i]][1]))
            return pd.Series(msgs)

        result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
                                                        col("timestamp_copy"))).collect()
        # Check that collection values are correct
        self.assertEquals(len(data), len(result))
        for i in range(len(result)):
            self.assertEquals(data[i][1], result[i][1])  # "timestamp" col
            self.assertEquals(data[i][1], result[i][2])  # "timestamp_copy" col
            self.assertIsNone(result[i][3])  # "check_data" col
    def test_vectorized_udf_return_timestamp_tz(self):
        """Timezone-aware pandas Timestamps returned by a UDF convert correctly."""
        from pyspark.sql.functions import pandas_udf, col
        import pandas as pd
        df = self.spark.range(10)

        @pandas_udf(returnType=TimestampType())
        def gen_timestamps(id):
            ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
            return pd.Series(ts)

        result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
        spark_ts_t = TimestampType()
        for r in result:
            i, ts = r
            # Expected value is the tz-aware timestamp after Spark's internal round trip.
            ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
            expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
            self.assertEquals(expected, ts)
    def test_vectorized_udf_check_config(self):
        """maxRecordsPerBatch bounds the size of each batch a UDF receives."""
        from pyspark.sql.functions import pandas_udf, col
        import pandas as pd
        with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
            df = self.spark.range(10, numPartitions=1)

            @pandas_udf(returnType=LongType())
            def check_records_per_batch(x):
                # Emit the observed batch size for every row in the batch.
                return pd.Series(x.size).repeat(x.size)

            result = df.select(check_records_per_batch(col("id"))).collect()
            for (r,) in result:
                self.assertTrue(r <= 3)
    def test_vectorized_udf_timestamps_respect_session_timezone(self):
        """respectSessionTimeZone controls how timestamps are localized for UDFs."""
        from pyspark.sql.functions import pandas_udf, col
        from datetime import datetime
        import pandas as pd
        schema = StructType([
            StructField("idx", LongType(), True),
            StructField("timestamp", TimestampType(), True)])
        data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
                (2, datetime(2012, 2, 2, 2, 2, 2)),
                (3, None),
                (4, datetime(2100, 3, 3, 3, 3, 3))]
        df = self.spark.createDataFrame(data, schema=schema)

        f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
        # Exposes the raw internal (epoch nanoseconds) value the UDF observes.
        internal_value = pandas_udf(
            lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())

        timezone = "America/New_York"
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": False,
                "spark.sql.session.timeZone": timezone}):
            df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
                .withColumn("internal_value", internal_value(col("timestamp")))
            result_la = df_la.select(col("idx"), col("internal_value")).collect()
            # Correct result_la by adjusting 3 hours difference between Los Angeles and New York
            diff = 3 * 60 * 60 * 1000 * 1000 * 1000
            result_la_corrected = \
                df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()

        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": True,
                "spark.sql.session.timeZone": timezone}):
            df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
                .withColumn("internal_value", internal_value(col("timestamp")))
            result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()

            self.assertNotEqual(result_ny, result_la)
            self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf, pandas_udf, col
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
from pyspark.sql.functions import pandas_udf, sum
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
    def test_register_vectorized_udf_basic(self):
        """A registered pandas UDF is usable from both the DataFrame DSL and SQL."""
        from pyspark.rdd import PythonEvalType
        from pyspark.sql.functions import pandas_udf, col, expr
        df = self.spark.range(10).select(
            col('id').cast('int').alias('a'),
            col('id').cast('int').alias('b'))
        original_add = pandas_udf(lambda x, y: x + y, IntegerType())
        self.assertEqual(original_add.deterministic, True)
        self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
        new_add = self.spark.catalog.registerFunction("add1", original_add)
        # DSL call through the handle returned by registerFunction.
        res1 = df.select(new_add(col('a'), col('b')))
        # Textual SQL call through the registered name.
        res2 = self.spark.sql(
            "SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
        expected = df.select(expr('a + b'))
        self.assertEquals(expected.collect(), res1.collect())
        self.assertEquals(expected.collect(), res2.collect())
    # Regression test for SPARK-23314
    def test_timestamp_dst(self):
        """DST-ambiguous timestamps pass through a pandas UDF unchanged."""
        from pyspark.sql.functions import pandas_udf
        # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
        dt = [datetime.datetime(2015, 11, 1, 0, 30),
              datetime.datetime(2015, 11, 1, 1, 30),
              datetime.datetime(2015, 11, 1, 2, 30)]
        df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
        foo_udf = pandas_udf(lambda x: x, 'timestamp')
        result = df.withColumn('time', foo_udf(df.time))
        self.assertEquals(df.collect(), result.collect())
    @unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
    def test_type_annotation(self):
        """Functions with type annotations are accepted by pandas_udf (SPARK-23569)."""
        from pyspark.sql.functions import pandas_udf
        # Regression test to check if type hints can be used. See SPARK-23569.
        # Note that it throws an error during compilation in lower Python versions if 'exec'
        # is not used. Also, note that we explicitly use another dictionary to avoid modifications
        # in the current 'locals()'.
        #
        # Hyukjin: I think it's an ugly way to test issues about syntax specific in
        # higher versions of Python, which we shouldn't encourage. This was the last resort
        # I could come up with at that time.
        _locals = {}
        exec(
            "import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
            _locals)
        df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
        self.assertEqual(df.first()[0], 0)
    def test_mixed_udf(self):
        """Chains and projections that mix plain UDFs with pandas UDFs evaluate correctly."""
        import pandas as pd
        from pyspark.sql.functions import col, udf, pandas_udf
        df = self.spark.range(0, 1).toDF('v')

        # Test mixture of multiple UDFs and Pandas UDFs.
        # Each function adds a distinct power of ten, so the expected sum
        # identifies exactly which functions were applied.
        @udf('int')
        def f1(x):
            assert type(x) == int
            return x + 1

        @pandas_udf('int')
        def f2(x):
            assert type(x) == pd.Series
            return x + 10

        @udf('int')
        def f3(x):
            assert type(x) == int
            return x + 100

        @pandas_udf('int')
        def f4(x):
            assert type(x) == pd.Series
            return x + 1000

        # Test single expression with chained UDFs
        df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
        df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
        df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
        df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
        df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))

        expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
        expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
        expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
        expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
        expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)

        self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
        self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
        self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
        self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
        self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())

        # Test multiple mixed UDF expressions in a single projection
        df_multi_1 = df \
            .withColumn('f1', f1(col('v'))) \
            .withColumn('f2', f2(col('v'))) \
            .withColumn('f3', f3(col('v'))) \
            .withColumn('f4', f4(col('v'))) \
            .withColumn('f2_f1', f2(col('f1'))) \
            .withColumn('f3_f1', f3(col('f1'))) \
            .withColumn('f4_f1', f4(col('f1'))) \
            .withColumn('f3_f2', f3(col('f2'))) \
            .withColumn('f4_f2', f4(col('f2'))) \
            .withColumn('f4_f3', f4(col('f3'))) \
            .withColumn('f3_f2_f1', f3(col('f2_f1'))) \
            .withColumn('f4_f2_f1', f4(col('f2_f1'))) \
            .withColumn('f4_f3_f1', f4(col('f3_f1'))) \
            .withColumn('f4_f3_f2', f4(col('f3_f2'))) \
            .withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))

        # Test mixed udfs in a single expression
        df_multi_2 = df \
            .withColumn('f1', f1(col('v'))) \
            .withColumn('f2', f2(col('v'))) \
            .withColumn('f3', f3(col('v'))) \
            .withColumn('f4', f4(col('v'))) \
            .withColumn('f2_f1', f2(f1(col('v')))) \
            .withColumn('f3_f1', f3(f1(col('v')))) \
            .withColumn('f4_f1', f4(f1(col('v')))) \
            .withColumn('f3_f2', f3(f2(col('v')))) \
            .withColumn('f4_f2', f4(f2(col('v')))) \
            .withColumn('f4_f3', f4(f3(col('v')))) \
            .withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
            .withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
            .withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
            .withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
            .withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))

        expected = df \
            .withColumn('f1', df['v'] + 1) \
            .withColumn('f2', df['v'] + 10) \
            .withColumn('f3', df['v'] + 100) \
            .withColumn('f4', df['v'] + 1000) \
            .withColumn('f2_f1', df['v'] + 11) \
            .withColumn('f3_f1', df['v'] + 101) \
            .withColumn('f4_f1', df['v'] + 1001) \
            .withColumn('f3_f2', df['v'] + 110) \
            .withColumn('f4_f2', df['v'] + 1010) \
            .withColumn('f4_f3', df['v'] + 1100) \
            .withColumn('f3_f2_f1', df['v'] + 111) \
            .withColumn('f4_f2_f1', df['v'] + 1011) \
            .withColumn('f4_f3_f1', df['v'] + 1101) \
            .withColumn('f4_f3_f2', df['v'] + 1110) \
            .withColumn('f4_f3_f2_f1', df['v'] + 1111)

        self.assertEquals(expected.collect(), df_multi_1.collect())
        self.assertEquals(expected.collect(), df_multi_2.collect())
    def test_mixed_udf_and_sql(self):
        """Compose a Python UDF (f1), a plain Column expression (f2) and a
        scalar pandas UDF (f3) in every pairwise and triple order.

        The offsets (+1, +10, +100) encode which functions were applied, so
        each result column has a closed-form expected value.
        """
        import pandas as pd
        from pyspark.sql import Column
        from pyspark.sql.functions import udf, pandas_udf
        df = self.spark.range(0, 1).toDF('v')
        # Test mixture of UDFs, Pandas UDFs and SQL expression.
        @udf('int')
        def f1(x):
            # Row-at-a-time Python UDF: receives a plain int.
            assert type(x) == int
            return x + 1
        def f2(x):
            # Not a UDF at all: operates on a Column, i.e. a pure SQL expression.
            assert type(x) == Column
            return x + 10
        @pandas_udf('int')
        def f3(x):
            # Scalar pandas UDF: receives a pd.Series batch.
            assert type(x) == pd.Series
            return x + 100
        df1 = df.withColumn('f1', f1(df['v'])) \
            .withColumn('f2', f2(df['v'])) \
            .withColumn('f3', f3(df['v'])) \
            .withColumn('f1_f2', f1(f2(df['v']))) \
            .withColumn('f1_f3', f1(f3(df['v']))) \
            .withColumn('f2_f1', f2(f1(df['v']))) \
            .withColumn('f2_f3', f2(f3(df['v']))) \
            .withColumn('f3_f1', f3(f1(df['v']))) \
            .withColumn('f3_f2', f3(f2(df['v']))) \
            .withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
            .withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
            .withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
            .withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
            .withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
            .withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
        # Expected values are the sums of the applied offsets.
        expected = df.withColumn('f1', df['v'] + 1) \
            .withColumn('f2', df['v'] + 10) \
            .withColumn('f3', df['v'] + 100) \
            .withColumn('f1_f2', df['v'] + 11) \
            .withColumn('f1_f3', df['v'] + 101) \
            .withColumn('f2_f1', df['v'] + 11) \
            .withColumn('f2_f3', df['v'] + 110) \
            .withColumn('f3_f1', df['v'] + 101) \
            .withColumn('f3_f2', df['v'] + 110) \
            .withColumn('f1_f2_f3', df['v'] + 111) \
            .withColumn('f1_f3_f2', df['v'] + 111) \
            .withColumn('f2_f1_f3', df['v'] + 111) \
            .withColumn('f2_f3_f1', df['v'] + 111) \
            .withColumn('f3_f1_f2', df['v'] + 111) \
            .withColumn('f3_f2_f1', df['v'] + 111)
        self.assertEquals(expected.collect(), df1.collect())
    # SPARK-24721
    @unittest.skipIf(not _test_compiled, _test_not_compiled_message)
    def test_datasource_with_udf(self):
        # Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
        # This needs to be a separate test because the Arrow dependency is optional
        import pandas as pd
        import numpy as np
        from pyspark.sql.functions import pandas_udf, lit, col
        # mkdtemp + rmtree yields a unique path that does not yet exist.
        path = tempfile.mkdtemp()
        shutil.rmtree(path)
        try:
            self.spark.range(1).write.mode("overwrite").format('csv').save(path)
            filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
            datasource_df = self.spark.read \
                .format("org.apache.spark.sql.sources.SimpleScanSource") \
                .option('from', 0).option('to', 1).load().toDF('i')
            datasource_v2_df = self.spark.read \
                .format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
                .load().toDF('i', 'j')
            c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
            c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
            f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
            f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
            # Constant-input UDF projected over each data source.
            for df in [filesource_df, datasource_df, datasource_v2_df]:
                result = df.withColumn('c', c1)
                expected = df.withColumn('c', lit(2))
                self.assertEquals(expected.collect(), result.collect())
            # Column-input UDF projected over each data source.
            for df in [filesource_df, datasource_df, datasource_v2_df]:
                result = df.withColumn('c', c2)
                expected = df.withColumn('c', col('i') + 1)
                self.assertEquals(expected.collect(), result.collect())
            # Always-false UDF used as a filter predicate.
            for df in [filesource_df, datasource_df, datasource_v2_df]:
                for f in [f1, f2]:
                    result = df.filter(f)
                    self.assertEquals(0, result.count())
        finally:
            shutil.rmtree(path)
@unittest.skipIf(
    not _have_pandas or not _have_pyarrow,
    _pandas_requirement_message or _pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
    """Tests for GROUPED_MAP pandas UDFs applied via DataFrame.groupby().apply().

    Each test compares the Spark result against the equivalent pandas
    groupby/apply computation on the collected data.
    """

    @property
    def data(self):
        # 10 ids x 10 values each: columns (id: long, v: int).
        from pyspark.sql.functions import array, explode, col, lit
        return self.spark.range(10).toDF('id') \
            .withColumn("vs", array([lit(i) for i in range(20, 30)])) \
            .withColumn("v", explode(col('vs'))).drop('vs')

    def test_supported_types(self):
        """A grouped map UDF may take (pdf) or (key, pdf); results must agree."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
        df = self.data.withColumn("arr", array(col("id")))
        # Different forms of group map pandas UDF, results of these are the same
        output_schema = StructType(
            [StructField('id', LongType()),
             StructField('v', IntegerType()),
             StructField('arr', ArrayType(LongType())),
             StructField('v1', DoubleType()),
             StructField('v2', LongType())])
        udf1 = pandas_udf(
            lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
            output_schema,
            PandasUDFType.GROUPED_MAP
        )
        udf2 = pandas_udf(
            lambda _, pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
            output_schema,
            PandasUDFType.GROUPED_MAP
        )
        udf3 = pandas_udf(
            lambda key, pdf: pdf.assign(id=key[0], v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
            output_schema,
            PandasUDFType.GROUPED_MAP
        )
        result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
        expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
        result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
        expected2 = expected1
        result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
        expected3 = expected1
        self.assertPandasEqual(expected1, result1)
        self.assertPandasEqual(expected2, result2)
        self.assertPandasEqual(expected3, result3)

    def test_array_type_correct(self):
        """ArrayType columns survive a grouped map UDF round-trip unchanged."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
        df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
        output_schema = StructType(
            [StructField('id', LongType()),
             StructField('v', IntegerType()),
             StructField('arr', ArrayType(LongType()))])
        udf = pandas_udf(
            lambda pdf: pdf,
            output_schema,
            PandasUDFType.GROUPED_MAP
        )
        result = df.groupby('id').apply(udf).sort('id').toPandas()
        expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
        self.assertPandasEqual(expected, result)

    def test_register_grouped_map_udf(self):
        """Grouped map UDFs cannot be registered as SQL functions."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(ValueError, 'f must be either SQL_BATCHED_UDF or '
                                                     'SQL_SCALAR_PANDAS_UDF'):
                self.spark.catalog.registerFunction("foo_udf", foo_udf)

    def test_decorator(self):
        """pandas_udf works as a decorator for grouped map UDFs."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        df = self.data
        @pandas_udf(
            'id long, v int, v1 double, v2 long',
            PandasUDFType.GROUPED_MAP
        )
        def foo(pdf):
            return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
        result = df.groupby('id').apply(foo).sort('id').toPandas()
        expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
        self.assertPandasEqual(expected, result)

    def test_coerce(self):
        """Output values are coerced to the declared schema (int v -> double)."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        df = self.data
        foo = pandas_udf(
            lambda pdf: pdf,
            'id long, v double',
            PandasUDFType.GROUPED_MAP
        )
        result = df.groupby('id').apply(foo).sort('id').toPandas()
        expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
        expected = expected.assign(v=expected.v.astype('float64'))
        self.assertPandasEqual(expected, result)

    def test_complex_groupby(self):
        """Grouping by an expression (id % 2 == 0) rather than a plain column."""
        from pyspark.sql.functions import pandas_udf, col, PandasUDFType
        df = self.data
        @pandas_udf(
            'id long, v int, norm double',
            PandasUDFType.GROUPED_MAP
        )
        def normalize(pdf):
            v = pdf.v
            return pdf.assign(norm=(v - v.mean()) / v.std())
        result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
        pdf = df.toPandas()
        expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func)
        expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
        expected = expected.assign(norm=expected.norm.astype('float64'))
        self.assertPandasEqual(expected, result)

    def test_empty_groupby(self):
        """groupby() with no keys applies the UDF to the whole frame as one group."""
        from pyspark.sql.functions import pandas_udf, col, PandasUDFType
        df = self.data
        @pandas_udf(
            'id long, v int, norm double',
            PandasUDFType.GROUPED_MAP
        )
        def normalize(pdf):
            v = pdf.v
            return pdf.assign(norm=(v - v.mean()) / v.std())
        result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
        pdf = df.toPandas()
        expected = normalize.func(pdf)
        expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
        expected = expected.assign(norm=expected.norm.astype('float64'))
        self.assertPandasEqual(expected, result)

    def test_datatype_string(self):
        """The return schema may be given as a DDL-formatted string."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        df = self.data
        foo_udf = pandas_udf(
            lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
            'id long, v int, v1 double, v2 long',
            PandasUDFType.GROUPED_MAP
        )
        result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
        expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
        self.assertPandasEqual(expected, result)

    def test_wrong_return_type(self):
        """MapType is not a supported return type for grouped map UDFs."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(
                    NotImplementedError,
                    'Invalid returnType.*grouped map Pandas UDF.*MapType'):
                pandas_udf(
                    lambda pdf: pdf,
                    'id long, v map<int, int>',
                    PandasUDFType.GROUPED_MAP)

    def test_wrong_args(self):
        """groupby().apply() rejects plain functions and non-grouped-map UDFs."""
        from pyspark.sql.functions import udf, pandas_udf, sum, PandasUDFType
        df = self.data
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
                df.groupby('id').apply(lambda x: x)
            with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
                df.groupby('id').apply(udf(lambda x: x, DoubleType()))
            with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
                df.groupby('id').apply(sum(df.v))
            with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
                df.groupby('id').apply(df.v + 1)
            with self.assertRaisesRegexp(ValueError, 'Invalid function'):
                df.groupby('id').apply(
                    pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
            with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
                df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
            with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
                df.groupby('id').apply(
                    pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))

    def test_unsupported_types(self):
        """Schemas containing MapType or ArrayType(TimestampType) are rejected."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        schema = StructType(
            [StructField("id", LongType(), True),
             StructField("map", MapType(StringType(), IntegerType()), True)])
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(
                    NotImplementedError,
                    'Invalid returnType.*grouped map Pandas UDF.*MapType'):
                pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
        schema = StructType(
            [StructField("id", LongType(), True),
             StructField("arr_ts", ArrayType(TimestampType()), True)])
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(
                    NotImplementedError,
                    'Invalid returnType.*grouped map Pandas UDF.*ArrayType.*TimestampType'):
                pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)

    # Regression test for SPARK-23314
    def test_timestamp_dst(self):
        """Timestamps spanning a DST transition survive the Arrow round-trip."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
        dt = [datetime.datetime(2015, 11, 1, 0, 30),
              datetime.datetime(2015, 11, 1, 1, 30),
              datetime.datetime(2015, 11, 1, 2, 30)]
        df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
        foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
        result = df.groupby('time').apply(foo_udf).sort('time')
        self.assertPandasEqual(df.toPandas(), result.toPandas())

    def test_udf_with_key(self):
        """Two-argument grouped map UDFs receive the grouping key as a tuple."""
        from pyspark.sql.functions import pandas_udf, col, PandasUDFType
        df = self.data
        pdf = df.toPandas()
        def foo1(key, pdf):
            import numpy as np
            assert type(key) == tuple
            assert type(key[0]) == np.int64
            return pdf.assign(v1=key[0],
                              v2=pdf.v * key[0],
                              v3=pdf.v * pdf.id,
                              v4=pdf.v * pdf.id.mean())
        def foo2(key, pdf):
            import numpy as np
            assert type(key) == tuple
            assert type(key[0]) == np.int64
            assert type(key[1]) == np.int32
            return pdf.assign(v1=key[0],
                              v2=key[1],
                              v3=pdf.v * key[0],
                              v4=pdf.v + key[1])
        def foo3(key, pdf):
            # Empty groupby: the key tuple is empty.
            assert type(key) == tuple
            assert len(key) == 0
            return pdf.assign(v1=pdf.v * pdf.id)
        # v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
        # v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
        udf1 = pandas_udf(
            foo1,
            'id long, v int, v1 long, v2 int, v3 long, v4 double',
            PandasUDFType.GROUPED_MAP)
        udf2 = pandas_udf(
            foo2,
            'id long, v int, v1 long, v2 int, v3 int, v4 int',
            PandasUDFType.GROUPED_MAP)
        udf3 = pandas_udf(
            foo3,
            'id long, v int, v1 long',
            PandasUDFType.GROUPED_MAP)
        # Test groupby column
        result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
        expected1 = pdf.groupby('id')\
            .apply(lambda x: udf1.func((x.id.iloc[0],), x))\
            .sort_values(['id', 'v']).reset_index(drop=True)
        self.assertPandasEqual(expected1, result1)
        # Test groupby expression
        result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
        expected2 = pdf.groupby(pdf.id % 2)\
            .apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
            .sort_values(['id', 'v']).reset_index(drop=True)
        self.assertPandasEqual(expected2, result2)
        # Test complex groupby
        result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
        expected3 = pdf.groupby([pdf.id, pdf.v % 2])\
            .apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
            .sort_values(['id', 'v']).reset_index(drop=True)
        self.assertPandasEqual(expected3, result3)
        # Test empty groupby
        result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
        expected4 = udf3.func((), pdf)
        self.assertPandasEqual(expected4, result4)

    def test_column_order(self):
        """Output columns are matched by name (or by position when the returned
        pdf has positional/integer-indexed columns), and name mismatches or
        uncastable positional types raise."""
        from collections import OrderedDict
        import pandas as pd
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        # Helper function to set column names from a list
        # NOTE(review): zips over the outer pd_result's columns (a closure),
        # not the pdf parameter's — works because callers pass pd_result, but
        # pdf.columns would be the safer spelling; confirm before changing.
        def rename_pdf(pdf, names):
            pdf.rename(columns={old: new for old, new in
                                zip(pd_result.columns, names)}, inplace=True)
        df = self.data
        grouped_df = df.groupby('id')
        grouped_pdf = df.toPandas().groupby('id')
        # Function returns a pdf with required column names, but order could be arbitrary using dict
        def change_col_order(pdf):
            # Constructing a DataFrame from a dict should result in the same order,
            # but use from_items to ensure the pdf column order is different than schema
            # NOTE(review): DataFrame.from_items is deprecated in newer pandas
            # (from_dict replaces it) — fine for the pandas version pinned here.
            return pd.DataFrame.from_items([
                ('id', pdf.id),
                ('u', pdf.v * 2),
                ('v', pdf.v)])
        ordered_udf = pandas_udf(
            change_col_order,
            'id long, v int, u int',
            PandasUDFType.GROUPED_MAP
        )
        # The UDF result should assign columns by name from the pdf
        result = grouped_df.apply(ordered_udf).sort('id', 'v')\
            .select('id', 'u', 'v').toPandas()
        pd_result = grouped_pdf.apply(change_col_order)
        expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
        self.assertPandasEqual(expected, result)
        # Function returns a pdf with positional columns, indexed by range
        def range_col_order(pdf):
            # Create a DataFrame with positional columns, fix types to long
            return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
        range_udf = pandas_udf(
            range_col_order,
            'id long, u long, v long',
            PandasUDFType.GROUPED_MAP
        )
        # The UDF result uses positional columns from the pdf
        result = grouped_df.apply(range_udf).sort('id', 'v') \
            .select('id', 'u', 'v').toPandas()
        pd_result = grouped_pdf.apply(range_col_order)
        rename_pdf(pd_result, ['id', 'u', 'v'])
        expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
        self.assertPandasEqual(expected, result)
        # Function returns a pdf with columns indexed with integers
        def int_index(pdf):
            return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
        int_index_udf = pandas_udf(
            int_index,
            'id long, u int, v int',
            PandasUDFType.GROUPED_MAP
        )
        # The UDF result should assign columns by position of integer index
        result = grouped_df.apply(int_index_udf).sort('id', 'v') \
            .select('id', 'u', 'v').toPandas()
        pd_result = grouped_pdf.apply(int_index)
        rename_pdf(pd_result, ['id', 'u', 'v'])
        expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
        self.assertPandasEqual(expected, result)
        @pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
        def column_name_typo(pdf):
            return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
        @pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
        def invalid_positional_types(pdf):
            return pd.DataFrame([(u'a', 1.2)])
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
                grouped_df.apply(column_name_typo).collect()
            with self.assertRaisesRegexp(Exception, "No cast implemented"):
                grouped_df.apply(invalid_positional_types).collect()

    def test_positional_assignment_conf(self):
        """With assignColumnsByPosition=True, output columns are taken by
        position, ignoring the names in the returned pdf."""
        import pandas as pd
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        with self.sql_conf({"spark.sql.execution.pandas.groupedMap.assignColumnsByPosition": True}):
            @pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
            def foo(_):
                return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
            df = self.data
            result = df.groupBy('id').apply(foo).select('a', 'b').collect()
            for r in result:
                self.assertEqual(r.a, 'hi')
                self.assertEqual(r.b, 1)

    def test_self_join_with_pandas(self):
        """Self-join of a grouped-map result (regression test for SPARK-24208)."""
        import pyspark.sql.functions as F
        @F.pandas_udf('key long, col string', F.PandasUDFType.GROUPED_MAP)
        def dummy_pandas_udf(df):
            return df[['key', 'col']]
        df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
                                         Row(key=2, col='C')])
        df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
        # this was throwing an AnalysisException before SPARK-24208
        res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
                                                 F.col('temp0.key') == F.col('temp1.key'))
        self.assertEquals(res.count(), 5)

    def test_mixed_scalar_udfs_followed_by_grouby_apply(self):
        """Columns produced by scalar python/pandas UDFs feed a grouped map UDF."""
        import pandas as pd
        from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
        df = self.spark.range(0, 10).toDF('v1')
        df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
            .withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
        result = df.groupby() \
            .apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
                              'sum int',
                              PandasUDFType.GROUPED_MAP))
        self.assertEquals(result.collect()[0]['sum'], 165)
@unittest.skipIf(
    not _have_pandas or not _have_pyarrow,
    _pandas_requirement_message or _pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
    """Tests for GROUPED_AGG pandas UDFs used with groupby().agg().

    Results are checked against the equivalent built-in SQL aggregates.
    NOTE: the names of the wrapped functions (avg, sum, weighted_mean, ...)
    matter — Spark derives result column names like 'avg(v)' from them.
    """

    @property
    def data(self):
        # 10 ids x 10 doubles each, plus a constant weight column w = 1.0.
        from pyspark.sql.functions import array, explode, col, lit
        return self.spark.range(10).toDF('id') \
            .withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
            .withColumn("v", explode(col('vs'))) \
            .drop('vs') \
            .withColumn('w', lit(1.0))

    @property
    def python_plus_one(self):
        # Plain (row-at-a-time) Python UDF: v -> v + 1.
        from pyspark.sql.functions import udf
        @udf('double')
        def plus_one(v):
            assert isinstance(v, (int, float))
            return v + 1
        return plus_one

    @property
    def pandas_scalar_plus_two(self):
        # Scalar pandas UDF: v -> v + 2 (operates on pd.Series batches).
        import pandas as pd
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        @pandas_udf('double', PandasUDFType.SCALAR)
        def plus_two(v):
            assert isinstance(v, pd.Series)
            return v + 2
        return plus_two

    @property
    def pandas_agg_mean_udf(self):
        # Group aggregate pandas UDF; the inner name 'avg' becomes the
        # derived result column name (e.g. 'avg(v)').
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def avg(v):
            return v.mean()
        return avg

    @property
    def pandas_agg_sum_udf(self):
        # Shadows the builtin sum on purpose: the name yields 'sum(v)' columns.
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def sum(v):
            return v.sum()
        return sum

    @property
    def pandas_agg_weighted_mean_udf(self):
        # Two-argument group aggregate pandas UDF: weighted mean of v by w.
        import numpy as np
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def weighted_mean(v, w):
            return np.average(v, weights=w)
        return weighted_mean

    def test_manual(self):
        """Hand-checked aggregation results, including an array-returning agg."""
        from pyspark.sql.functions import pandas_udf, array
        df = self.data
        sum_udf = self.pandas_agg_sum_udf
        mean_udf = self.pandas_agg_mean_udf
        mean_arr_udf = pandas_udf(
            self.pandas_agg_mean_udf.func,
            ArrayType(self.pandas_agg_mean_udf.returnType),
            self.pandas_agg_mean_udf.evalType)
        result1 = df.groupby('id').agg(
            sum_udf(df.v),
            mean_udf(df.v),
            mean_arr_udf(array(df.v))).sort('id')
        expected1 = self.spark.createDataFrame(
            [[0, 245.0, 24.5, [24.5]],
             [1, 255.0, 25.5, [25.5]],
             [2, 265.0, 26.5, [26.5]],
             [3, 275.0, 27.5, [27.5]],
             [4, 285.0, 28.5, [28.5]],
             [5, 295.0, 29.5, [29.5]],
             [6, 305.0, 30.5, [30.5]],
             [7, 315.0, 31.5, [31.5]],
             [8, 325.0, 32.5, [32.5]],
             [9, 335.0, 33.5, [33.5]]],
            ['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
        self.assertPandasEqual(expected1.toPandas(), result1.toPandas())

    def test_basic(self):
        """Weighted mean with literal/column weights over column/expression groupings."""
        from pyspark.sql.functions import col, lit, sum, mean
        df = self.data
        weighted_mean_udf = self.pandas_agg_weighted_mean_udf
        # Groupby one column and aggregate one UDF with literal
        result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
        expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
        self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
        # Groupby one expression and aggregate one UDF with literal
        result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
            .sort(df.id + 1)
        expected2 = df.groupby((col('id') + 1))\
            .agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
        self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
        # Groupby one column and aggregate one UDF without literal
        result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
        expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
        self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
        # Groupby one expression and aggregate one UDF without literal
        result4 = df.groupby((col('id') + 1).alias('id'))\
            .agg(weighted_mean_udf(df.v, df.w))\
            .sort('id')
        expected4 = df.groupby((col('id') + 1).alias('id'))\
            .agg(mean(df.v).alias('weighted_mean(v, w)'))\
            .sort('id')
        self.assertPandasEqual(expected4.toPandas(), result4.toPandas())

    def test_unsupported_types(self):
        """Unsupported return types (nested timestamps, structs, maps) raise."""
        from pyspark.sql.types import DoubleType, MapType
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
                pandas_udf(
                    lambda x: x,
                    ArrayType(ArrayType(TimestampType())),
                    PandasUDFType.GROUPED_AGG)
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
                @pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
                def mean_and_std_udf(v):
                    return v.mean(), v.std()
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
                @pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
                def mean_and_std_udf(v):
                    return {v.mean(): v.std()}

    def test_alias(self):
        """.alias() works on a group aggregate pandas UDF result."""
        from pyspark.sql.functions import mean
        df = self.data
        mean_udf = self.pandas_agg_mean_udf
        result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
        expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
        self.assertPandasEqual(expected1.toPandas(), result1.toPandas())

    def test_mixed_sql(self):
        """
        Test mixing group aggregate pandas UDF with sql expression.
        """
        from pyspark.sql.functions import sum, mean
        df = self.data
        sum_udf = self.pandas_agg_sum_udf
        # Mix group aggregate pandas UDF with sql expression
        result1 = (df.groupby('id')
                   .agg(sum_udf(df.v) + 1)
                   .sort('id'))
        expected1 = (df.groupby('id')
                     .agg(sum(df.v) + 1)
                     .sort('id'))
        # Mix group aggregate pandas UDF with sql expression (order swapped)
        result2 = (df.groupby('id')
                   .agg(sum_udf(df.v + 1))
                   .sort('id'))
        expected2 = (df.groupby('id')
                     .agg(sum(df.v + 1))
                     .sort('id'))
        # Wrap group aggregate pandas UDF with two sql expressions
        result3 = (df.groupby('id')
                   .agg(sum_udf(df.v + 1) + 2)
                   .sort('id'))
        expected3 = (df.groupby('id')
                     .agg(sum(df.v + 1) + 2)
                     .sort('id'))
        self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
        self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
        self.assertPandasEqual(expected3.toPandas(), result3.toPandas())

    def test_mixed_udfs(self):
        """
        Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
        """
        from pyspark.sql.functions import sum, mean
        df = self.data
        plus_one = self.python_plus_one
        plus_two = self.pandas_scalar_plus_two
        sum_udf = self.pandas_agg_sum_udf
        # Mix group aggregate pandas UDF and python UDF
        result1 = (df.groupby('id')
                   .agg(plus_one(sum_udf(df.v)))
                   .sort('id'))
        expected1 = (df.groupby('id')
                     .agg(plus_one(sum(df.v)))
                     .sort('id'))
        # Mix group aggregate pandas UDF and python UDF (order swapped)
        result2 = (df.groupby('id')
                   .agg(sum_udf(plus_one(df.v)))
                   .sort('id'))
        expected2 = (df.groupby('id')
                     .agg(sum(plus_one(df.v)))
                     .sort('id'))
        # Mix group aggregate pandas UDF and scalar pandas UDF
        result3 = (df.groupby('id')
                   .agg(sum_udf(plus_two(df.v)))
                   .sort('id'))
        expected3 = (df.groupby('id')
                     .agg(sum(plus_two(df.v)))
                     .sort('id'))
        # Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
        result4 = (df.groupby('id')
                   .agg(plus_two(sum_udf(df.v)))
                   .sort('id'))
        expected4 = (df.groupby('id')
                     .agg(plus_two(sum(df.v)))
                     .sort('id'))
        # Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
        result5 = (df.groupby(plus_one(df.id))
                   .agg(plus_one(sum_udf(plus_one(df.v))))
                   .sort('plus_one(id)'))
        expected5 = (df.groupby(plus_one(df.id))
                     .agg(plus_one(sum(plus_one(df.v))))
                     .sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use a
        # scalar pandas UDF in groupby
        result6 = (df.groupby(plus_two(df.id))
                   .agg(plus_two(sum_udf(plus_two(df.v))))
                   .sort('plus_two(id)'))
        expected6 = (df.groupby(plus_two(df.id))
                     .agg(plus_two(sum(plus_two(df.v))))
                     .sort('plus_two(id)'))
        self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
        self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
        self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
        self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
        self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
        self.assertPandasEqual(expected6.toPandas(), result6.toPandas())

    def test_multiple_udfs(self):
        """
        Test multiple group aggregate pandas UDFs in one agg function.
        """
        from pyspark.sql.functions import col, lit, sum, mean
        df = self.data
        mean_udf = self.pandas_agg_mean_udf
        sum_udf = self.pandas_agg_sum_udf
        weighted_mean_udf = self.pandas_agg_weighted_mean_udf
        result1 = (df.groupBy('id')
                   .agg(mean_udf(df.v),
                        sum_udf(df.v),
                        weighted_mean_udf(df.v, df.w))
                   .sort('id')
                   .toPandas())
        # With w == 1.0 everywhere, the weighted mean equals the plain mean.
        expected1 = (df.groupBy('id')
                     .agg(mean(df.v),
                          sum(df.v),
                          mean(df.v).alias('weighted_mean(v, w)'))
                     .sort('id')
                     .toPandas())
        self.assertPandasEqual(expected1, result1)

    def test_complex_groupby(self):
        """Group aggregate pandas UDFs under various grouping expressions/UDFs."""
        from pyspark.sql.functions import lit, sum
        df = self.data
        sum_udf = self.pandas_agg_sum_udf
        plus_one = self.python_plus_one
        plus_two = self.pandas_scalar_plus_two
        # groupby one expression
        result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
        expected1 = df.groupby(df.v % 2).agg(sum(df.v))
        # empty groupby
        result2 = df.groupby().agg(sum_udf(df.v))
        expected2 = df.groupby().agg(sum(df.v))
        # groupby one column and one sql expression
        result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
        expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
        # groupby one python UDF
        result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
        expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
        # groupby one scalar pandas UDF
        result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
        expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
        # groupby one expression and one python UDF
        result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
        expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
        # groupby one expression and one scalar pandas UDF
        result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
        expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
        self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
        self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
        self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
        self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
        self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
        self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
        self.assertPandasEqual(expected7.toPandas(), result7.toPandas())

    def test_complex_expressions(self):
        """Nest group aggregate UDFs inside sql expressions and scalar UDFs."""
        from pyspark.sql.functions import col, sum
        df = self.data
        plus_one = self.python_plus_one
        plus_two = self.pandas_scalar_plus_two
        sum_udf = self.pandas_agg_sum_udf
        # Test complex expressions with sql expression, python UDF and
        # group aggregate pandas UDF
        result1 = (df.withColumn('v1', plus_one(df.v))
                   .withColumn('v2', df.v + 2)
                   .groupby(df.id, df.v % 2)
                   .agg(sum_udf(col('v')),
                        sum_udf(col('v1') + 3),
                        sum_udf(col('v2')) + 5,
                        plus_one(sum_udf(col('v1'))),
                        sum_udf(plus_one(col('v2'))))
                   .sort('id')
                   .toPandas())
        expected1 = (df.withColumn('v1', df.v + 1)
                     .withColumn('v2', df.v + 2)
                     .groupby(df.id, df.v % 2)
                     .agg(sum(col('v')),
                          sum(col('v1') + 3),
                          sum(col('v2')) + 5,
                          plus_one(sum(col('v1'))),
                          sum(plus_one(col('v2'))))
                     .sort('id')
                     .toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
        # group aggregate pandas UDF
        result2 = (df.withColumn('v1', plus_one(df.v))
                   .withColumn('v2', df.v + 2)
                   .groupby(df.id, df.v % 2)
                   .agg(sum_udf(col('v')),
                        sum_udf(col('v1') + 3),
                        sum_udf(col('v2')) + 5,
                        plus_two(sum_udf(col('v1'))),
                        sum_udf(plus_two(col('v2'))))
                   .sort('id')
                   .toPandas())
        expected2 = (df.withColumn('v1', df.v + 1)
                     .withColumn('v2', df.v + 2)
                     .groupby(df.id, df.v % 2)
                     .agg(sum(col('v')),
                          sum(col('v1') + 3),
                          sum(col('v2')) + 5,
                          plus_two(sum(col('v1'))),
                          sum(plus_two(col('v2'))))
                     .sort('id')
                     .toPandas())
        # Test sequential groupby aggregate
        result3 = (df.groupby('id')
                   .agg(sum_udf(df.v).alias('v'))
                   .groupby('id')
                   .agg(sum_udf(col('v')))
                   .sort('id')
                   .toPandas())
        expected3 = (df.groupby('id')
                     .agg(sum(df.v).alias('v'))
                     .groupby('id')
                     .agg(sum(col('v')))
                     .sort('id')
                     .toPandas())
        self.assertPandasEqual(expected1, result1)
        self.assertPandasEqual(expected2, result2)
        self.assertPandasEqual(expected3, result3)

    def test_retain_group_columns(self):
        """Works with spark.sql.retainGroupColumns disabled."""
        from pyspark.sql.functions import sum, lit, col
        with self.sql_conf({"spark.sql.retainGroupColumns": False}):
            df = self.data
            sum_udf = self.pandas_agg_sum_udf
            result1 = df.groupby(df.id).agg(sum_udf(df.v))
            expected1 = df.groupby(df.id).agg(sum(df.v))
            self.assertPandasEqual(expected1.toPandas(), result1.toPandas())

    def test_array_type(self):
        """A group aggregate UDF may return an array value."""
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        df = self.data
        array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
        result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
        self.assertEquals(result1.first()['v2'], [1.0, 2.0])

    def test_invalid_args(self):
        """Invalid mixes of UDFs/aggregates in agg() raise AnalysisException."""
        from pyspark.sql.functions import mean
        df = self.data
        plus_one = self.python_plus_one
        mean_udf = self.pandas_agg_mean_udf
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(
                    AnalysisException,
                    'nor.*aggregate function'):
                df.groupby(df.id).agg(plus_one(df.v)).collect()
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(
                    AnalysisException,
                    'aggregate function.*argument.*aggregate function'):
                df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(
                    AnalysisException,
                    'mixture.*aggregate function.*group aggregate pandas UDF'):
                df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
    @property
    def data(self):
        # 10 ids x 10 doubles each, plus a constant weight column w = 1.0.
        from pyspark.sql.functions import array, explode, col, lit
        return self.spark.range(10).toDF('id') \
            .withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
            .withColumn("v", explode(col('vs'))) \
            .drop('vs') \
            .withColumn('w', lit(1.0))
    @property
    def python_plus_one(self):
        # Plain (row-at-a-time) Python UDF: v -> v + 1.
        from pyspark.sql.functions import udf
        return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
return pandas_udf(lambda v: v * 2, 'double')
    @property
    def pandas_agg_mean_udf(self):
        # Group aggregate pandas UDF; keep the inner name 'avg' — Spark
        # derives result column names (e.g. 'avg(v)') from it.
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def avg(v):
            return v.mean()
        return avg
    @property
    def pandas_agg_max_udf(self):
        # Group aggregate pandas UDF; shadows the builtin max on purpose so
        # derived column names read 'max(v)'.
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def max(v):
            return v.max()
        return max
    @property
    def pandas_agg_min_udf(self):
        # Group aggregate pandas UDF; shadows the builtin min on purpose so
        # derived column names read 'min(v)'.
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        @pandas_udf('double', PandasUDFType.GROUPED_AGG)
        def min(v):
            return v.min()
        return min
    @property
    def unbounded_window(self):
        # Per-id partitions with an unbounded frame: every row sees the
        # whole partition.
        return Window.partitionBy('id') \
            .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
def test_simple(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, percent_rank, mean, max
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
from pyspark.sql.functions import max, min, mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
from pyspark.sql.functions import mean
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
from pyspark.sql.functions import max, min, rank, col
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
from pyspark.sql.functions import mean, pandas_udf, PandasUDFType
df = self.data
w = self.unbounded_window
ow = self.ordered_window
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*Only unbounded window frame is supported.*'):
df.withColumn('mean_v', mean_udf(df['v']).over(ow))
if __name__ == "__main__":
    # Use the XML test runner when available (e.g. for CI report collection),
    # otherwise fall back to the default text runner.
    from pyspark.sql.tests import *
    runner_args = {'verbosity': 2}
    if xmlrunner:
        runner_args['testRunner'] = xmlrunner.XMLTestRunner(output='target/test-reports')
    unittest.main(**runner_args)
| apache-2.0 | -8,004,263,089,135,259,000 | 41.616225 | 100 | 0.578001 | false |
ChenglongChen/Kaggle_HomeDepot | Code/Igor&Kostia/text_processing.py | 1 | 63488 | # -*- coding: utf-8 -*-
"""
Initial text preprocessing.
Although text processing can be technically done within feature generation functions,
we found it to be very efficient to make all preprocessing first and only then move to
feature generation. It is because the same processed text is used as an input to
generate several different features.
Competition: HomeDepot Search Relevance
Author: Igor Buinyi
Team: Turing test
"""
from config_IgorKostia import *
import numpy as np
import pandas as pd
from time import time
import re
import csv
import os
import nltk
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
stoplist.append('till') # add 'till' to stoplist
# 'can' also might mean 'a container' like in 'trash can'
# so we create a separate stop list without 'can' to be used for query and product title
stoplist_wo_can=stoplist[:]
stoplist_wo_can.remove('can')
from homedepot_functions import *
from google_dict import *
t0 = time()
t1 = time()
############################################
####### PREPROCESSING ######################
############################################
### load train and test ###################
df_train = pd.read_csv(DATA_DIR+'/train.csv', encoding="ISO-8859-1")
df_test = pd.read_csv(DATA_DIR+'/test.csv', encoding="ISO-8859-1")
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
### load product attributes ###############
df_attr = pd.read_csv(DATA_DIR+'/attributes.csv', encoding="ISO-8859-1")
print 'loading time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### find unique brands from the attributes file
### for a few product_uids there are at least two different names in "MFG Brand Name"
### in such cases we keep only one of the names
df_all = pd.merge(df_all, df_attr[df_attr['name']=="MFG Brand Name"][['product_uid','value']], how='left', on='product_uid')
df_all['brand']=df_all['value'].fillna("").map(lambda x: x.encode('utf-8'))
df_all=df_all.drop('value',axis=1)
### Create a list of words with lowercase and uppercase letters
### Examples: 'InSinkErator', 'EpoxyShield'
### They are words from brand names or words from product title.
### The dict is used to correct product description which contins concatenated
### lines of text without separators :
### ---View lawn edgings and brick/ paver edgingsUtility stakes can be used for many purposes---
### Here we need to replace 'edgingsUtility' with 'edgings utility'.
### But we don't need to replace 'InSinkErator' with 'in sink erator'
add_space_stop_list=[]
uniq_brands=list(set(list(df_all['brand'])))
for i in range(0,len(uniq_brands)):
uniq_brands[i]=simple_parser(uniq_brands[i])
if re.search(r'[a-z][A-Z][a-z]',uniq_brands[i])!=None:
for word in uniq_brands[i].split():
if re.search(r'[a-z][A-Z][a-z]',word)!=None:
add_space_stop_list.append(word.lower())
add_space_stop_list=list(set(add_space_stop_list))
print len(add_space_stop_list)," words from brands in add_space_stop_list"
uniq_titles=list(set(list(df_all['product_title'])))
for i in range(0,len(uniq_titles)):
uniq_titles[i]=simple_parser(uniq_titles[i])
if re.search(r'[a-z][A-Z][a-z]',uniq_titles[i])!=None:
for word in uniq_titles[i].split():
if re.search(r'[a-z][A-Z][a-z]',word)!=None:
add_space_stop_list.append(word.lower())
add_space_stop_list=list(set(add_space_stop_list))
print len(add_space_stop_list) ," total words from brands and product titles in add_space_stop_list\n"
#################################################################
##### First step of spell correction: using the Google dict
##### from the forum
# https://www.kaggle.com/steubk/home-depot-product-search-relevance/fixing-typos
df_all['search_term']=df_all['search_term'].map(lambda x: google_dict[x] if x in google_dict.keys() else x)
#################################################################
##### AUTOMATIC SPELL CHECKER ###################################
#################################################################
### A simple spell checker is implemented here
### First, we get unique words from search_term and product_title
### Then, we count how many times word occurs in search_term and product_title
### Finally, if the word is not present in product_title and not meaningful
### (i.e. wn.synsets(word) returns empty list), the word is likely
### to be misspelled, so we try to correct it using bigrams, words from matched
### products or all products. The best match is chosen using
### difflib.SequenceMatcher()
def is_word_in_string(word,s):
return word in s.split()
def create_bigrams(s):
lst=[word for word in s.split() if len(re.sub('[^0-9]', '', word))==0 and len(word)>2]
output=""
i=0
if len(lst)>=2:
while i<len(lst)-1:
output+= " "+lst[i]+"_"+lst[i+1]
i+=1
return output
df_all['product_title_simpleparsed']=df_all['product_title'].map(lambda x: simple_parser(x).lower())
df_all['search_term_simpleparsed']=df_all['search_term'].map(lambda x: simple_parser(x).lower())
str_title=" ".join(list(df_all['product_title'].map(lambda x: simple_parser(x).lower())))
str_query=" ".join(list(df_all['search_term'].map(lambda x: simple_parser(x).lower())))
# create bigrams
bigrams_str_title=" ".join(list(df_all['product_title'].map(lambda x: create_bigrams(simple_parser(x).lower()))))
bigrams_set=set(bigrams_str_title.split())
### count word frequencies for query and product title
my_dict={}
str1= str_title+" "+str_query
for word in list(set(list(str1.split()))):
my_dict[word]={"title":0, "query":0, 'word':word}
for word in str_title.split():
my_dict[word]["title"]+=1
for word in str_query.split():
my_dict[word]["query"]+=1
### 1. Process words without digits
### Potential errors: words that appear only in query
### Correct words: 5 or more times in product_title
errors_dict={}
correct_dict={}
for word in my_dict.keys():
if len(word)>=3 and len(re.sub('[^0-9]', '', word))==0:
if my_dict[word]["title"]==0:
if len(wn.synsets(word))>0 \
or (word.endswith('s') and (word[:-1] in my_dict.keys()) and my_dict[word[:-1]]["title"]>0)\
or (word[-1]!='s' and (word+'s' in my_dict.keys()) and my_dict[word+'s']["title"]>0):
1
else:
errors_dict[word]=my_dict[word]
elif my_dict[word]["title"]>=5:
correct_dict[word]=my_dict[word]
### for each error word try finding a good match in bigrams, matched products, all products
cnt=0
NN=len(errors_dict.keys())
t0=time()
for i in range(0,len(errors_dict.keys())):
word=sorted(errors_dict.keys())[i]
cnt+=1
lst=[]
lst_tuple=[]
suggested=False
suggested_word=""
rt_max=0
# if only one word in query, use be more selective in choosing a correction
min_query_len=min(df_all['search_term_simpleparsed'][df_all['search_term_simpleparsed'].map(lambda x: is_word_in_string(word,x))].map(lambda x: len(x.split())))
delta=0.05*int(min_query_len<2)
words_from_matched_titles=[item for item in \
" ".join(list(set(df_all['product_title_simpleparsed'][df_all['search_term_simpleparsed'].map(lambda x: is_word_in_string(word,x))]))).split() \
if len(item)>2 and len(re.sub('[^0-9]', '', item))==0]
words_from_matched_titles=list(set(words_from_matched_titles))
words_from_matched_titles.sort()
source=""
for bigram in bigrams_set:
if bigram.replace("_","")==word:
suggested=True
suggested_word=bigram.replace("_"," ")
source="from bigrams"
if source=="":
for correct_word in words_from_matched_titles:
rt, rt_scaled = seq_matcher(word,correct_word)
#print correct_word, rt,rt_scaled
if rt>0.75+delta or (len(word)<6 and rt>0.68+delta):
lst.append(correct_word)
lst_tuple.append((correct_word,my_dict[correct_word]["title"]))
if rt>rt_max:
rt_max=rt
suggested=True
source="from matched products"
suggested_word=correct_word
elif rt==rt_max and seq_matcher("".join(sorted(word)),"".join(sorted(correct_word)))[0]>seq_matcher("".join(sorted(word)),"".join(sorted(suggested_word)))[0]:
suggested_word=correct_word
elif rt==rt_max:
suggested=False
source=""
if source=="" and len(lst)==0:
source="from all products"
for correct_word in correct_dict.keys():
rt, rt_scaled = seq_matcher(word,correct_word)
#print correct_word, rt,rt_scaled
if correct_dict[correct_word]["title"]>10 and (rt>0.8+delta or (len(word)<6 and rt>0.73+delta)):
#print correct_word, rt,rt_scaled
lst.append(correct_word)
lst_tuple.append((correct_word,correct_dict[correct_word]["title"]))
if rt>rt_max:
rt_max=rt
suggested=True
suggested_word=correct_word
elif rt==rt_max and seq_matcher("".join(sorted(word)),"".join(sorted(correct_word)))[0]>seq_matcher("".join(sorted(word)),"".join(sorted(suggested_word)))[0]:
suggested_word=correct_word
elif rt==rt_max:
suggested=False
if suggested==True:
errors_dict[word]["suggestion"]=suggested_word
errors_dict[word]["others"]=lst_tuple
errors_dict[word]["source"]=source
else:
errors_dict[word]["suggestion"]=""
errors_dict[word]["others"]=lst_tuple
errors_dict[word]["source"]=source
#print(cnt, word, errors_dict[word]["query"], errors_dict[word]["suggestion"], source, errors_dict[word]["others"])
#if (cnt % 20)==0:
# print cnt, " out of ", NN, "; ", round((time()-t0),1) ,' sec'
### 2. Add some words with digits
### If the word begins with a meanigful part [len(wn.synsets(srch.group(0)))>0],
### ends with a number and has vowels
for word in my_dict.keys():
if my_dict[word]['query']>0 and my_dict[word]['title']==0 \
and len(re.sub('[^0-9]', '', word))!=0 and len(re.sub('[^a-z]', '', word))!=0:
srch=re.search(r'(?<=^)[a-z][a-z][a-z]+(?=[0-9])',word)
if srch!=None and len(wn.synsets(srch.group(0)))>0 \
and len(re.sub('[^aeiou]', '', word))>0 and word[-1] in '0123456789':
errors_dict[word]=my_dict[word]
errors_dict[word]["source"]="added space before digit"
errors_dict[word]["suggestion"]=re.sub(r'(?<=^)'+srch.group(0)+r'(?=[a-zA-Z0-9])',srch.group(0)+' ',word)
#print word, re.sub(r'(?<=^)'+srch.group(0)+r'(?=[a-zA-Z0-9])',srch.group(0)+' ',word)
### save dictionary
corrections_df=pd.DataFrame(errors_dict).transpose()
corrections_df.to_csv(PROCESSINGTEXT_DIR+"/automatically_generated_word_corrections.csv")
print 'building spell checker time:',round((time()-t0)/60,1) ,'minutes\n'
##### END OF SPELL CHECKER ######################################
#################################################################
########################################
##### load words for spell checker
spell_check_dict={}
for word in errors_dict.keys():
if errors_dict[word]['suggestion']!="":
spell_check_dict[word]=errors_dict[word]['suggestion']
"""
spell_check_dict={}
with open(PROCESSINGTEXT_DIR+'/automatically_generated_word_corrections.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['suggestion']!="":
spell_check_dict[row['word']]=row['suggestion']
"""
###############################################
### parse query and product title
df_all['search_term_parsed']=col_parser(df_all['search_term'],automatic_spell_check_dict=spell_check_dict,\
add_space_stop_list=[]).map(lambda x: x.encode('utf-8'))
df_all['search_term_parsed_wospellcheck']=col_parser(df_all['search_term'],automatic_spell_check_dict={},\
add_space_stop_list=[]).map(lambda x: x.encode('utf-8'))
print 'search_term parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### function to check whether queries parsed with and without spell correction are identical
def match_queries(q1,q2):
q1=re.sub('[^a-z\ ]', '', q1)
q2=re.sub('[^a-z\ ]', '', q2)
q1= " ".join([word[0:(len(word)-int(word[-1]=='s'))] for word in q1.split()])
q2= " ".join([word[0:(len(word)-int(word[-1]=='s'))] for word in q2.split()])
return difflib.SequenceMatcher(None, q1,q2).ratio()
df_all['is_query_misspelled']=df_all.apply(lambda x: \
match_queries(x['search_term_parsed'],x['search_term_parsed_wospellcheck']),axis=1)
df_all=df_all.drop(['search_term_parsed_wospellcheck'],axis=1)
print 'create dummy "is_query_misspelled" time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
df_all['product_title_parsed']=col_parser(df_all['product_title'],add_space_stop_list=[],\
remove_from_brackets=True).map(lambda x: x.encode('utf-8'))
print 'product_title parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
#################################################################
##### COUNT BRAND NAMES #########################################
#################################################################
### some brand names in "MFG Brand Name" of attributes.csv have a few words
### but it is much more likely for a person to search for brand 'BEHR'
### than 'BEHR PREMIUM PLUS ULTRA'. That is why we replace long brand names
### with a shorter alternatives
replace_brand_dict={
'acurio latticeworks': 'acurio',
'american kennel club':'akc',
'amerimax home products': 'amerimax',
'barclay products':'barclay',
'behr marquee': 'behr',
'behr premium': 'behr',
'behr premium deckover': 'behr',
'behr premium plus': 'behr',
'behr premium plus ultra': 'behr',
'behr premium textured deckover': 'behr',
'behr pro': 'behr',
'bel air lighting': 'bel air',
'bootz industries':'bootz',
'campbell hausfeld':'campbell',
'columbia forest products': 'columbia',
'essick air products':'essick air',
'evergreen enterprises':'evergreen',
'feather river doors': 'feather river',
'gardner bender':'gardner',
'ge parts':'ge',
'ge reveal':'ge',
'gibraltar building products':'gibraltar',
'gibraltar mailboxes':'gibraltar',
'glacier bay':'glacier',
'great outdoors by minka lavery': 'great outdoors',
'hamilton beach': 'hamilton',
'hampton bay':'hampton',
'hampton bay quickship':'hampton',
'handy home products':'handy home',
'hickory hardware': 'hickory',
'home accents holiday': 'home accents',
'home decorators collection': 'home decorators',
'homewerks worldwide':'homewerks',
'klein tools': 'klein',
'lakewood cabinets':'lakewood',
'leatherman tool group':'leatherman',
'legrand adorne':'legrand',
'legrand wiremold':'legrand',
'lg hausys hi macs':'lg',
'lg hausys viatera':'lg',
'liberty foundry':'liberty',
'liberty garden':'liberty',
'lithonia lighting':'lithonia',
'loloi rugs':'loloi',
'maasdam powr lift':'maasdam',
'maasdam powr pull':'maasdam',
'martha stewart living': 'martha stewart',
'merola tile': 'merola',
'miracle gro':'miracle',
'miracle sealants':'miracle',
'mohawk home': 'mohawk',
'mtd genuine factory parts':'mtd',
'mueller streamline': 'mueller',
'newport coastal': 'newport',
'nourison overstock':'nourison',
'nourison rug boutique':'nourison',
'owens corning': 'owens',
'premier copper products':'premier',
'price pfister':'pfister',
'pride garden products':'pride garden',
'prime line products':'prime line',
'redi base':'redi',
'redi drain':'redi',
'redi flash':'redi',
'redi ledge':'redi',
'redi neo':'redi',
'redi niche':'redi',
'redi shade':'redi',
'redi trench':'redi',
'reese towpower':'reese',
'rheem performance': 'rheem',
'rheem ecosense': 'rheem',
'rheem performance plus': 'rheem',
'rheem protech': 'rheem',
'richelieu hardware':'richelieu',
'rubbermaid commercial products': 'rubbermaid',
'rust oleum american accents': 'rust oleum',
'rust oleum automotive': 'rust oleum',
'rust oleum concrete stain': 'rust oleum',
'rust oleum epoxyshield': 'rust oleum',
'rust oleum flexidip': 'rust oleum',
'rust oleum marine': 'rust oleum',
'rust oleum neverwet': 'rust oleum',
'rust oleum parks': 'rust oleum',
'rust oleum professional': 'rust oleum',
'rust oleum restore': 'rust oleum',
'rust oleum rocksolid': 'rust oleum',
'rust oleum specialty': 'rust oleum',
'rust oleum stops rust': 'rust oleum',
'rust oleum transformations': 'rust oleum',
'rust oleum universal': 'rust oleum',
'rust oleum painter touch 2': 'rust oleum',
'rust oleum industrial choice':'rust oleum',
'rust oleum okon':'rust oleum',
'rust oleum painter touch':'rust oleum',
'rust oleum painter touch 2':'rust oleum',
'rust oleum porch and floor':'rust oleum',
'salsbury industries':'salsbury',
'simpson strong tie': 'simpson',
'speedi boot': 'speedi',
'speedi collar': 'speedi',
'speedi grille': 'speedi',
'speedi products': 'speedi',
'speedi vent': 'speedi',
'pass and seymour': 'seymour',
'pavestone rumblestone': 'rumblestone',
'philips advance':'philips',
'philips fastener':'philips',
'philips ii plus':'philips',
'philips manufacturing company':'philips',
'safety first':'safety 1st',
'sea gull lighting': 'sea gull',
'scott':'scotts',
'scotts earthgro':'scotts',
'south shore furniture': 'south shore',
'tafco windows': 'tafco',
'trafficmaster allure': 'trafficmaster',
'trafficmaster allure plus': 'trafficmaster',
'trafficmaster allure ultra': 'trafficmaster',
'trafficmaster ceramica': 'trafficmaster',
'trafficmaster interlock': 'trafficmaster',
'thomas lighting': 'thomas',
'unique home designs':'unique home',
'veranda hp':'veranda',
'whitehaus collection':'whitehaus',
'woodgrain distritubtion':'woodgrain',
'woodgrain millwork': 'woodgrain',
'woodford manufacturing company': 'woodford',
'wyndham collection':'wyndham',
'yardgard select': 'yardgard',
'yosemite home decor': 'yosemite'
}
df_all['brand_parsed']=col_parser(df_all['brand'].map(lambda x: re.sub('^[t|T]he ', '', x.replace(".N/A","").replace("N.A.","").replace("n/a","").replace("Generic Unbranded","").replace("Unbranded","").replace("Generic",""))),add_space_stop_list=add_space_stop_list)
list_brands=list(df_all['brand_parsed'])
df_all['brand_parsed']=df_all['brand_parsed'].map(lambda x: replace_brand_dict[x] if x in replace_brand_dict.keys() else x)
### count frequencies of brands in query and product_title
str_query=" : ".join(list(df_all['search_term_parsed'])).lower()
print "\nGenerating brand dict: How many times each brand appears in the dataset?"
brand_dict=get_attribute_dict(list_brands,str_query=str_query)
### These words are likely to mean other things than brand names.
### For example, it would not be prudent to consider each occurence of 'design' or 'veranda' as a brand name.
### We decide not to use these words as brands and exclude them from our brand dictionary.
# The list is shared on the forum.
del_list=['aaa','off','impact','square','shelves','finish','ring','flood','dual','ball','cutter',\
'max','off','mat','allure','diamond','drive', 'edge','anchor','walls','universal','cat', 'dawn','ion','daylight',\
'roman', 'weed eater', 'restore', 'design', 'caddy', 'pole caddy', 'jet', 'classic', 'element', 'aqua',\
'terra', 'decora', 'ez', 'briggs', 'wedge', 'sunbrella', 'adorne', 'santa', 'bella', 'duck', 'hotpoint',\
'duck', 'tech', 'titan', 'powerwasher', 'cooper lighting', 'heritage', 'imperial', 'monster', 'peak',
'bell', 'drive', 'trademark', 'toto', 'champion', 'shop vac', 'lava', 'jet', 'flood', \
'roman', 'duck', 'magic', 'allen', 'bunn', 'element', 'international', 'larson', 'tiki', 'titan', \
'space saver', 'cutter', 'scotch', 'adorne', 'ball', 'sunbeam', 'fatmax', 'poulan', 'ring', 'sparkle', 'bissell', \
'universal', 'paw', 'wedge', 'restore', 'daylight', 'edge', 'americana', 'wacker', 'cat', 'allure', 'bonnie plants', \
'troy', 'impact', 'buffalo', 'adams', 'jasco', 'rapid dry', 'aaa', 'pole caddy', 'pac', 'seymour', 'mobil', \
'mastercool', 'coca cola', 'timberline', 'classic', 'caddy', 'sentry', 'terrain', 'nautilus', 'precision', \
'artisan', 'mural', 'game', 'royal', 'use', 'dawn', 'task', 'american line', 'sawtrax', 'solo', 'elements', \
'summit', 'anchor', 'off', 'spruce', 'medina', 'shoulder dolly', 'brentwood', 'alex', 'wilkins', 'natural magic', \
'kodiak', 'metro', 'shelter', 'centipede', 'imperial', 'cooper lighting', 'exide', 'bella', 'ez', 'decora', \
'terra', 'design', 'diamond', 'mat', 'finish', 'tilex', 'rhino', 'crock pot', 'legend', 'leatherman', 'remove', \
'architect series', 'greased lightning', 'castle', 'spirit', 'corian', 'peak', 'monster', 'heritage', 'powerwasher',\
'reese', 'tech', 'santa', 'briggs', 'aqua', 'weed eater', 'ion', 'walls', 'max', 'dual', 'shelves', 'square',\
'hickory', "vikrell", "e3", "pro series", "keeper", "coastal shower doors", 'cadet','church','gerber','glidden',\
'cooper wiring devices', 'border blocks', 'commercial electric', 'pri','exteria','extreme', 'veranda',\
'gorilla glue','gorilla','shark','wen']
del_list=list(set(list(del_list)))
for key in del_list:
if key in brand_dict.keys():
del(brand_dict[key])
# save to file
brand_df=pd.DataFrame(brand_dict).transpose()
brand_df.to_csv(PROCESSINGTEXT_DIR+"/brand_statistics.csv")
"""
brand_dict={}
import csv
with open(PROCESSINGTEXT_DIR+'/brand_statistics.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
brand_dict[row['name']]={'cnt_attribute': int(row['cnt_attribute']), 'cnt_query': int(row['cnt_query']),
'name': row['name'], 'nwords': int(row['nwords'])}
"""
### Later we will create features like match between brands in query and product titles.
### But we only process brands that apper frequently enough in the dataset:
### Either 8+ times in product title or [1+ time in query and 3+ times in product title]
for item in brand_dict.keys():
if (brand_dict[item]['cnt_attribute']>=3 and brand_dict[item]['cnt_query']>=1) \
or (brand_dict[item]['cnt_attribute'])>=8:
1
else:
del(brand_dict[item])
brand_df=pd.DataFrame(brand_dict).transpose().sort(['cnt_query'], ascending=[1])
print 'brand dict creation time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF COUNT BRAND NAMES ##################################
#################################################################
#################################################################
##### COUNT MATERIALS ###########################################
#################################################################
### First, create list of unique materials. We need to replace some complex names
### (see change_material() function)
### File attributes.csv for some product_uid contains multiple different values of "Material"
### That is why we have to concatenate all such values to ensure that each product_uid
### has only one value for material
tmp_material=df_attr[df_attr['name']=="Material"][['product_uid','value']]
tmp_material=tmp_material[tmp_material['value']!="Other"]
tmp_material=tmp_material[tmp_material['value']!="*"]
def change_material(s):
replace_dict={'Medium Density Fiberboard (MDF)':'mdf', 'High Density Fiberboard (HDF)':'hdf',\
'Fibre Reinforced Polymer (FRP)': 'frp', 'Acrylonitrile Butadiene Styrene (ABS)': 'abs',\
'Cross-Linked Polyethylene (PEX)':'pex', 'Chlorinated Poly Vinyl Chloride (CPVC)': 'cpvc',\
'PVC (vinyl)': 'pvc','Thermoplastic rubber (TPR)':'tpr','Poly Lactic Acid (PLA)': 'pla',\
'100% Polyester':'polyester','100% UV Olefin':'olefin', '100% BCF Polypropylene': 'polypropylene',\
'100% PVC':'pvc'}
if s in replace_dict.keys():
s=replace_dict[s]
return s
tmp_material['value'] = tmp_material['value'].map(lambda x: change_material(x))
dict_materials = {}
key_list=tmp_material['product_uid'].keys()
for i in range(0,len(key_list)):
if tmp_material['product_uid'][key_list[i]] not in dict_materials.keys():
dict_materials[tmp_material['product_uid'][key_list[i]]]={}
dict_materials[tmp_material['product_uid'][key_list[i]]]['product_uid']=tmp_material['product_uid'][key_list[i]]
dict_materials[tmp_material['product_uid'][key_list[i]]]['cnt']=1
dict_materials[tmp_material['product_uid'][key_list[i]]]['material']=tmp_material['value'][key_list[i]]
else:
##print key_list[i]
dict_materials[tmp_material['product_uid'][key_list[i]]]['material']=dict_materials[tmp_material['product_uid'][key_list[i]]]['material']+' '+tmp_material['value'][key_list[i]]
dict_materials[tmp_material['product_uid'][key_list[i]]]['cnt']+=1
if (i % 10000)==0:
print i
df_materials=pd.DataFrame(dict_materials).transpose()
### merge created 'material' column with df_all
df_all = pd.merge(df_all, df_materials[['product_uid','material']], how='left', on='product_uid')
df_all['material']=df_all['material'].fillna("").map(lambda x: x.encode('utf-8'))
df_all['material_parsed']=col_parser(df_all['material'].map(lambda x: x.replace("Other","").replace("*","")), parse_material=True,add_space_stop_list=[])
### list of all materials
list_materials=list(df_all['material_parsed'].map(lambda x: x.lower()))
### count frequencies of materials in query and product_title
print "\nGenerating material dict: How many times each material appears in the dataset?"
material_dict=get_attribute_dict(list_materials,str_query=str_query)
### create dataframe and save to file
material_df=pd.DataFrame(material_dict).transpose()
material_df.to_csv(PROCESSINGTEXT_DIR+"/material_statistics.csv")
### For further processing keep only materials that appear
### more 10+ times in product_title and at least once in query
"""
for item in material_dict.keys():
if (material_dict[item]['cnt_attribute']>=10 and material_dict[item]['cnt_query']>=1):
1
else:
del(material_dict[item])
"""
for key in set(material_dict.keys()):
if material_dict[key]['cnt_attribute']<20 or material_dict[key]['cnt_query']>3*material_dict[key]['cnt_attribute']:
del(material_dict[key])
material_df=pd.DataFrame(material_dict).transpose().sort(['cnt_query'], ascending=[1])
print 'material dict creation time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
#################################################################
##### END OF COUNT MATERIALS ####################################
#################################################################
#################################################################
##### EXTRACT MATERIALS FROM QUERY AND PRODUCT TITLE ############
#################################################################
### At this moment we have parsed query and product title
### Now we will produce for query:
### brands_in_query, materials_in_query
### query_without_brand_names (we remove brand names from the text)
### query_without_brand_names_and_materials.
### Also, similar columns for product title.
def getremove_brand_or_material_from_str(s, df, replace_brand_dict=None):
    """Remove every brand/material name listed in *df* from the string *s*.

    s                  -- text to clean (parsed query, title or description).
    df                 -- DataFrame with columns 'name' (the brand/material
                          string) and 'nwords' (its word count).
    replace_brand_dict -- optional mapping applied to a found name before it
                          is recorded (e.g. misspelling -> canonical brand).

    Returns a tuple (cleaned_text, found_items) where found_items is a
    ';'-joined string of the names that were removed.

    Multi-word names are tried first (sorted by 'nwords' descending) so that
    e.g. 'red oak' is removed before 'oak' alone could match.
    """
    if replace_brand_dict is None:
        # Avoid a mutable default argument; behavior is identical for callers
        # that omit the parameter.
        replace_brand_dict = {}
    items_found = []
    df = df.sort_values(['nwords'], ascending=[0])
    key_list = df['nwords'].keys()
    # Start with several-word brands or materials.
    for i in range(0, len(key_list)):
        item = df['name'][key_list[i]]
        # Cheap substring test first; the word-boundary regex is only built
        # for candidate hits.
        if item in s:
            # re.escape guards against names containing regex metacharacters
            # (e.g. '+', '.', '('), which previously mis-matched or raised.
            pattern = r'\b' + re.escape(item) + r'\b'
            if re.search(pattern, s) != None:
                s = re.sub(pattern, '', s)
                if item in replace_brand_dict.keys():
                    items_found.append(replace_brand_dict[item])
                else:
                    items_found.append(item)
    return " ".join(s.split()), ";".join(items_found)
### We process only unique queries and product titles
### to reduce the processing time by more than 50%
### (many rows share the same text, so the result is cached per unique string).
aa=list(set(list(df_all['search_term_parsed'])))
my_dict={}
for i in range(0,len(aa)):
    # Each value is a tuple (text_without_brands, "brand1;brand2;...").
    my_dict[aa[i]]=getremove_brand_or_material_from_str(aa[i],brand_df)
    if (i % 5000)==0:
        print "Extracted brands from",i,"out of",len(aa),"unique search terms; ", str(round((time()-t0)/60,1)),"minutes"
# Map the cached tuples back onto the full dataframe and unpack into columns.
df_all['search_term_tuple']= df_all['search_term_parsed'].map(lambda x: my_dict[x])
df_all['search_term_parsed_woBrand']= df_all['search_term_tuple'].map(lambda x: x[0])
df_all['brands_in_search_term']= df_all['search_term_tuple'].map(lambda x: x[1])
print 'extract brands from query time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
# Materials are removed from the already brand-free text, producing the
# '_woBM' (without Brands and Materials) column.
df_all['search_term_tuple']= df_all['search_term_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_all['search_term_parsed_woBM']= df_all['search_term_tuple'].map(lambda x: x[0])
df_all['materials_in_search_term']= df_all['search_term_tuple'].map(lambda x: x[1])
df_all=df_all.drop('search_term_tuple',axis=1)
print 'extract materials from query time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
############################## same steps for product titles
aa=list(set(list(df_all['product_title_parsed'])))
my_dict={}
for i in range(0,len(aa)):
    my_dict[aa[i]]=getremove_brand_or_material_from_str(aa[i],brand_df)
    if (i % 5000)==0:
        print "Extracted brands from",i,"out of",len(aa),"unique product titles; ", str(round((time()-t0)/60,1)),"minutes"
df_all['product_title_tuple']= df_all['product_title_parsed'].map(lambda x: my_dict[x])
df_all['product_title_parsed_woBrand']= df_all['product_title_tuple'].map(lambda x: x[0])
df_all['brands_in_product_title']= df_all['product_title_tuple'].map(lambda x: x[1])
print 'extract brands from product title time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
df_all['product_title_tuple']= df_all['product_title_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_all['product_title_parsed_woBM']= df_all['product_title_tuple'].map(lambda x: x[0])
df_all['materials_in_product_title']= df_all['product_title_tuple'].map(lambda x: x[1])
df_all=df_all.drop('product_title_tuple',axis=1)
print 'extract materials from product titles time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF EXTRACT MATERIALS FROM QUERY AND PRODUCT TITLE #####
#################################################################
###################################
##### Tagging #####################
### We use nltk.pos_tagger() (via the file-local col_tagger helper) to tag
### the brand- and material-free text of queries and product titles.
df_all['search_term_tokens'] =col_tagger(df_all['search_term_parsed_woBM'])
print 'search term tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
df_all['product_title_tokens'] =col_tagger(df_all['product_title_parsed_woBM'])
print 'product title tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
#################################################################
##### PROCESS ATTRIBUTES BULLETS ################################
#################################################################
### File attribute.csv contains 5343 different categories
### (https://www.kaggle.com/briantc/home-depot-product-search-relevance/homedepot-first-dataexploreation-k)
### Here we process only text in categories named 'Bullet##' where # stands for a number.
### This text is similar to product descriptions from 'product_descriptions.csv'.
### First, we concatenate all bullets for the same product_uid.
# Missing uids become 0 (the sentinel entry is deleted below).
df_attr['product_uid']=df_attr['product_uid'].fillna(0)
df_attr['value']=df_attr['value'].fillna("")
df_attr['name']=df_attr['name'].fillna("")
dict_attr={}
# One accumulator entry per distinct product_uid.
for product_uid in list(set(list(df_attr['product_uid']))):
    dict_attr[int(product_uid)]={'product_uid':int(product_uid),'attribute_bullets':[]}
for i in range(0,len(df_attr['product_uid'])):
    if (i % 100000)==0:
        print "Read",i,"out of", len(df_attr['product_uid']), "rows in attributes.csv in", round((time()-t0)/60,1) ,'minutes'
    # Only rows whose attribute name starts with 'Bullet' are collected.
    if df_attr['name'][i][0:6]=="Bullet":
        dict_attr[int(df_attr['product_uid'][i])]['attribute_bullets'].append(df_attr['value'][i])
# Drop the sentinel entry created by the fillna(0) above, if present.
if 0 in dict_attr.keys():
    del(dict_attr[0])
# Join each product's bullets into one '. '-separated text with a trailing dot.
for item in dict_attr.keys():
    if len(dict_attr[item]['attribute_bullets'])>0:
        dict_attr[item]['attribute_bullets']=". ".join(dict_attr[item]['attribute_bullets'])
        dict_attr[item]['attribute_bullets']+="."
    else:
        dict_attr[item]['attribute_bullets']=""
df_attr_bullets=pd.DataFrame(dict_attr).transpose()
# Collapse '..' created when a bullet already ended with a dot.
df_attr_bullets['attribute_bullets']=df_attr_bullets['attribute_bullets'].map(lambda x: x.replace("..",".").encode('utf-8'))
print 'create attributes bullets time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Then we follow similar steps as for query and product title above.
### Parsing
df_attr_bullets['attribute_bullets_parsed'] = df_attr_bullets['attribute_bullets'].map(lambda x:str_parser(x,add_space_stop_list=[]))
print 'attribute bullets parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Extracting brands...
df_attr_bullets['attribute_bullets_tuple']= df_attr_bullets['attribute_bullets_parsed'].map(lambda x: getremove_brand_or_material_from_str(x,brand_df))
df_attr_bullets['attribute_bullets_parsed_woBrand']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[0])
df_attr_bullets['brands_in_attribute_bullets']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[1])
print 'extract brands from attribute_bullets time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and materials from text...
df_attr_bullets['attribute_bullets_tuple']= df_attr_bullets['attribute_bullets_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_attr_bullets['attribute_bullets_parsed_woBM']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[0])
df_attr_bullets['materials_in_attribute_bullets']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[1])
df_attr_bullets=df_attr_bullets.drop(['attribute_bullets_tuple'],axis=1)
print 'extract materials from attribute_bullets time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and tagging text using NLTK
df_attr_bullets['attribute_bullets_tokens'] =col_tagger(df_attr_bullets['attribute_bullets_parsed_woBM'])
print 'attribute bullets tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF PROCESS ATTRIBUTES BULLETS #########################
#################################################################
#################################################################
##### PROCESS PRODUCT DESCRIPTIONS ##############################
#################################################################
df_pro_desc = pd.read_csv(DATA_DIR+'/product_descriptions.csv')
### Parsing (same str_parser() used for queries and titles earlier).
df_pro_desc['product_description_parsed'] = df_pro_desc['product_description'].map(lambda x:str_parser(x,add_space_stop_list=add_space_stop_list).encode('utf-8'))
print 'product description parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Extracting brands...
df_pro_desc['product_description_tuple']= df_pro_desc['product_description_parsed'].map(lambda x: getremove_brand_or_material_from_str(x,brand_df))
df_pro_desc['product_description_parsed_woBrand']= df_pro_desc['product_description_tuple'].map(lambda x: x[0])
df_pro_desc['brands_in_product_description']= df_pro_desc['product_description_tuple'].map(lambda x: x[1])
print 'extract brands from product_description time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and materials from text...
df_pro_desc['product_description_tuple']= df_pro_desc['product_description_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_pro_desc['product_description_parsed_woBM']= df_pro_desc['product_description_tuple'].map(lambda x: x[0])
df_pro_desc['materials_in_product_description']= df_pro_desc['product_description_tuple'].map(lambda x: x[1])
df_pro_desc=df_pro_desc.drop(['product_description_tuple'],axis=1)
print 'extract materials from product_description time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and tagging text using NLTK
df_pro_desc['product_description_tokens'] = col_tagger(df_pro_desc['product_description_parsed_woBM'])
print 'product decription tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
# Keep the raw description utf-8 encoded (presumably for saving to csv later
# in the file -- TODO confirm).
df_pro_desc['product_description']= df_pro_desc['product_description'].map(lambda x: x.encode('utf-8'))
# The commented-out stemming calls below are executed later, in the
# STEMMING section of this file.
#df_attr_bullets['attribute_bullets_stemmed']=df_attr_bullets['attribute_bullets_parsed'].map(lambda x:str_stemmer_wo_parser(x))
#df_attr_bullets['attribute_bullets_stemmed_woBM']=df_attr_bullets['attribute_bullets_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
#df_attr_bullets['attribute_bullets_stemmed_woBrand']=df_attr_bullets['attribute_bullets_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
#df_pro_desc['product_description_stemmed']=df_pro_desc['product_description_parsed'].map(lambda x:str_stemmer_wo_parser(x))
#df_pro_desc['product_description_stemmed_woBM']=df_pro_desc['product_description_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
#df_pro_desc['product_description_stemmed_woBrand']=df_pro_desc['product_description_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
#print 'stemming description and bullets time:',round((time()-t0)/60,1) ,'minutes\n'
#t0 = time()
##### END OF PROCESS PRODUCT DESCRIPTIONS #######################
#################################################################
#################################################################
##### GET IMPORTANT WORDS FROM QUERY AND PRODUCT TITLE ##########
#################################################################
### We started this work on our own by observing irregularities in model predictions,
### but we ended up with something similar to extracting the top trigram from
### http://blog.kaggle.com/2015/07/22/crowdflower-winners-interview-3rd-place-team-quartet/
### We found that some words are more important than others
### for predicting the relevance. For example, if the customer
### asks for 'kitchen faucet with side spray', she is looking for a
### faucet, not for spray, side or kitchen. Therefore, faucets will
### be more relevant for this query, but sprays, sides and kitchens
### will be less relevant.
### Let us define the most important word (or keyword) 'thekey'.
### The two words before it are 'beforethekey' and 'before2thekey'.
### Example: query='outdoor ceiling fan with light'
###          thekey='fan'
###          beforethekey='ceiling'
###          before2thekey='outdoor'
### Below we build an algorithm to get such important words
### from query and product titles.
### Our task is simplified due to (1) the fairly uniform structure of
### product titles and (2) the small number of words in a query.
### In the first step we delete irrelevant words using the following function.
### Although it may appear complex since we tried to correctly process as many
### entries as possible, the basic logic is very simple:
### delete all words after 'with', 'for', 'in', 'that', 'on',
### as well as, in some cases, all words after a comma ','.
def cut_product_title(s):
    """Cut a raw product title down to its head phrase.

    Deletes the descriptive tail of a title -- everything after 'with',
    'for', 'in', 'that', 'on', ' - ' and (for short tails) after a comma --
    so that the remaining words end with the product's head noun.
    Operates on the RAW title (not the parsed one) because punctuation
    matters here.  Relies on replace_in_parser() defined earlier in this
    file.  Returns the cut title with leftover dashes stripped from words.
    """
    s=s.lower()
    # Decode HTML entities first.  These three patterns were corrupted by
    # entity-decoding of the source file ('&' -> no-op, ' ' -> deleting
    # every space, ''' -> syntax error); restored to the literal
    # entity strings that the raw Home Depot data contains.
    s = re.sub('&amp;', '&', s)
    s = re.sub('&nbsp;', '', s)
    s = re.sub('&#39;', '', s)
    s = re.sub(r'(?<=[0-9]),[\ ]*(?=[0-9])', '', s)  # drop comma between digits: '1, 000' -> '1000'
    s = re.sub(r'(?<=\))(?=[a-zA-Z0-9])', ' ', s) # add space between parentheses and letters
    s = re.sub(r'(?<=[a-zA-Z0-9])(?=\()', ' ', s) # add space between parentheses and letters
    s = s.replace(";",". ")
    s = s.replace(":"," . ")
    s=s.replace("&"," and ")
    s = re.sub('[^a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,\+]', ' ', s)
    s= " ".join(s.split())
    s = re.sub(r'(?<=[0-9])\.\ ', ' ', s)
    s = re.sub(r'(?<=\ in)\.(?=[a-zA-Z])', '. ', s)
    s=replace_in_parser(s)
    s = re.sub(r'\-discontinued', '', s)
    s = re.sub(r' \+ free app(?=$)', '', s)
    s = s.replace("+"," ")
    # Drop parenthesized asides entirely.
    s = re.sub('\([a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]+?\)', '', s)
    #s= re.sub('[\(\)]', '', s)
    if " - " in s:
        #srch=re.search(r'(?<= - )[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]+',s)
        if re.search(r'(\d|\.|mm|cm|in|ft|mhz|volt|\bhp|\bl|oz|lb|gal) \- \d',s)==None \
        and re.search(r' (sign|carpet|decal[s]*|figure[s]*)(?=$)',s)==None and re.search(r'\d \- (way\b|day\b)',s)==None:
            #if ' - ' is found and the string doesnt end with word 'sign' or 'carpet' or 'decal' and not string '[0-9] - way' found
            s = re.sub(r' - [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s) #greedy regular expression
    if "uilt in" not in s and "uilt In" not in s:
        s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*])[\ ]+[I|i]n [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = s.replace(" - "," ")
    if re.search(r' (sign|decal[s]*|figure[s]*)(?=$)',s)==None:
        s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[W|w]ith [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[W|w]ithout [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*])[\ ]+[w]/[\ a-z0-9][a-z0-9][a-z0-9\.][a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    if " fits for " not in s and " for fits " not in s:
        s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+fits [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    if " for lease " not in s and re.search(r' (sign|decal[s]*|figure[s]*)(?=$)',s)==None:
        s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[F|f]or [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[T|t]hat [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    # 'on <X>' tails are removed only for a hand-collected list of objects.
    s = re.sub(r' on (wheels|a pallet|spool|bracket|3 in|blue post|360|track|spike|rock|lamp|11 in|2 in|pedestal|square base|tub|steel work)[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r' on (plinth|insulator|casters|pier base|reel|fireplace|moon|bracket|24p ebk|zinc spike|mailbox|cream chand|blue post)[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r'(?<= on white stiker)[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    # NOTE(review): '[installing]' below is a single-character class (one of
    # i,n,s,t,a,l,g), probably intended as '(installing)'.  Left unchanged to
    # preserve the pipeline's behavior.
    s = re.sub(r' on [installing][a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    if "," in s:
        srch=re.search(r'(?<=, )[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*',s) #greedy regular expression
        if srch!=None:
            # Only cut the comma tail when it is short (< 25 letters/spaces).
            if len(re.sub('[^a-zA-Z\ ]', '', srch.group(0)))<25:
                s = re.sub(r', [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=recessed door reinforcer), [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    #s = re.sub(r'(?<=[a-zA-Z0-9]),\ [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*]) [F|f]eaturing [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*]) [I|i]ncludes [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(' [\#]\d+[\-\d]*[\,]*', '', s)  # drop model numbers like ' #1234-5'
    s = re.sub(r'(?<=[a-zA-Z\ ])\/(?=[a-zA-Z])', ' ', s)  # 'a/b' -> 'a b'
    s = re.sub(r'(?<=[a-zA-Z\ ])\-(?=[a-zA-Z])', ' ', s)  # 'a-b' -> 'a b'
    s = s.replace(",",". ")
    s = s.replace("..",".")
    s = s.replace("..",".")
    s = s.replace("*","")
    # Drop words with digit-dash-digit patterns; strip remaining dashes.
    return " ".join([word.replace("-","") for word in s.split() if re.search(r'\d\-\d',word)==None])
### The next step is to identify the most important words.
### We exclude brand names and similar words like 'EpoxyShield'
### (see how add_space_stop_list is created earlier in the file).
# Single-word brand names are never accepted as key words.
not_keyword_list=list(brand_df['name'][brand_df['nwords']==1])
# Stop-list entries that WordNet does not know as nouns are excluded too.
# (wn is presumably nltk.corpus.wordnet, imported earlier -- TODO confirm.)
for item in add_space_stop_list:
    if len(wn.synsets(item,pos=wn.NOUN))==0:
        not_keyword_list.append(item)
### We want 'thekey' to be a noun as identified by NLTK WordNet
### and NN, NNS, VBG in the sentence as identified by NLTK.pos_tagger().
### Since pos_tagger often fails, we run it two times: on the full sentence with
### punctuation and on separate words. We reject the word only if in neither run
### it is identified as NN, NNS, VBG.
### We also have to create in_list
### with words that are always to be identified as thekeys. Words ending with
### '-er', '-ers', '-or', '-ors' are also thekeys.
### We exclude some words from potential thekeys; they are added to out_list.
### Once thekey is identified, we read the words to the left and consider
### them as keywords (or important words). If we encounter nouns, we continue.
### If we encounter ['JJ','JJS', 'JJR', 'RB', 'RBS', 'RBR', 'VBG', 'VBD', 'VBN','VBP'],
### we add the word to the keywords, but stop unless the next word is 'and'.
### In other cases (word with digits or preposition etc.) we just stop.
# The word lists from the following function are shared on the forum.
def get_key_words(tag_list,wordtag_list, string_output=False,out_list=not_keyword_list[:]):
    """Walk a tagged phrase right-to-left and collect its important words.

    The right-most accepted word becomes 'thekey' (the head noun of the
    phrase); nouns/adjectives read to its left are the other key words.

    tag_list      -- (word, POS) pairs from tagging the full sentence.
    wordtag_list  -- (word, POS) pairs from tagging each word separately.
                     pos_tagger often fails, so both runs are consulted: a
                     candidate is rejected only if NEITHER run marks it as
                     NN/NNS/VBG.
    string_output -- if True, return the key words joined by spaces;
                     otherwise return the list of (word, POS) pairs.
    out_list      -- words never accepted as thekey.  The default is a copy
                     of not_keyword_list taken once at definition time; the
                     body only rebinds (never mutates) it, so the shared
                     default is safe.
    """
    i=len(tag_list)
    # Words that are ALWAYS accepted as thekey, regardless of the taggers.
    in_list=['tv','downrod', 'sillcock', 'shelving', 'luminaire', 'paracord', 'ducting', \
    'recyclamat', 'rebar', 'spackling', 'hoodie', 'placemat', 'innoculant', 'protectant', \
    'colorant', 'penetrant', 'attractant', 'bibb', 'nosing', 'subflooring', 'torchiere', 'thhn',\
    'lantern','epoxy','cloth','trim','adhesive','light','lights','saw','pad','polish','nose','stove',\
    'elbow','elbows','lamp','door','doors','pipe','bulb','wood','woods','wire','sink','hose','tile','bath','table','duct',\
    'windows','mesh','rug','rugs','shower','showers','wheels','fan','lock','rod','mirror','cabinet','shelves','paint',\
    'plier','pliers','set','screw','lever','bathtub','vacuum','nut', 'nipple','straw','saddle','pouch','underlayment',\
    'shade','top', 'bulb', 'bulbs', 'paint', 'oven', 'ranges', 'sharpie', 'shed', 'faucet',\
    'finish','microwave', 'can', 'nozzle', 'grabber', 'tub', 'angles','showerhead', 'dehumidifier', 'shelving', 'urinal', 'mdf']
    # Extra words never accepted as thekey (colors, materials, units, noise).
    # NOTE(review): in the first line below, 'aaa' 'b' is implicit string
    # concatenation and yields the single token 'aaab' -- a comma is probably
    # missing ('aaa', 'b').  Kept as-is to preserve behavior; note that 'aaa'
    # also appears later in the list.
    out_list= out_list +['free','height', 'width', 'depth', 'model','pcs', 'thick','pack','adhesive','steel','cordless', 'aaa' 'b', 'nm', 'hc', 'insulated','gll', 'nutmeg',\
    'pnl', 'sotc','withe','stainless','chrome','beige','max','acrylic', 'cognac', 'cherry', 'ivory','electric','fluorescent', 'recessed', 'matte',\
    'propane','sku','brushless','quartz','gfci','shut','sds','value','brown','white','black','red','green','yellow','blue','silver','pink',\
    'gray','gold','thw','medium','type','flush',"metaliks", 'metallic', 'amp','btu','gpf','pvc','mil','gcfi','plastic', 'vinyl','aaa',\
    'aluminum','brass','antique', 'brass','copper','nickel','satin','rubber','porcelain','hickory','marble','polyacrylic','golden','fiberglass',\
    'nylon','lmapy','maple','polyurethane','mahogany','enamel', 'enameled', 'linen','redwood', 'sku','oak','quart','abs','travertine', 'resin',\
    'birch','birchwood','zinc','pointe','polycarbonate', 'ash', 'wool', 'rockwool', 'teak','alder','frp','cellulose','abz', 'male', 'female', 'used',\
    'hepa','acc','keyless','aqg','arabesque','polyurethane', 'polyurethanes','ardex','armorguard','asb', 'motion','adorne','fatpack',\
    'fatmax','feet','ffgf','fgryblkg', 'douglas', 'fir', 'fleece','abba', 'nutri', 'thermal','thermoclear', 'heat', 'water', 'systemic',\
    'heatgasget', 'cool', 'fusion', 'awg', 'par', 'parabolic', 'tpi', 'pint', 'draining', 'rain', 'cost', 'costs', 'costa','ecostorage',
    'mtd', 'pass', 'emt', 'jeld', 'npt', 'sch', 'pvc', 'dusk', 'dawn', 'lathe','lows','pressure', 'round', 'series','impact', 'resistant','outdoor',\
    'off', 'sawall', 'elephant', 'ear', 'abb', 'baby', 'feedback', 'fastback','jumbo', 'flexlock', 'instant', 'natol', 'naples','florcant',\
    'canna','hammock', 'jrc', 'honeysuckle', 'honey', 'serrano','sequoia', 'amass', 'ashford', 'gal','gas', 'gasoline', 'compane','occupancy',\
    'home','bakeware', 'lite', 'lithium', 'golith','gxwh', 'wht', 'heirloom', 'marine', 'marietta', 'cambria', 'campane','birmingham',\
    'bellingham','chamois', 'chamomile', 'chaosaw', 'chanpayne', 'thats', 'urethane', 'champion', 'chann', 'mocha', 'bay', 'rough',\
    'undermount', 'price', 'prices', 'way', 'air', 'bazaar', 'broadway', 'driveway', 'sprayway', 'subway', 'flood', 'slate', 'wet',\
    'clean', 'tweed', 'weed', 'cub', 'barb', 'salem', 'sale', 'sales', 'slip', 'slim', 'gang', 'office', 'allure', 'bronze', 'banbury',\
    'tuscan','tuscany', 'refinishing', 'fleam','schedule', 'doeskin','destiny', 'mean', 'hide', 'bobbex', 'pdi', 'dpdt', 'tri', 'order',\
    'kamado','seahawks','weymouth', 'summit','tel','riddex', 'alick','alvin', 'ano', 'assy', 'grade', 'barranco', 'batte','banbury',\
    'mcmaster', 'carr', 'ccl', 'china', 'choc', 'colle', 'cothom', 'cucbi', 'cuv', 'cwg', 'cylander', 'cylinoid', 'dcf', 'number', 'ultra',\
    'diat','discon', 'disconnect', 'plantation', 'dpt', 'duomo', 'dupioni', 'eglimgton', 'egnighter','ert','euroloft', 'everready',\
    'felxfx', 'financing', 'fitt', 'fosle', 'footage', 'gpf','fro', 'genis', 'giga', 'glu', 'gpxtpnrf', 'size', 'hacr', 'hardw',\
    'hexagon', 'hire', 'hoo','number','cosm', 'kelston', 'kind', 'all', 'semi', 'gloss', 'lmi', 'luana', 'gdak', 'natol', 'oatu',\
    'oval', 'olinol', 'pdi','penticlea', 'portalino', 'racc', 'rads', 'renat', 'roc', 'lon', 'sendero', 'adora', 'sleave', 'swu',
    'tilde', 'cordoba', 'tuvpl','yel', 'acacia','mig','parties','alkaline','plexiglass', 'iii', 'watt']
    output_list=[]
    if i>0:
        finish=False
        started = False
        # Scan from the last tag towards the first.
        while not finish:
            i-=1
            if started==False:
                # Looking for thekey: accept in_list words, '-er/-or' agent
                # nouns without digits, or WordNet nouns (with at least one
                # vowel) that either tagging run marks NN/NNS/VBG.
                if (wordtag_list[i][0] not in out_list) \
                and (wordtag_list[i][0] in in_list \
                or (re.search(r'(?=[e|o]r[s]*\b)',wordtag_list[i][0])!=None and re.search(r'\d+',wordtag_list[i][0])==None) \
                or (len(wordtag_list[i][0])>2 and re.search(r'\d+',wordtag_list[i][0])==None and len(wn.synsets(wordtag_list[i][0],pos=wn.NOUN))>0 \
                and (wordtag_list[i][1] in ['NN', 'NNS','VBG'] or tag_list[i][1] in ['NN', 'NNS','VBG']) \
                and len(re.sub('[^aeiouy]', '', wordtag_list[i][0]))>0 )): #exclude VBD
                    started = True
                    output_list.insert(0,wordtag_list[i])
                    # handle exceptions below
                    # 'iron' only with -ing is OK: soldering iron, seaming iron
                    if i>1 and wordtag_list[i][0] in ['iron','irons'] and re.search(r'ing\b',wordtag_list[i-1][0])==None:
                        output_list=[]
                        started = False
            else:
                # thekey found: keep collecting words to its left.
                if tag_list[i][1] in ['NN','NNP', 'NNPS', 'NNS']:
                    # Nouns continue the chain (no digits, 3+ chars, known to
                    # WordNet or an '-er/-or' form, not a unit word).
                    if len(re.sub('[^0-9]', '', tag_list[i][0]))==0 and \
                    len(re.sub('[^a-zA-Z0-9\-]', '', tag_list[i][0]))>2 \
                    and tag_list[i][0] not in ['amp','btu','gpf','pvc','mil','watt','gcfi']\
                    and (len(wn.synsets(tag_list[i][0]))>0 or re.search(r'(?=[e|o]r[s]*\b)',tag_list[i][0])!=None):
                        output_list.insert(0,tag_list[i])
                elif tag_list[i][0]=='and':
                    # 'and' is kept, but restarts the thekey search.
                    output_list.insert(0,tag_list[i])
                    started=False
                else:
                    # Adjectives/adverbs/participles are added but (unless
                    # followed by 'and') terminate the scan.
                    if tag_list[max(0,i-1)][0]!="and" and (tag_list[i][1] not in ['VBD', 'VBN']):
                        finish=True
                    if tag_list[i][1] in ['JJ','JJS', 'JJR', 'RB', 'RBS', 'RBR', 'VBG', 'VBD', 'VBN','VBP']:
                        if len(re.sub('[^0-9]', '', tag_list[i][0]))==0 and \
                        len(re.sub('[^a-zA-Z0-9\-]', '', tag_list[i][0]))>2 \
                        and tag_list[i][0] not in ['amp','btu','gpf','pvc','mil','watt','gcfi']\
                        and len(wn.synsets(tag_list[i][0]))>0:
                            output_list.insert(0,tag_list[i])
            if i==0:
                finish=True
    if string_output==True:
        return " ".join([tag[0] for tag in output_list])
    else:
        return output_list
### Apply cut_product_title() to product_title.
### We have to start with product_title, not product_title_parsed,
### since punctuation is important for our task ...
df_all['product_title_cut']= df_all['product_title'].map(lambda x: cut_product_title(x).encode('utf-8'))
### ... and that is why we have to remove the brand names again
### (cached per unique cut title, as for the full titles above).
aa=list(set(list(df_all['product_title_cut'])))
my_dict={}
for i in range(0,len(aa)):
    my_dict[aa[i]]=getremove_brand_or_material_from_str(aa[i],brand_df)
    if (i % 5000)==0:
        print "processed "+str(i)+" out of "+str(len(aa))+" unique cut product titles; "+str(round((time()-t0)/60,1))+" minutes"
df_all['product_title_cut_tuple']= df_all['product_title_cut'].map(lambda x: my_dict[x])
df_all['product_title_cut_woBrand']= df_all['product_title_cut_tuple'].map(lambda x: x[0])
df_all=df_all.drop(['product_title_cut_tuple'],axis=1)
print 'extract brands from cut product title:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Tagging two times: full sentences and separate words
### (get_key_words() consults both runs; see its docstring).
df_all['product_title_cut_tokens'] =col_tagger(df_all['product_title_cut_woBrand'])
df_all['product_title_cut_wordtokens'] =col_wordtagger(df_all['product_title_cut_woBrand'])
### The same steps for the search term, but now we continue with the
### preprocessed results since punctuation is not as important in the query
### as it is in the product title.
df_all['search_term_cut_woBrand']= df_all['search_term_parsed_woBrand'].map(lambda x: cut_product_title(x).encode('utf-8'))
df_all['search_term_cut_tokens'] =col_tagger(df_all['search_term_cut_woBrand'])
df_all['search_term_cut_wordtokens'] =col_wordtagger(df_all['search_term_cut_woBrand'])
### Transform tags into text. It may look like an unnecessary step, but in
### our work we frequently save processing results and recover tags from
### text. Here this transformation makes the _tokens variables compatible
### with the parser_mystr2tuple() function.
df_all['search_term_cut_tokens']=df_all['search_term_cut_tokens'].map(lambda x: str(x))
df_all['search_term_cut_wordtokens']=df_all['search_term_cut_wordtokens'].map(lambda x: str(x))
df_all['product_title_cut_tokens']=df_all['product_title_cut_tokens'].map(lambda x: str(x))
df_all['product_title_cut_wordtokens']=df_all['product_title_cut_wordtokens'].map(lambda x: str(x))
# Extract the important-word strings for query and title.
df_all['search_term_keys']=df_all.apply(lambda x: \
       get_key_words(parser_mystr2tuple(x['search_term_cut_tokens']),parser_mystr2tuple(x['search_term_cut_wordtokens']),string_output=True),axis=1)
df_all['product_title_keys']=df_all.apply(lambda x: \
       get_key_words(parser_mystr2tuple(x['product_title_cut_tokens']),parser_mystr2tuple(x['product_title_cut_wordtokens']),string_output=True),axis=1)
### Now we just need to assign the last word from keywords as thekey,
### the words before it as beforethekey and before2thekey.
### One more trick: we first get this trigram from product_title,
### then use thekey from the product title to choose the most similar word
### in case the query contains two candidates separated by 'and'.
### For example, for the query 'microwave and stove' we may choose either
### 'microwave' or 'stove' depending on thekey from the product title.
def get_last_words_from_parsed_title(s):
    """Split a key-word string and return (thekey, beforethekey, before2thekey).

    thekey is the final word of *s*; the two preceding words fill the other
    slots.  A literal 'and' in either preceding slot is replaced by ''.
    Missing positions (short or empty input) come back as '' as well.
    """
    tokens = s.split()
    if not tokens:
        return "", "", ""
    thekey = tokens[-1]
    beforethekey = ""
    before2thekey = ""
    if len(tokens) > 1:
        beforethekey = tokens[-2]
        if beforethekey == "and":
            beforethekey = ""
        # NOTE(review): the original also tested beforethekey != "and" here,
        # but that check ran AFTER the blanking above and so never failed;
        # the third-from-last word is therefore taken whenever it exists.
        if len(tokens) > 2:
            before2thekey = tokens[-3]
            if before2thekey == "and":
                before2thekey = ""
    return thekey, beforethekey, before2thekey
def get_last_words_from_parsed_query(s,last_word_in_title):
    """Return (thekey, beforethekey, before2thekey) for a query key-word
    string *s*.

    Like get_last_words_from_parsed_title(), with one extra step: when the
    query offers two head-word candidates (e.g. 'microwave and stove'), the
    one more similar to *last_word_in_title* (thekey of the product title)
    is chosen, using find_similarity() defined elsewhere in this file.
    """
    words=s.split()
    if len(words)==0:
        last_word=""
        word_before_last=""
        word2_before_last=""
    else:
        last_word=words[len(words)-1]
        word_before_last=""
        word2_before_last=""
        if len(words)>1:
            word_before_last=words[len(words)-2]
            # word2_before_last is read BEFORE word_before_last is blanked
            # below, so the "and" guard here is effective (unlike the title
            # variant, where the statement order differs).
            if len(words)>2 and word_before_last!="and":
                word2_before_last=words[len(words)-3]
                if word2_before_last=="and":
                    word2_before_last=""
            if word_before_last=="and":
                word_before_last=""
            if len(words)>2:
                # Compare the last word with the word two positions earlier
                # (the candidate on the other side of a possible 'and') and
                # keep whichever is closer to the title's thekey.
                cmp_word=words[len(words)-3]
                sm1=find_similarity(last_word,last_word_in_title)[0]
                sm2=find_similarity(cmp_word,last_word_in_title)[0]
                if sm1<sm2:
                    last_word=cmp_word
                    if len(words)>3:
                        # NOTE(review): word2_before_last is not updated when
                        # the candidate switches -- confirm this is intended.
                        word_before_last=words[len(words)-4]
    return last_word, word_before_last, word2_before_last
### get trigram from product title
# Derive the last three informative words ("the key" and the two words before it)
# from the parsed product title; get_last_words_from_parsed_title returns a 3-tuple.
df_all['product_title_thekey_tuple']=df_all['product_title_keys'].map(lambda x: get_last_words_from_parsed_title(x))
df_all['product_title_thekey']=df_all['product_title_thekey_tuple'].map(lambda x: x[0])
df_all['product_title_beforethekey']=df_all['product_title_thekey_tuple'].map(lambda x: x[1])
df_all['product_title_before2thekey']=df_all['product_title_thekey_tuple'].map(lambda x: x[2])
# Tuple column was only a staging value; drop it once unpacked.
df_all=df_all.drop(['product_title_thekey_tuple'],axis=1)
### get trigram from query
# Query key extraction also needs the title key as context, hence apply over rows.
df_all['search_term_thekey_tuple']=df_all.apply(lambda x: \
    get_last_words_from_parsed_query(x['search_term_keys'],x['product_title_thekey']),axis=1)
#df_all['thekey_info']=df_all['search_term_keys']+"\t"+df_all['product_title_thekey']
#df_all['search_term_thekey_tuple']=df_all['thekey_info'].map(lambda x: get_last_words_from_parsed_query(x.split("\t")[0],x.split("\t")[1]))
df_all['search_term_thekey']=df_all['search_term_thekey_tuple'].map(lambda x: x[0])
df_all['search_term_beforethekey']=df_all['search_term_thekey_tuple'].map(lambda x: x[1])
df_all['search_term_before2thekey']=df_all['search_term_thekey_tuple'].map(lambda x: x[2])
df_all=df_all.drop(['search_term_thekey_tuple'],axis=1)
#df_all['search_term_thekey_stemmed']=df_all['search_term_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['product_title_thekey_stemmed']=df_all['product_title_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['search_term_beforethekey_stemmed']=df_all['search_term_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['product_title_beforethekey_stemmed']=df_all['product_title_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['search_term_before2thekey_stemmed']=df_all['search_term_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['product_title_before2thekey_stemmed']=df_all['product_title_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
# Python 2 print statements; t0 is a section timer reset after each stage.
print 'extracting important words time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF GET IMPORTANT WORDS FROM QUERY AND PRODUCT TITLE ###
#################################################################
#################################################################
##### STEMMING ##################################################
#################################################################
### We also exclude stopwords here.
### Sometimes people search for 'can' with the meaning 'a container'
### like in 'trash can'. That is why we keep 'can' in query and product title.
# Attribute bullets / descriptions are stemmed with the default stoplist;
# query and title columns use stoplist_wo_can to preserve the word 'can'.
df_attr_bullets['attribute_bullets_stemmed']=df_attr_bullets['attribute_bullets_parsed'].map(lambda x:str_stemmer_wo_parser(x))
df_attr_bullets['attribute_bullets_stemmed_woBM']=df_attr_bullets['attribute_bullets_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
df_attr_bullets['attribute_bullets_stemmed_woBrand']=df_attr_bullets['attribute_bullets_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
df_pro_desc['product_description_stemmed']=df_pro_desc['product_description_parsed'].map(lambda x:str_stemmer_wo_parser(x))
df_pro_desc['product_description_stemmed_woBM']=df_pro_desc['product_description_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
df_pro_desc['product_description_stemmed_woBrand']=df_pro_desc['product_description_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
df_all['search_term_keys_stemmed']=df_all['search_term_keys'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_keys_stemmed']=df_all['product_title_keys'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_stemmed']=df_all['search_term_parsed'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_stemmed_woBM']=df_all['search_term_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_stemmed_woBrand']=df_all['search_term_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_stemmed']=df_all['product_title_parsed'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_stemmed_woBM']=df_all['product_title_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_stemmed_woBrand']=df_all['product_title_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_thekey_stemmed']=df_all['search_term_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_thekey_stemmed']=df_all['product_title_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_beforethekey_stemmed']=df_all['search_term_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_beforethekey_stemmed']=df_all['product_title_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_before2thekey_stemmed']=df_all['search_term_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_before2thekey_stemmed']=df_all['product_title_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
print 'stemming time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF STEMMING ###########################################
#################################################################
### Save everything into files
# Encode before writing; presumably needed because csv writing chokes on
# non-ASCII titles under Python 2 — TODO confirm against the csv writer used.
df_all['product_title']= df_all['product_title'].map(lambda x: x.encode('utf-8'))
df_all.to_csv(PROCESSINGTEXT_DIR+"/df_train_and_test_processed.csv", index=False)
df_attr_bullets.to_csv(PROCESSINGTEXT_DIR+"/df_attribute_bullets_processed.csv", index=False)
df_pro_desc.to_csv(PROCESSINGTEXT_DIR+"/df_product_descriptions_processed.csv", index=False)
print 'TOTAL PROCESSING TIME:',round((time()-t1)/60,1) ,'minutes\n'
t1 = time()
# Free the memory held by the frames by dropping every column.
df_all=df_all.drop(list(df_all.keys()),axis=1)
df_attr_bullets=df_attr_bullets.drop(list(df_attr_bullets.keys()),axis=1)
df_pro_desc=df_pro_desc.drop(list(df_pro_desc.keys()),axis=1)
| mit | -6,932,427,172,720,892,000 | 49.267617 | 266 | 0.608824 | false |
vsjha18/finplots | rsi.py | 1 | 5108 | """
This module plots Relative Strength Index on a given
axis of matplotlib. All the style attributes are passed
through argument so that it can be independently used
as general purpose library in most of the trivial situations.
However for ease of use we have one more sugar api which needs
only the style object axis and the dataframe. This api simply
calls the underlying _plot_rsi by resolving its arguments from
the style object.
"""
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.ticker as mticker
from finfunctions import relative_strength_index
from finplots import style
def plot_rsi(ax, df, period=14, style=style):
    """ plot rsi
    Sugar wrapper around _plot_rsi: computes the RSI series from the price
    dataframe and resolves every cosmetic argument from the style object, so
    callers only need an axis and a dataframe.
    :param ax: axis to draw on
    :param df: price dataframe; must expose a ``close`` series and a plottable index
    :param period: RSI look-back period in bars (default 14)
    :param style: style object supplying the rsi_* / legend_* attributes
    :return: the same axis, with the RSI plot drawn on it
    """
    rsi_data = relative_strength_index(df.close, n=period)
    legend_text = 'RSI %s' % str(period)
    # Forward every cosmetic setting explicitly so _plot_rsi stays
    # independent of the style object.
    ax = _plot_rsi(ax, df.index, rsi_data,
                   line_color=style.rsi_line_color,
                   line_width=style.rsi_linewidth,
                   signal_line_color=style.rsi_signal_line_color,
                   signal_line_alpha=style.rsi_signal_line_alpha,
                   fill_alpha=style.rsi_fill_alpha,
                   overbought_color=style.rsi_overbought_color,
                   oversold_color=style.rsi_oversold_color,
                   edge_color=style.rsi_edge_color,
                   label_color=style.rsi_label_color,
                   text_color=style.rsi_text_color,
                   spine_color=style.rsi_spine_color,
                   grid_alpha=style.rsi_grid_alpha,
                   grid_color=style.rsi_grid_color,
                   tick_color=style.rsi_tick_color,
                   legend_text=legend_text,
                   legend_text_x=style.legend_text_x,
                   legend_text_y=style.legend_text_y)
    # ax = _plot_rsi(ax, df.index, rsi_data)
    return ax
def _plot_rsi(ax, x, rsi_data,
              line_color='cyan',
              line_width=1,
              signal_line_color='white',
              signal_line_alpha=1,
              fill_alpha=1,
              overbought_color='red',
              oversold_color='green',
              edge_color='cyan',
              label_color='white',
              text_color='white',
              spine_color='blue',
              grid_alpha=1,
              grid_color='white',
              tick_color='white',
              legend_text=None,
              legend_text_x=0.015,
              legend_text_y=0.95):
    """ plot rsi on an explicit axis
    Draws the RSI line, the 30/50/70 signal lines, shaded overbought/oversold
    regions, an optional text legend, and styles the spines/ticks/grid.
    :param ax: axis to draw on (all drawing targets this axis only)
    :param x: x axis series
    :param rsi_data: rsi data series (expected range 0-100)
    :param line_color: rsi line color
    :param line_width: rsi line width
    :param signal_line_color: color for the 30/50/70 horizontal markers
    :param signal_line_alpha: alpha value for those lines
    :param fill_alpha: alpha for the overbought/oversold fills
    :param overbought_color: fill color where rsi >= 70
    :param oversold_color: fill color where rsi <= 30
    :param edge_color: edge color of the filled regions
    :param label_color: axis label color
    :param text_color: color of the legend text
    :param spine_color: spine color
    :param grid_alpha: alpha value for grids
    :param grid_color: color for grids
    :param tick_color: color for ticks
    :param legend_text: optional text drawn inside the axes (e.g. 'RSI 14')
    :param legend_text_x: legend x position in axes coordinates
    :param legend_text_y: legend y position in axes coordinates
    :return: axis
    """
    ax.plot(x, rsi_data, line_color, linewidth=line_width)
    # Prune the top y tick label of THIS axis. The original code called
    # plt.gca() here, which silently styles whatever axes happens to be
    # "current" — wrong whenever ax is not the active axes.
    ax.yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
    # Only label the signal levels on the y axis.
    ax.set_yticks([30, 70])
    # RSI is bounded to [0, 100] by construction.
    ax.set_ylim(0, 100)
    # Draw the horizontal signal lines.
    ax.axhline(70, color=signal_line_color, alpha=signal_line_alpha)
    ax.axhline(50, color=signal_line_color, alpha=signal_line_alpha)
    ax.axhline(30, color=signal_line_color, alpha=signal_line_alpha)
    # Shade overbought (>= 70) and oversold (<= 30) regions.
    ax.fill_between(x, rsi_data, 70,
                    where=(rsi_data >= 70),
                    facecolor=overbought_color,
                    edgecolor=edge_color,
                    alpha=fill_alpha)
    ax.fill_between(x, rsi_data, 30,
                    where=(rsi_data <= 30),
                    facecolor=oversold_color,
                    edgecolor=edge_color,
                    alpha=fill_alpha)
    # Write text as legend for the RSI setup, in axes coordinates.
    if legend_text is not None:
        ax.text(legend_text_x, legend_text_y, legend_text,
                va='top',
                color=text_color,
                transform=ax.transAxes)
    # Label color.
    ax.yaxis.label.set_color(label_color)
    # Spine colors.
    ax.spines['bottom'].set_color(spine_color)
    ax.spines['top'].set_color(spine_color)
    ax.spines['left'].set_color(spine_color)
    ax.spines['right'].set_color(spine_color)
    # Tick params color.
    ax.tick_params(axis='y', colors=tick_color)
    ax.tick_params(axis='x', colors=tick_color)
    # Show tick labels on the right axis as well.
    ax.tick_params(labelright=True)
    # Plot the grid.
    ax.grid(True, alpha=grid_alpha, color=grid_color)
    # Original used plt.ylabel(), which again targets the current axes;
    # ax.set_ylabel() labels the axis we were handed.
    ax.set_ylabel('RSI', color=label_color)
    return ax
| gpl-3.0 | -3,049,561,174,342,176,300 | 33.986301 | 73 | 0.599452 | false |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/event_handling/poly_editor.py | 3 | 5397 | """
This is an example to show how to build cross-GUI applications using
matplotlib event handling to interact with objects on the canvas
"""
import numpy as np
from matplotlib.lines import Line2D
from matplotlib.artist import Artist
from matplotlib.mlab import dist_point_to_segment
class PolygonInteractor:
    """
    A polygon editor.
    Key-bindings
      't' toggle vertex markers on and off.  When vertex markers are on,
          you can move them, delete them
      'd' delete the vertex under point
      'i' insert a vertex at point.  You must be within epsilon of the
          line connecting two existing vertices
    """
    # Whether the draggable vertex markers are currently visible.
    showverts = True
    epsilon = 5  # max pixel distance to count as a vertex hit
    def __init__(self, ax, poly):
        # The polygon must already belong to a figure so we can reach its canvas.
        if poly.figure is None:
            raise RuntimeError('You must first add the polygon to a figure or canvas before defining the interactor')
        self.ax = ax
        canvas = poly.figure.canvas
        self.poly = poly
        # Overlay a Line2D with markers on the polygon's vertices; animated=True
        # so it participates in blitting instead of full redraws.
        x, y = zip(*self.poly.xy)
        self.line = Line2D(x, y, marker='o', markerfacecolor='r', animated=True)
        self.ax.add_line(self.line)
        #self._update_line(poly)
        # Keep the marker line in sync whenever the polygon artist changes.
        cid = self.poly.add_callback(self.poly_changed)
        self._ind = None  # the active vert
        # Wire up all the interaction callbacks.
        canvas.mpl_connect('draw_event', self.draw_callback)
        canvas.mpl_connect('button_press_event', self.button_press_callback)
        canvas.mpl_connect('key_press_event', self.key_press_callback)
        canvas.mpl_connect('button_release_event', self.button_release_callback)
        canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)
        self.canvas = canvas
    def draw_callback(self, event):
        # Cache the clean background for blitting, then draw the animated artists.
        self.background = self.canvas.copy_from_bbox(self.ax.bbox)
        self.ax.draw_artist(self.poly)
        self.ax.draw_artist(self.line)
        self.canvas.blit(self.ax.bbox)
    def poly_changed(self, poly):
        'this method is called whenever the polygon object is called'
        # only copy the artist props to the line (except visibility)
        vis = self.line.get_visible()
        Artist.update_from(self.line, poly)
        self.line.set_visible(vis)  # don't use the poly visibility state
    def get_ind_under_point(self, event):
        'get the index of the vertex under point if within epsilon tolerance'
        # display coords
        xy = np.asarray(self.poly.xy)
        xyt = self.poly.get_transform().transform(xy)
        xt, yt = xyt[:, 0], xyt[:, 1]
        d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
        indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
        ind = indseq[0]
        # Reject the hit if even the nearest vertex is too far away.
        if d[ind]>=self.epsilon:
            ind = None
        return ind
    def button_press_callback(self, event):
        'whenever a mouse button is pressed'
        if not self.showverts: return
        if event.inaxes==None: return
        if event.button != 1: return
        self._ind = self.get_ind_under_point(event)
    def button_release_callback(self, event):
        'whenever a mouse button is released'
        if not self.showverts: return
        if event.button != 1: return
        self._ind = None
    def key_press_callback(self, event):
        'whenever a key is pressed'
        if not event.inaxes: return
        if event.key=='t':
            # Toggle marker visibility; hide also deselects any active vertex.
            self.showverts = not self.showverts
            self.line.set_visible(self.showverts)
            if not self.showverts: self._ind = None
        elif event.key=='d':
            # Delete the vertex under the cursor, if any.
            ind = self.get_ind_under_point(event)
            if ind is not None:
                self.poly.xy = [tup for i,tup in enumerate(self.poly.xy) if i!=ind]
                # NOTE(review): set_data(zip(...)) relies on Python 2 zip
                # returning a list — confirm before running under Python 3.
                self.line.set_data(zip(*self.poly.xy))
        elif event.key=='i':
            # Insert a vertex on the nearest edge within epsilon of the click.
            xys = self.poly.get_transform().transform(self.poly.xy)
            p = event.x, event.y  # display coords
            for i in range(len(xys)-1):
                s0 = xys[i]
                s1 = xys[i+1]
                d = dist_point_to_segment(p, s0, s1)
                if d<=self.epsilon:
                    self.poly.xy = np.array(
                        list(self.poly.xy[:i]) +
                        [(event.xdata, event.ydata)] +
                        list(self.poly.xy[i:]))
                    self.line.set_data(zip(*self.poly.xy))
                    break
        self.canvas.draw()
    def motion_notify_callback(self, event):
        'on mouse movement'
        # Drag the active vertex; redraw via blitting for responsiveness.
        if not self.showverts: return
        if self._ind is None: return
        if event.inaxes is None: return
        if event.button != 1: return
        x,y = event.xdata, event.ydata
        self.poly.xy[self._ind] = x,y
        self.line.set_data(zip(*self.poly.xy))
        self.canvas.restore_region(self.background)
        self.ax.draw_artist(self.poly)
        self.ax.draw_artist(self.line)
        self.canvas.blit(self.ax.bbox)
if __name__ == '__main__':
    # Demo: an editable circle approximated by a 63-vertex polygon.
    import matplotlib.pyplot as plt
    from matplotlib.patches import Polygon
    fig = plt.figure()
    theta = np.arange(0, 2*np.pi, 0.1)
    r = 1.5
    xs = r*np.cos(theta)
    ys = r*np.sin(theta)
    poly = Polygon(list(zip(xs, ys)), animated=True)
    ax = plt.subplot(111)
    ax.add_patch(poly)
    # The interactor adds its own marker line and hooks the canvas events.
    p = PolygonInteractor(ax, poly)
    #ax.add_line(p.line)
    ax.set_title('Click and drag a point to move it')
    ax.set_xlim((-2,2))
    ax.set_ylim((-2,2))
    plt.show()
| mit | 106,231,732,139,781,250 | 31.512048 | 117 | 0.593107 | false |
acbecker/BART | samplers.py | 1 | 24227 | """
This file contains the class definition for the sampler MCMCSample classes.
"""
__author__ = 'Brandon C. Kelly'
import numpy as np
import progressbar
from matplotlib import pyplot as plt
import acor
class MCMCSample(object):
    """
    Class object for parameter samples generated by a yamcmc++ sampler. This class contains a dictionary of samples
    generated by an MCMC sampler for a set of parameters, as well as methods for plotting and summarizing the results.
    In general, the MCMCSample object is empty upon instantiation. One adds parameters to the dictionary through the
    AddStep method of a Sampler object. Running a Sampler object then fills the dictionary up with the parameter values.
    After running a Sampler object, the MCMCSample object will contain the parameter values, which can then be analyzed
    further.
    Alternatively, one can load the parameters and their values from a file. This is done through the method
    generate_from_file. This is helpful if one has a set of MCMC samples generated by a different program.
    """
    # NOTE(review): this class uses Python 2 constructs throughout
    # (print statements, dict.has_key, xrange) and will not run under Python 3.
    __slots__ = ["samples", "logpost"]
    def __init__(self, filename=None, logpost=None, trace=None):
        """
        Constructor for an MCMCSample object. If no arguments are supplied, then this just creates an empty dictionary
        that will contain the MCMC samples. In this case parameters are added to the dictionary through the addstep
        method of a Sampler object, and the values are generated by running the Sampler object. Otherwise, if a
        filename is supplied then the parameter names and MCMC samples are read in from that file.
        :param filename: A string giving the name of an asciifile containing the MCMC samples.
        """
        self.samples = dict()  # Empty dictionary. We will place the samples for each tracked parameter here.
        if logpost is not None:
            self.logpost = logpost
        if trace is not None:
            # NOTE(review): generate_from_trace() is defined below with no trace
            # parameter, so this call raises TypeError — confirm intended API.
            self.generate_from_trace(trace)
        elif filename is not None:
            # Construct MCMCSample object by reading in MCMC samples from one or more asciifiles.
            self.generate_from_file([filename])
    def generate_from_trace(self):
        # Placeholder: subclasses are expected to fill self.samples from an
        # in-memory trace (see the trace branch of __init__).
        pass
    def get_samples(self, name):
        """
        Returns a copy of the numpy array containing the samples for a parameter. This is safer then directly
        accessing the dictionary object containing the samples to prevent one from inadvertently changes the values of
        the samples output from an MCMC sampler.
        :param name: The name of the parameter for which the samples are desired.
        """
        return self.samples[name].copy()
    def generate_from_file(self, filename):
        """
        Build the dictionary of parameter samples from an ascii file of MCMC samples. The first line of this file
        should contain the parameter names.
        :param filename: A list of names of files containing the MCMC samples (one parameter name per file header).
        """
        # TODO: put in exceptions to make sure files are ready correctly
        # NOTE(review): the handle is never closed and shadows the builtin
        # `file`; also readline() keeps the trailing newline in `name` — verify.
        for fname in filename:
            file = open(fname, 'r')
            name = file.readline()
            # Grab the MCMC output
            trace = np.genfromtxt(fname, skip_header=1)
            if name not in self.samples:
                # Parameter is not already in the dictionary, so add it. Otherwise do nothing.
                self.samples[name] = trace
        # Ensure every trace is at least 2-d (iterations x parameter elements).
        self.newaxis()
    def autocorr_timescale(self, trace):
        """
        Compute the autocorrelation time scale as estimated by the `acor` module.
        :param trace: The parameter trace, a 2-d numpy array (iterations x elements).
        :return: numpy array with one autocorrelation time per trace column.
        """
        acors = []
        for i in range(trace.shape[1]):
            tau, mean, sigma = acor.acor(trace[:, i].real)  # Warning, does not work with numpy.complex
            acors.append(tau)
        return np.array(acors)
    def effective_samples(self, name):
        """
        Return the effective number of independent samples of the MCMC sampler.
        :param name: The name of the parameter to compute the effective number of independent samples for.
        """
        if not self.samples.has_key(name):
            print "WARNING: sampler does not have", name
            return
        else:
            print "Calculating effective number of samples"
            traces = self.samples[name]  # Get the sampled parameter values
            npts = traces.shape[0]
            timescale = self.autocorr_timescale(traces)
            # Effective sample size = chain length / autocorrelation time.
            return npts / timescale
    def plot_trace(self, name, doShow=False):
        """
        Plot the trace of the values, a time series showing the evolution of the parameter values for the MCMC sampler.
        Only a single parameter element trace is shown per plot, and all plots are shown on the same plotting window. In
        particular, if a parameter is array-valued, then the traces for each element of its array are plotted on a
        separate subplot.
        :param name: The parameter name.
        :param doShow: If True, call plt.show() after building the figure.
        """
        if not self.samples.has_key(name):
            print "WARNING: sampler does not have", name
            return
        else:
            print "Plotting Trace"
            fig = plt.figure()
            traces = self.samples[name]  # Get the sampled parameter values
            ntrace = traces.shape[1]
            # Bottom subplot keeps the x labels; the others share its x axis.
            spN = plt.subplot(ntrace, 1, ntrace)
            spN.plot(traces[:,-1], ".", markersize=2)
            spN.set_xlabel("Step")
            spN.set_ylabel("par %d" % (ntrace-1))
            for i in range(ntrace-1):
                sp = plt.subplot(ntrace, 1, i+1, sharex=spN)
                sp.plot(traces[:,i], ".", markersize=2)
                sp.set_ylabel("par %d" % (i))
                plt.setp(sp.get_xticklabels(), visible=False)
            plt.suptitle(name)
            if doShow:
                plt.show()
    def plot_1dpdf(self, name, doShow=False):
        """
        Plot histograms of the parameter values generated by the MCMC sampler. If the parameter is array valued then
        histograms of all of the parameter's elements will be plotted.
        :param name: The parameter name.
        :param doShow: If True, call plt.show() after building the figure.
        """
        if not self.samples.has_key(name):
            print "WARNING: sampler does not have", name
            return
        else:
            print "Plotting 1d PDF"
            fig = plt.figure()
            traces = self.samples[name]  # Get the sampled parameter values
            ntrace = traces.shape[1]
            for i in range(ntrace):
                sp = plt.subplot(ntrace, 1, i+1)
                sp.hist(traces[:,i], bins=50, normed=True)
                sp.set_ylabel("par %d" % (i))
                if i == ntrace-1:
                    sp.set_xlabel("val")
            plt.suptitle(name)
            if doShow:
                plt.show()
    def plot_2dpdf(self, name1, name2, pindex1=0, pindex2=0, doShow=False):
        """
        Plot joint distribution of the parameter values generated by the MCMC sampler.
        :param name1: The parameter name along x-axis
        :param name2: The parameter name along y-axis
        :param pindex1: Which element of the array to plot
        :param pindex2: Which element of the array to plot
        :param doShow: Call plt.show()
        """
        if (not self.samples.has_key(name1)) or (not self.samples.has_key(name2)) :
            print "WARNING: sampler does not have", name1, name2
            return
        if pindex1 >= self.samples[name1].shape[1]:
            print "WARNING: not enough data in", name1
            return
        if pindex2 >= self.samples[name2].shape[1]:
            print "WARNING: not enough data in", name2
            return
        print "Plotting 2d PDF"
        fig = plt.figure()
        trace1 = self.samples[name1][:,pindex1]
        trace2 = self.samples[name2][:,pindex2]
        # joint distribution
        axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7])  # [left, bottom, width, height]
        # y histogram
        axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
        # x histogram
        axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)
        axJ.plot(trace1, trace2, 'ro', ms=1, alpha=0.5)
        axX.hist(trace1, bins=100)
        axY.hist(trace2, orientation='horizontal', bins=100)
        axJ.set_xlabel("%s %d" % (name1, pindex1))
        axJ.set_ylabel("%s %d" % (name2, pindex2))
        # Hide the marginal histograms' tick labels to reduce clutter.
        plt.setp(axX.get_xticklabels()+axX.get_yticklabels(), visible=False)
        plt.setp(axY.get_xticklabels()+axY.get_yticklabels(), visible=False)
        if doShow:
            plt.show()
    def plot_2dkde(self, name1, name2, pindex1=0, pindex2=0,
                   nbins=100, doPlotStragglers=True, doShow=False):
        """
        Plot joint distribution of the parameter values generated by the MCMC sampler using a kernel density estimate.
        :param name1: The parameter name along x-axis
        :param name2: The parameter name along y-axis
        :param pindex1: Which element of the array to plot
        :param pindex2: Which element of the array to plot
        :param doShow: Call plt.show()
        :param nbins: Number of bins along each axis for KDE
        :param doPlotStragglers: Plot individual data points outside KDE contours. Works poorly for small samples.
        """
        # NOTE(review): relies on `scipy` (scipy.stats, scipy.optimize), which is
        # not among this module's visible imports — confirm it is imported elsewhere.
        if (not self.samples.has_key(name1)) or (not self.samples.has_key(name2)) :
            print "WARNING: sampler does not have", name1, name2
            return
        if pindex1 >= self.samples[name1].shape[1]:
            print "WARNING: not enough data in", name1
            return
        if pindex2 >= self.samples[name2].shape[1]:
            print "WARNING: not enough data in", name2
            return
        print "Plotting 2d PDF w KDE"
        fig = plt.figure()
        trace1 = self.samples[name1][:,pindex1].real  # JIC we get something imaginary?
        trace2 = self.samples[name2][:,pindex2].real
        npts = trace1.shape[0]
        # Evaluate a Gaussian KDE of the joint sample on an nbins x nbins grid.
        kde = scipy.stats.gaussian_kde((trace1, trace2))
        bins1 = np.linspace(trace1.min(), trace1.max(), nbins)
        bins2 = np.linspace(trace2.min(), trace2.max(), nbins)
        mesh1, mesh2 = np.meshgrid(bins1, bins2)
        hist = kde([mesh1.ravel(), mesh2.ravel()]).reshape(mesh1.shape)
        # Find density levels enclosing the 3-, 2-, and 1-sigma probability mass
        # by bisecting on the enclosed-fraction function.
        clevels = []
        for frac in [0.9973, 0.9545, 0.6827]:
            hfrac = lambda level, hist=hist, frac=frac: hist[hist>=level].sum()/hist.sum() - frac
            level = scipy.optimize.bisect(hfrac, hist.min(), hist.max())
            clevels.append(level)
        # joint distribution
        axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7])  # [left, bottom, width, height]
        # y histogram
        axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
        # x histogram
        axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)
        cont = axJ.contour(mesh1, mesh2, hist, clevels, linestyles="solid", cmap=plt.cm.jet)
        axX.hist(trace1, bins=100)
        axY.hist(trace2, orientation='horizontal', bins=100)
        axJ.set_xlabel("par %d" % (pindex1))
        axJ.set_ylabel("par %d" % (pindex2))
        plt.setp(axX.get_xticklabels()+axX.get_yticklabels(), visible=False)
        plt.setp(axY.get_xticklabels()+axY.get_yticklabels(), visible=False)
        # Note to self: you need to set up the contours above to have
        # the outer one first, for collections[0] to work below.
        #
        # Also a note: this does not work if the outer contour is not
        # fully connected.
        if doPlotStragglers:
            # Scatter-plot the samples that fall outside the outermost contour.
            outer = cont.collections[0]._paths
            sx = []
            sy = []
            for i in range(npts):
                found = [o.contains_point((trace1[i], trace2[i])) for o in outer]
                if not (True in found):
                    sx.append(trace1[i])
                    sy.append(trace2[i])
            axJ.plot(sx, sy, 'k.', ms = 1, alpha = 0.1)
        if doShow:
            plt.show()
    def plot_autocorr(self, name, acorrFac = 10.0, doShow=False):
        """
        Plot the autocorrelation functions of the traces for a parameter. If the parameter is array-value then
        autocorrelation plots for each of the parameter's elements will be plotted.
        :param name: The parameter name.
        :param acorrFac: x-axis extent as a multiple of the autocorrelation time.
        :param doShow: If True, call plt.show() after building the figure.
        """
        if not self.samples.has_key(name):
            print "WARNING: sampler does not have", name
            return
        else:
            print "Plotting autocorrelation function (this make take a while)"
            fig = plt.figure()
            traces = self.samples[name]  # Get the sampled parameter values
            mtrace = np.mean(traces, axis=0)
            ntrace = traces.shape[1]
            acorr = self.autocorr_timescale(traces)
            for i in range(ntrace):
                sp = plt.subplot(ntrace, 1, i+1)
                # acorr() needs the mean-subtracted trace.
                lags, acf, not_needed1, not_needed2 = plt.acorr(traces[:, i] - mtrace[i], maxlags=traces.shape[0]-1, lw=2)
                sp.set_xlim(-0.5, acorrFac * acorr[i])
                sp.set_ylim(-0.01, 1.01)
                # Mark the half-correlation level and the autocorrelation time.
                sp.axhline(y=0.5, c='k', linestyle='--')
                sp.axvline(x=acorr[i], c='r', linestyle='--')
                sp.set_ylabel("par %d autocorr" % (i))
                if i == ntrace-1:
                    sp.set_xlabel("lag")
            plt.suptitle(name)
            if doShow:
                plt.show()
    def plot_parameter(self, name, pindex=0, doShow=False):
        """
        Simultaneously plots the trace, histogram, and autocorrelation of this parameter's values. If the parameter
        is array-valued, then the user must specify the index of the array to plot, as these are all 1-d plots on a
        single plotting window.
        :param name: The name of the parameter that the plots are made for.
        :param pindex: If the parameter is array-valued, then this is the index of the array that the plots are made
            for.
        :param doShow: If True, call plt.show() after building the figure.
        """
        if not self.samples.has_key(name):
            print "WARNING: sampler does not have", name
            return
        else:
            print "Plotting parameter summary"
            fig = plt.figure()
            traces = self.samples[name]
            plot_title = name
            if traces.ndim > 1:
                # Parameter is array valued, grab the column corresponding to pindex
                if traces.ndim > 2:
                    # Parameter values are at least matrix-valued, reshape to a vector
                    traces = traces.reshape(traces.shape[0], np.prod(traces.shape[1:]))
                traces = traces[:, pindex]
                plot_title = name + ", element " + str(pindex)
            # First plot the trace
            plt.subplot(211)
            plt.plot(traces, '.', markersize=2)
            plt.xlim(0, traces.size)
            plt.xlabel("Iteration")
            plt.ylabel("Value")
            plt.title(plot_title)
            # Now add the histogram of values to the trace plot axes
            pdf, bin_edges = np.histogram(traces, bins=25)
            bin_edges = bin_edges[0:pdf.size]
            # Stretch the PDF so that it is readable on the trace plot when plotted horizontally
            pdf = pdf / float(pdf.max()) * 0.34 * traces.size
            # Add the histogram to the plot
            plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0], alpha=0.75)
            # Finally, plot the autocorrelation function of the trace
            plt.subplot(212)
            centered_trace = traces - traces.mean()
            lags, acf, not_needed1, not_needed2 = plt.acorr(centered_trace, maxlags=traces.size - 1, lw=2)
            acf = acf[acf.size / 2:]
            plt.ylabel("ACF")
            plt.xlabel("Lag")
            # Compute the autocorrelation timescale, and then reset the x-axis limits accordingly
            # acf_timescale = self.autocorr_timescale(traces)
            plt.xlim(0, traces.size / 10.0)
            if doShow:
                plt.show()
    def posterior_summaries(self, name):
        """
        Print out the posterior medians, standard deviations, and 68th, 95th, and 99th credibility intervals.
        :param name: The name of the parameter for which the summaries are desired.
        See the documentation for MCMCSample.plot_trace for further information.
        """
        if not self.samples.has_key(name):
            print "WARNING: sampler does not have", name
            return
        else:
            # NOTE(review): creating a figure and printing "Plotting parameter
            # summary" in a text-summary method looks like a copy/paste
            # leftover from plot_parameter — confirm before relying on it.
            print "Plotting parameter summary"
            fig = plt.figure()
            traces = self.samples[name]  # Get the sampled parameter values
            effective_nsamples = self.effective_samples(name)  # Get the effective number of independent samples
            if traces.ndim == 1:
                # Parameter is scalar valued, so this is easy
                print "Posterior summary for parameter", name
                print "----------------------------------------------"
                print "Effective number of independent samples:", effective_nsamples
                print "Median:", np.median(traces)
                print "Standard deviation:", np.std(traces)
                print "68% credibility interval:", np.percentile(traces, (16.0, 84.0))
                print "95% credibility interval:", np.percentile(traces, (2.5, 97.5))
                print "99% credibility interval:", np.percentile(traces, (0.5, 99.5))
            else:
                if traces.ndim > 2:
                    # Parameter values are at least matrix-valued, reshape to a vector.
                    traces = traces.reshape(traces.shape[0], np.prod(traces.shape[1:]))
                for i in xrange(traces.shape[1]):
                    # give summary for each element of this parameter separately
                    print "Posterior summary for parameter", name, " element", i
                    print "----------------------------------------------"
                    print "Effective number of independent samples:", effective_nsamples[i]
                    print "Median:", np.median(traces[:, i])
                    print "Standard deviation:", np.std(traces[:, i])
                    print "68% credibility interval:", np.percentile(traces[:, i], (16.0, 84.0))
                    print "95% credibility interval:", np.percentile(traces[:, i], (2.5, 97.5))
                    print "99% credibility interval:", np.percentile(traces[:, i], (0.5, 99.5))
    def newaxis(self):
        # Promote 1-d traces to 2-d column vectors so downstream code can
        # uniformly index samples as (iteration, element).
        for key in self.samples.keys():
            if len(self.samples[key].shape) == 1:
                self.samples[key] = self.samples[key][:, np.newaxis]
class Sampler(object):
    """
    A class to generate samples of parameter from their probability distribution. Samplers consist of a series of
    steps, where each step updates the value of the parameter(s) associated with it. The samples for the tracked
    parameters are saved to a MCMCSample object.
    """
    # NOTE(review): Python 2 only (print statements, xrange). Step objects are
    # expected to expose do_step() and a _parameter with .name/.value/.track/
    # .set_starting_value() — defined elsewhere in this package; confirm.
    __slots__ = ["sample_size", "burnin", "thin", "_steps", "_burnin_bar", "_sampler_bar", "mcmc_samples"]
    def __init__(self, steps=None):
        """
        Constructor for Sampler object.
        :param steps: A list of step objects to iterate over in one MCMC iteration.
        """
        self.sample_size = 0
        self.burnin = 0
        self.thin = 1
        self._steps = []  # Empty list that will eventually contain the step objects.
        if steps is not None:
            for s in steps:
                self.add_step(s)
        # Construct progress bar objects
        self._burnin_bar = progressbar.ProgressBar()
        self._sampler_bar = progressbar.ProgressBar()
        self.mcmc_samples = MCMCSample()  # MCMCSample class object. This is where the sampled values are stored.
    def add_step(self, step):
        """
        Method to add a step object to the sampler. The sampler will iterate over the step objects, calling their
        Draw() method once per iteration. This method will also initialize the parameter value associated with this
        step, and if the parameter is tracked it will add it to the dictionary containing the samples.
        """
        self._steps.append(step)
    def _allocate_arrays(self):
        # Pre-allocate one numpy array per tracked parameter, sized
        # (sample_size,) for scalars or (sample_size,) + value.shape for arrays.
        for step in self._steps:
            if step._parameter.track:
                # We are saving this parameter's values, so add to dictionary of samples.
                if np.isscalar(step._parameter.value):
                    # Parameter is scalar-valued, so this is easy
                    value_array = np.empty(self.sample_size)
                else:
                    # Parameter is array-like, so get shape of parameter array first
                    pshape = step._parameter.value.shape
                    trace_shape = (self.sample_size,) + pshape
                    # Get numpy array that will store the samples values for this parameter
                    value_array = np.empty(trace_shape)
                # Add the array that will hold the sampled parameter values to the dictionary of samples.
                self.mcmc_samples.samples[step._parameter.name] = value_array
    def start(self):
        # Initialize every parameter, allocate storage, and size the progress bars.
        for step in self._steps:
            step._parameter.set_starting_value()
        self._allocate_arrays()
        self._burnin_bar.maxval = self.burnin
        self._sampler_bar.maxval = self.sample_size
    def iterate(self, niter, burnin_stage):
        """
        Method to perform niter iterations of the sampler.
        :param niter: The number of iterations to perform.
        :param burnin_stage: Are we in the burn-in stage? A boolean.
        """
        for i in xrange(niter):
            for step in self._steps:
                step.do_step()
            if burnin_stage:
                # Update the burn-in progress bar
                self._burnin_bar.update(i + 1)
    def save_values(self):
        """
        Save the parameter values. These values are saved in a dictionary of numpy arrays, indexed according to the
        parameter names. The dictionary of samples is accessed as Sampler.samples.
        """
        current_iteration = self._sampler_bar.currval  # Progress bar keeps track of how many iterations we have run
        for step in self._steps:
            # Save the parameter value associated with each step.
            if np.isscalar(step._parameter.value):
                # Need to treat scalar case separately
                self.mcmc_samples.samples[step._parameter.name][current_iteration] = step._parameter.value
            else:
                # Have a vector- or matrix-valued parameter
                self.mcmc_samples.samples[step._parameter.name][current_iteration, :] = step._parameter.value
    def run(self, burnin, nsamples, thin=1):
        """
        Run the sampler.
        :param nsamples: The final sample size to generate. A total of burnin + thin * nsamples iterations will
            be performed.
        :param burnin: The number of burnin iterations to run.
        :param thin: The thinning interval. Every thin iterations will be kept.
        :return: The MCMCSample object holding the collected traces.
        """
        self.burnin = burnin
        self.sample_size = nsamples
        self.thin = thin
        # Set starting values
        self.start()
        print "Using", len(self._steps), "steps in the MCMC sampler."
        print "Obtaining samples of size", self.sample_size, "for", len(self.mcmc_samples.samples), "parameters."
        # Do burn-in stage
        print "Doing burn-in stage first..."
        self._burnin_bar.start()
        self.iterate(self.burnin, True)  # Perform the burn-in iterations
        # Now run the sampler.
        print "Sampling..."
        self._sampler_bar.start()
        for i in xrange(self.sample_size):
            if self.thin == 1:
                # No thinning is performed, so don't waste time calling self.Iterate.
                for step in self._steps:
                    step.do_step()
            else:
                # Need to thin the samples, so do thin iterations.
                self.iterate(self.thin, False)
            # Now save the tracked parameter values to the samples dictionary object
            self.save_values()
            self._sampler_bar.update(i + 1)  # Update the progress bar
        return self.mcmc_samples
    def restart(self, sample_size, thin=1):
        """
        Restart the MCMC sampler at the current value. No burn-in stage will be performed.
        """
        # Not implemented yet.
        pass
| mit | -7,085,109,834,660,766,000 | 42.339893 | 120 | 0.599909 | false |
Ziqi-Li/bknqgis | pandas/pandas/conftest.py | 7 | 2021 | import pytest
import numpy
import pandas
import pandas.util.testing as tm
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true",
help="skip slow tests")
parser.addoption("--skip-network", action="store_true",
help="skip network tests")
parser.addoption("--run-high-memory", action="store_true",
help="run high memory tests")
parser.addoption("--only-slow", action="store_true",
help="run only slow tests")
def pytest_runtest_setup(item):
if 'slow' in item.keywords and item.config.getoption("--skip-slow"):
pytest.skip("skipping due to --skip-slow")
if 'slow' not in item.keywords and item.config.getoption("--only-slow"):
pytest.skip("skipping due to --only-slow")
if 'network' in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
if 'high_memory' in item.keywords and not item.config.getoption(
"--run-high-memory"):
pytest.skip(
"skipping high memory test since --run-high-memory was not set")
# Configurations for all tests and all test modules
@pytest.fixture(autouse=True)
def configure_tests():
pandas.set_option('chained_assignment', 'raise')
# For running doctests: make np and pd names available
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
doctest_namespace['np'] = numpy
doctest_namespace['pd'] = pandas
@pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])
def spmatrix(request):
tm._skip_if_no_scipy()
from scipy import sparse
return getattr(sparse, request.param + '_matrix')
@pytest.fixture
def ip():
"""
Get an instance of IPython.InteractiveShell.
Will raise a skip if IPython is not installed.
"""
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
return InteractiveShell()
| gpl-2.0 | -5,245,325,280,974,097,000 | 29.164179 | 78 | 0.6571 | false |
jcchin/MagnePlane | src/hyperloop/Python/mission/tests/test_straight_track.py | 3 | 6317 | from __future__ import division, print_function, absolute_import
import unittest
import numpy as np
try:
from openmdao.api import pyOptSparseDriver
except:
pyOptSparseDriver = None
from openmdao.api import ScipyOptimizer
from pointer.components import Problem, Trajectory, CollocationPhase
from hyperloop.Python.mission.rhs import MagnePlaneRHS
class MagneplaneTestStraightTrack(unittest.TestCase):
def setUp(self):
solver = 'SLSQP'
num_seg = 10
seg_ncn = 2
rel_lengths = 'lgl'
# Instantiate a problem and set it's root to an empty Trajectory
prob = Problem()
prob.add_traj(Trajectory("traj0"))
if solver == 'SNOPT':
driver = pyOptSparseDriver()
driver.options['optimizer'] = solver
driver.opt_settings['Major iterations limit'] = 1000
driver.opt_settings['iSumm'] = 6
driver.opt_settings['Major step limit'] = 0.5
driver.opt_settings["Major feasibility tolerance"] = 1.0E-6
driver.opt_settings["Major optimality tolerance"] = 1.0E-6
driver.opt_settings["Minor feasibility tolerance"] = 1.0E-4
driver.opt_settings['Verify level'] = 3
else:
driver = ScipyOptimizer()
driver.options['tol'] = 1.0E-6
driver.options['disp'] = True
driver.options['maxiter'] = 500
prob.trajectories["traj0"].add_objective(name="t", phase="phase0",
place="end", scaler=1.0)
prob.driver = driver
dynamic_controls = None
static_controls = [{'name': 'mass', 'units': 'kg'},
{'name': 'g', 'units': 'm/s/s'},
{'name': 'theta', 'units': 'deg'},
{'name': 'psi', 'units': 'deg'},
{'name': 'Cd', 'units': 'unitless'},
{'name': 'S', 'units': 'm**2'},
{'name': 'p_tube', 'units': 'Pa'},
{'name': 'T_ambient', 'units': 'K'},
{'name': 'R', 'units': 'J/(kg*K)'},
{'name': 'D_magnetic', 'units': 'N'}]
phase0 = CollocationPhase(name='phase0', rhs_class=MagnePlaneRHS,
num_seg=num_seg, seg_ncn=seg_ncn,
rel_lengths=rel_lengths,
dynamic_controls=dynamic_controls,
static_controls=static_controls)
prob.trajectories["traj0"].add_phase(phase0)
phase0.set_state_options('x', lower=0, upper=100000,
ic_val=0, ic_fix=True,
fc_val=1000, fc_fix=False, defect_scaler=0.1)
phase0.set_state_options('y', lower=0, upper=0, ic_val=0, ic_fix=False,
fc_val=0, fc_fix=False, defect_scaler=0.1)
phase0.set_state_options('z', lower=0, upper=0, ic_val=0, ic_fix=False,
fc_val=0, fc_fix=False, defect_scaler=0.1)
phase0.set_state_options('v', lower=0, upper=np.inf, ic_val=0.0,
ic_fix=True, fc_val=335.0,
fc_fix=True, defect_scaler=0.1)
phase0.set_static_control_options('theta', val=0.0, opt=False)
phase0.set_static_control_options('psi', val=0.0, opt=False)
phase0.set_static_control_options(name='g', val=9.80665, opt=False)
phase0.set_static_control_options(name='mass', val=3100.0, opt=False)
phase0.set_static_control_options(name='Cd', val=0.2, opt=False)
phase0.set_static_control_options(name='S', val=1.4, opt=False)
phase0.set_static_control_options(name='p_tube', val=850.0, opt=False)
phase0.set_static_control_options(name='T_ambient', val=298.0,
opt=False)
phase0.set_static_control_options(name='R', val=287.0, opt=False)
phase0.set_static_control_options(name='D_magnetic', val=150.0,
opt=False)
phase0.set_time_options(t0_val=0, t0_lower=0, t0_upper=0,
tp_val=30.0, tp_lower=0.5, tp_upper=1000.0)
self.prob = prob
def test_straight_track_time(self):
self.prob.setup()
self.prob.run()
np.testing.assert_almost_equal(self.prob['traj0.phase0.rhs_c.t'][-1],
35.09879341,
decimal=3)
# # SLSQP is naive about having more constraints than desvars.
# Uncomment the following block if you get this error, and you
# can see where these issues are coming from.
# num_desvars = 0
# for desvar in prob.driver._desvars:
# print(desvar, prob.driver._desvars[desvar]['size'])
# num_desvars = num_desvars + prob.driver._desvars[desvar]['size']
# print('num_desvars = {0}'.format(num_desvars))
#
# num_cons = 0
# for con in prob.driver._cons:
# print(con, prob.driver._cons[con]['size'])
# num_cons = num_cons + prob.driver._cons[con]['size']
# print('num_cons = {0}'.format(num_cons))
# # Uncomment the following to plot the trajectory
# results = self.prob.trajectories['traj0'].phases['phase0'].\
# simulate(dt=0.05)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(self.prob["traj0.phase0.rhs_c.t"],
# self.prob["traj0.phase0.rhs_c.x"],"ro")
# #plt.plot(prob["phase0.rhs_c.x"],prob["phase0.rhs_c.y"],"bo")
# plt.plot(self.prob["traj0.phase0.rhs_i.t"],
# self.prob["traj0.phase0.rhs_i.x"],"rx")
# plt.plot(results["t"],results["x"],"b-")
# plt.figure()
# plt.plot(self.prob["traj0.phase0.rhs_c.t"],
# self.prob["traj0.phase0.rhs_c.v"],"ro")
# #plt.plot(prob["phase0.rhs_c.x"],prob["phase0.rhs_c.y"],"bo")
# plt.plot(self.prob["traj0.phase0.rhs_i.t"],
# self.prob["traj0.phase0.rhs_i.v"],"rx")
# plt.plot(results["t"],results["v"],"b-")
# plt.show()
| apache-2.0 | 2,626,302,035,135,182,000 | 42.267123 | 79 | 0.523983 | false |
hdoria/HnTool | HnTool/output/html.py | 1 | 8712 | # -*- coding: utf-8 -*-
#
# HnTool - output module - html
# Copyright (C) 2009-2010 Authors
# Authors:
# * Hugo Doria <mail at hugodoria dot org>
# * Aurelio A. Heckert <aurium at gmail dot com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# ( at your option ) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import HnTool.modules
import string
class Format:
description = "HTML output for a web browser"
def __init__(self, options):
pass
def format_status(self, token):
if token == 'ok':
return '<td class="status-ok">OK</td>'
elif token == 'low':
return '<td class="status-low">LOW</td>'
elif token == 'medium':
return '<td class="status-medium">MEDIUM</td>'
elif token == 'high':
return '<td class="status-high">HIGH</td>'
elif token == 'info':
return '<td class="status-info">INFO</td>'
# Method to show the check results
def msg_status(self, msg, status):
'''
Method to show the check results
'''
return '<tr>' + \
self.format_status(status) + \
'<td>' + msg + '</td>' + \
'</tr>'
def statistics_graphic(self, statistics):
import matplotlib.pyplot as Matplot
import base64
import os # para remover o grafico gerado
# Matplot.title('types of results')
# Matplot.ylabel('occurrences')
Matplot.grid(True)
Matplot.rcParams.update({'font.size': 18})
Matplot.rcParams.update({'font.weight': 'bold'})
bar_width = 0.6
Matplot.bar(1, statistics['ok'], width=bar_width, facecolor='lightgreen', align='center')
Matplot.bar(2, statistics['high'], width=bar_width, facecolor='red', align='center')
Matplot.bar(3, statistics['medium'], width=bar_width, facecolor='yellow', align='center')
Matplot.bar(4, statistics['low'], width=bar_width, facecolor='lightgray', align='center')
Matplot.bar(5, statistics['info'], width=bar_width, facecolor='lightblue', align='center')
Matplot.xticks([1, 2, 3, 4, 5], ['OK', 'HIGH', 'MEDIUM', 'LOW', 'INFO'])
graphic_name = 'statistics.png'
Matplot.savefig(graphic_name)
width = 270
height = 200
image_file = open(graphic_name, 'r')
img_base64 = base64.b64encode(image_file.read())
image_file.close()
os.remove(graphic_name)
# imagem redimensionada no html para preservar a qualidade
img_tag = '<img src="data:image/png;base64,{0}" alt="statistics graphic" width="{1}" height="{2}" />'.format(img_base64, width, height)
#img_tag = '<img src="{0}" alt="statistics graphic" width="{1}" height="{2}" />'.format(graphic_name, width, height)
return img_tag
def output(self, report, conf):
self.conf = conf
# Print all the results, from the 5 types of messages ( ok, low, medium, high and info ).
# First message is the "ok" one ( m['results'][0] ). The second one is
# "low" ( m['results'][1] ). The third ( m['results'][2] ) is for "warnings"
# and the fourth one is "high" ( m['results'][3] ), The last one is for
# info messages.
print '''<!DOCTYPE html>\n<html>
<head>
<title>HnTool - A hardening tool for *nixes - Report</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<style type="text/css">
body {
font: 12px 'Lucida Grande', sans-serif;
color: #666666;
text-align: center;
margin-right: auto;
margin-left: auto;
margin-top: 0;
border-top: 3px solid black;
}
h2 {
text-align: left;
font-size: 15px;
padding-top: 15px;
padding-bottom: 5px;
padding-left: 5px;
border-bottom: 1px solid #000;
}
#wrap {
width: 1000px;
margin:0 auto;
margin-top: 10px;
text-align: center;
}
#left {
width: 700px;
float: left;
}
#right {
margin-top: 15px;
margin-left: 720px;
border: 1px solid #ddd;
}
ul {
text-align: left;
text-decoration: none;
}
table {
border: 0;
width: 690px;
}
td {
color: #000;
padding: 5px;
text-align: left;
}
.status-ok {
background: lightgreen;
text-align: center;
font-size: 12px;
}
.status-low {
background: lightgray;
text-align: center;
font-size: 12px;
}
.status-medium {
background: yellow;
text-align: center;
font-size: 12px;
}
.status-high {
background: red;
text-align: center;
font-size: 12px;
}
.status-info {
background: lightblue;
text-align: center;
font-size: 12px;
}
</style>
</head>
<body>
<div id="wrap">
<div id="header">
<h1>HnTool - A hardening tool for *nixes - Report</h1>
</div>
<div id="left">
<table>'''
statistics = {'ok': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}
for m in report:
print '<tr><th colspan="2" align="left"><h2>' + m['title'] + '</h2></th></tr>'
if m['results']['ok'] != []:
for result in m['results']['ok']:
print self.msg_status(result, 'ok')
statistics['ok'] += 1
if m['results']['low'] != []:
for result in m['results']['low']:
print self.msg_status(result, 'low')
statistics['low'] += 1
if m['results']['medium'] != []:
for result in m['results']['medium']:
print self.msg_status(result, 'medium')
statistics['medium'] += 1
if m['results']['high'] != []:
for result in m['results']['high']:
print self.msg_status(result, 'high')
statistics['high'] += 1
if m['results']['info'] != []:
for result in m['results']['info']:
print self.msg_status(result, 'info')
statistics['info'] += 1
print '''
</table>
</div> <!-- closing the left div -->
<div id="right">
<h3>Statistics</h3>
<ul>'''
print ' <li><strong>OK:</strong> ' + str(statistics['ok']) + '</li>'
print ' <li><strong>HIGH:</strong> ' + str(statistics['high']) + '</li>'
print ' <li><strong>MEDIUM:</strong> ' + str(statistics['medium']) + '</li>'
print ' <li><strong>LOW:</strong> ' + str(statistics['low']) + '</li>'
print ' <li><strong>INFO:</strong> ' + str(statistics['info']) + '</li>'
print ' </ul>'
print self.statistics_graphic(statistics)
print '''
</div> <!-- closing the right div -->
</div> <!-- closing the wrap div -->
</body>
</html>'''
| gpl-2.0 | -7,514,285,492,901,710,000 | 34.851852 | 143 | 0.473485 | false |
alorenzo175/pvlib-python | pvlib/forecast.py | 1 | 37917 | '''
The 'forecast' module contains class definitions for
retrieving forecast data from UNIDATA THREDDS servers.
'''
import datetime
from netCDF4 import num2date
import numpy as np
import pandas as pd
from requests.exceptions import HTTPError
from xml.etree.ElementTree import ParseError
from pvlib.location import Location
from pvlib.irradiance import liujordan, get_extra_radiation, disc
from siphon.catalog import TDSCatalog
from siphon.ncss import NCSS
import warnings
# Emit an experimental-status warning once at import time so users know
# this module's API and behavior are subject to change.
warnings.warn(
    'The forecast module algorithms and features are highly experimental. '
    'The API may change, the functionality may be consolidated into an io '
    'module, or the module may be separated into its own package.')
class ForecastModel(object):
    """
    An object for querying and holding forecast model information for
    use within the pvlib library.
    Simplifies use of siphon library on a THREDDS server.
    Parameters
    ----------
    model_type: string
        UNIDATA category in which the model is located.
    model_name: string
        Name of the UNIDATA forecast model.
    set_type: string
        Model dataset type.
    Attributes
    ----------
    access_url: string
        URL specifying the dataset from which data will be retrieved.
    base_tds_url : string
        The top level server address
    catalog_url : string
        The url path of the catalog to parse.
    data: pd.DataFrame
        Data returned from the query.
    data_format: string
        Format of the forecast data being requested from UNIDATA.
    dataset: Dataset
        Object containing information used to access forecast data.
    dataframe_variables: list
        Model variables that are present in the data.
    datasets_list: list
        List of all available datasets.
    fm_models: Dataset
        TDSCatalog object containing all available
        forecast models from UNIDATA.
    fm_models_list: list
        List of all available forecast models from UNIDATA.
    latitude: list
        A list of floats containing latitude values.
    location: Location
        A pvlib Location object containing geographic quantities.
    longitude: list
        A list of floats containing longitude values.
    lbox: boolean
        Indicates the use of a location bounding box.
    ncss: NCSS object
        NCSS
    model_name: string
        Name of the UNIDATA forecast model.
    model: Dataset
        A dictionary of Dataset object, whose keys are the name of the
        dataset's name.
    model_url: string
        The url path of the dataset to parse.
    modelvariables: list
        Common variable names that correspond to queryvariables.
    query: NCSS query object
        NCSS object used to complete the forecast data retrieval.
    queryvariables: list
        Variables that are used to query the THREDDS Data Server.
    time: DatetimeIndex
        Time range.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    vert_level: float or integer
        Vertical altitude for query data.
    """
    # Key into Siphon dataset.access_urls for the NetCDF Subset Service.
    access_url_key = 'NetcdfSubset'
    # Top-level UCAR THREDDS catalog; base_tds_url is derived from it.
    catalog_url = 'https://thredds.ucar.edu/thredds/catalog.xml'
    base_tds_url = catalog_url.split('/thredds/')[0]
    data_format = 'netcdf'
    # Units of the standardized (renamed) output variables.
    units = {
        'temp_air': 'C',
        'wind_speed': 'm/s',
        'ghi': 'W/m^2',
        'ghi_raw': 'W/m^2',
        'dni': 'W/m^2',
        'dhi': 'W/m^2',
        'total_clouds': '%',
        'low_clouds': '%',
        'mid_clouds': '%',
        'high_clouds': '%'}
    def __init__(self, model_type, model_name, set_type, vert_level=None):
        # No network access happens here; connect_to_catalog is deferred
        # until get_data is first called (see self.connected flag).
        self.model_type = model_type
        self.model_name = model_name
        self.set_type = set_type
        self.connected = False
        self.vert_level = vert_level
    def connect_to_catalog(self):
        """
        Connect to the THREDDS catalog, locate this model's catalog and
        dataset, and build the NCSS query object. Sets self.connected.
        Raises ParseError or HTTPError if the model appears unavailable.
        """
        self.catalog = TDSCatalog(self.catalog_url)
        self.fm_models = TDSCatalog(
            self.catalog.catalog_refs[self.model_type].href)
        self.fm_models_list = sorted(list(self.fm_models.catalog_refs.keys()))
        try:
            model_url = self.fm_models.catalog_refs[self.model_name].href
        except ParseError:
            raise ParseError(self.model_name + ' model may be unavailable.')
        try:
            self.model = TDSCatalog(model_url)
        except HTTPError:
            # retry once on a transient HTTP failure before giving up
            try:
                self.model = TDSCatalog(model_url)
            except HTTPError:
                raise HTTPError(self.model_name + ' model may be unavailable.')
        self.datasets_list = list(self.model.datasets.keys())
        self.set_dataset()
        self.connected = True
    def __repr__(self):
        return '{}, {}'.format(self.model_name, self.set_type)
    def set_dataset(self):
        '''
        Retrieves the designated dataset, creates NCSS object, and
        creates a NCSS query object.
        '''
        keys = list(self.model.datasets.keys())
        # first word of each dataset label identifies its type
        # (e.g. 'Best', 'Latest', 'Full')
        labels = [item.split()[0].lower() for item in keys]
        if self.set_type == 'best':
            self.dataset = self.model.datasets[keys[labels.index('best')]]
        elif self.set_type == 'latest':
            self.dataset = self.model.datasets[keys[labels.index('latest')]]
        elif self.set_type == 'full':
            self.dataset = self.model.datasets[keys[labels.index('full')]]
        self.access_url = self.dataset.access_urls[self.access_url_key]
        self.ncss = NCSS(self.access_url)
        self.query = self.ncss.query()
    def set_query_latlon(self):
        '''
        Sets the NCSS query location latitude and longitude.
        Uses a bounding box query if both latitude and longitude are
        lists, otherwise a single point query. Sets self.lbox.
        '''
        if (isinstance(self.longitude, list) and
                isinstance(self.latitude, list)):
            self.lbox = True
            # west, east, south, north
            self.query.lonlat_box(self.longitude[0], self.longitude[1],
                                  self.latitude[0], self.latitude[1])
        else:
            self.lbox = False
            self.query.lonlat_point(self.longitude, self.latitude)
    def set_location(self, time, latitude, longitude):
        '''
        Sets the location for the query.
        Parameters
        ----------
        time: datetime or DatetimeIndex
            Time range of the query. Its timezone (if any) is applied
            to the resulting Location.
        '''
        if isinstance(time, datetime.datetime):
            tzinfo = time.tzinfo
        else:
            tzinfo = time.tz
        if tzinfo is None:
            self.location = Location(latitude, longitude)
        else:
            self.location = Location(latitude, longitude, tz=tzinfo)
    def get_data(self, latitude, longitude, start, end,
                 vert_level=None, query_variables=None,
                 close_netcdf_data=True, **kwargs):
        """
        Submits a query to the UNIDATA servers using Siphon NCSS and
        converts the netcdf data to a pandas DataFrame.
        Parameters
        ----------
        latitude: float
            The latitude value.
        longitude: float
            The longitude value.
        start: datetime or timestamp
            The start time.
        end: datetime or timestamp
            The end time.
        vert_level: None, float or integer, default None
            Vertical altitude of interest.
        query_variables: None or list, default None
            If None, uses self.variables.
        close_netcdf_data: bool, default True
            Controls if the temporary netcdf data file should be closed.
            Set to False to access the raw data.
        **kwargs:
            Additional keyword arguments are silently ignored.
        Returns
        -------
        forecast_data : DataFrame
            column names are the weather model's variable names.
        """
        # lazy connection: only touch the network on first use
        if not self.connected:
            self.connect_to_catalog()
        if vert_level is not None:
            self.vert_level = vert_level
        if query_variables is None:
            self.query_variables = list(self.variables.values())
        else:
            self.query_variables = query_variables
        self.latitude = latitude
        self.longitude = longitude
        self.set_query_latlon()  # modifies self.query
        self.set_location(start, latitude, longitude)
        self.start = start
        self.end = end
        self.query.time_range(self.start, self.end)
        if self.vert_level is not None:
            self.query.vertical_level(self.vert_level)
        self.query.variables(*self.query_variables)
        self.query.accept(self.data_format)
        self.netcdf_data = self.ncss.get_data(self.query)
        # might be better to go to xarray here so that we can handle
        # higher dimensional data for more advanced applications
        self.data = self._netcdf2pandas(self.netcdf_data, self.query_variables,
                                        self.start, self.end)
        if close_netcdf_data:
            self.netcdf_data.close()
        return self.data
    def process_data(self, data, **kwargs):
        """
        Defines the steps needed to convert raw forecast data
        into processed forecast data. Most forecast models implement
        their own version of this method which also call this one.
        Parameters
        ----------
        data: DataFrame
            Raw forecast data
        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        data = self.rename(data)
        return data
    def get_processed_data(self, *args, **kwargs):
        """
        Get and process forecast data.
        Parameters
        ----------
        *args: positional arguments
            Passed to get_data
        **kwargs: keyword arguments
            Passed to get_data and process_data
        Returns
        -------
        data: DataFrame
            Processed forecast data
        """
        return self.process_data(self.get_data(*args, **kwargs), **kwargs)
    def rename(self, data, variables=None):
        """
        Renames the columns according the variable mapping.
        Parameters
        ----------
        data: DataFrame
        variables: None or dict, default None
            If None, uses self.variables
        Returns
        -------
        data: DataFrame
            Renamed data.
        """
        if variables is None:
            variables = self.variables
        # invert the mapping: model-specific name -> standard name
        return data.rename(columns={y: x for x, y in variables.items()})
    def _netcdf2pandas(self, netcdf_data, query_variables, start, end):
        """
        Transforms data from netcdf to pandas DataFrame.
        Parameters
        ----------
        data: netcdf
            Data returned from UNIDATA NCSS query.
        query_variables: list
            The variables requested.
        start: Timestamp
            The start time
        end: Timestamp
            The end time
        Returns
        -------
        pd.DataFrame
        """
        # set self.time
        try:
            time_var = 'time'
            self.set_time(netcdf_data.variables[time_var])
        except KeyError:
            # which model does this dumb thing?
            time_var = 'time1'
            self.set_time(netcdf_data.variables[time_var])
        data_dict = {}
        for key, data in netcdf_data.variables.items():
            # if accounts for possibility of extra variable returned
            if key not in query_variables:
                continue
            squeezed = data[:].squeeze()
            if squeezed.ndim == 1:
                data_dict[key] = squeezed
            elif squeezed.ndim == 2:
                # 2D data (e.g. multiple vertical levels): one column
                # per level, suffixed with the level index
                for num, data_level in enumerate(squeezed.T):
                    data_dict[key + '_' + str(num)] = data_level
            else:
                raise ValueError('cannot parse ndim > 2')
        data = pd.DataFrame(data_dict, index=self.time)
        # sometimes data is returned as hours since T0
        # where T0 is before start. Then the hours between
        # T0 and start are added *after* end. So sort and slice
        # to remove the garbage
        data = data.sort_index().loc[start:end]
        return data
    def set_time(self, time):
        '''
        Converts time data into a pandas date object.
        Parameters
        ----------
        time: netcdf
            Contains time information.
        Returns
        -------
        pandas.DatetimeIndex
        '''
        times = num2date(time[:].squeeze(), time.units)
        # localize to the tz captured in set_location
        self.time = pd.DatetimeIndex(pd.Series(times), tz=self.location.tz)
    def cloud_cover_to_ghi_linear(self, cloud_cover, ghi_clear, offset=35,
                                  **kwargs):
        """
        Convert cloud cover to GHI using a linear relationship.
        0% cloud cover returns ghi_clear.
        100% cloud cover returns offset*ghi_clear.
        Parameters
        ----------
        cloud_cover: numeric
            Cloud cover in %.
        ghi_clear: numeric
            GHI under clear sky conditions.
        offset: numeric, default 35
            Determines the minimum GHI.
        kwargs
            Not used.
        Returns
        -------
        ghi: numeric
            Estimated GHI.
        References
        ----------
        Larson et. al. "Day-ahead forecasting of solar power output from
        photovoltaic plants in the American Southwest" Renewable Energy
        91, 11-20 (2016).
        """
        # convert percentages to fractions
        offset = offset / 100.
        cloud_cover = cloud_cover / 100.
        # linear interpolation between ghi_clear and offset*ghi_clear
        ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear
        return ghi
    def cloud_cover_to_irradiance_clearsky_scaling(self, cloud_cover,
                                                   method='linear',
                                                   **kwargs):
        """
        Estimates irradiance from cloud cover in the following steps:
        1. Determine clear sky GHI using Ineichen model and
           climatological turbidity.
        2. Estimate cloudy sky GHI using a function of
           cloud_cover e.g.
           :py:meth:`~ForecastModel.cloud_cover_to_ghi_linear`
        3. Estimate cloudy sky DNI using the DISC model.
        4. Calculate DHI from DNI and GHI.
        Parameters
        ----------
        cloud_cover : Series
            Cloud cover in %.
        method : str, default 'linear'
            Method for converting cloud cover to GHI.
            'linear' is currently the only option.
        **kwargs
            Passed to the method that does the conversion
        Returns
        -------
        irrads : DataFrame
            Estimated GHI, DNI, and DHI.
        """
        solpos = self.location.get_solarposition(cloud_cover.index)
        cs = self.location.get_clearsky(cloud_cover.index, model='ineichen',
                                        solar_position=solpos)
        method = method.lower()
        if method == 'linear':
            ghi = self.cloud_cover_to_ghi_linear(cloud_cover, cs['ghi'],
                                                 **kwargs)
        else:
            raise ValueError('invalid method argument')
        dni = disc(ghi, solpos['zenith'], cloud_cover.index)['dni']
        # closure equation: dhi = ghi - dni * cos(zenith)
        dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))
        irrads = pd.DataFrame({'ghi': ghi, 'dni': dni, 'dhi': dhi}).fillna(0)
        return irrads
    def cloud_cover_to_transmittance_linear(self, cloud_cover, offset=0.75,
                                            **kwargs):
        """
        Convert cloud cover to atmospheric transmittance using a linear
        model.
        0% cloud cover returns offset.
        100% cloud cover returns 0.
        Parameters
        ----------
        cloud_cover : numeric
            Cloud cover in %.
        offset : numeric, default 0.75
            Determines the maximum transmittance.
        kwargs
            Not used.
        Returns
        -------
        ghi : numeric
            Estimated GHI.
        """
        transmittance = ((100.0 - cloud_cover) / 100.0) * offset
        return transmittance
    def cloud_cover_to_irradiance_liujordan(self, cloud_cover, **kwargs):
        """
        Estimates irradiance from cloud cover in the following steps:
        1. Determine transmittance using a function of cloud cover e.g.
           :py:meth:`~ForecastModel.cloud_cover_to_transmittance_linear`
        2. Calculate GHI, DNI, DHI using the
           :py:func:`pvlib.irradiance.liujordan` model
        Parameters
        ----------
        cloud_cover : Series
        Returns
        -------
        irradiance : DataFrame
            Columns include ghi, dni, dhi
        """
        # in principle, get_solarposition could use the forecast
        # pressure, temp, etc., but the cloud cover forecast is not
        # accurate enough to justify using these minor corrections
        solar_position = self.location.get_solarposition(cloud_cover.index)
        dni_extra = get_extra_radiation(cloud_cover.index)
        airmass = self.location.get_airmass(cloud_cover.index)
        transmittance = self.cloud_cover_to_transmittance_linear(cloud_cover,
                                                                 **kwargs)
        irrads = liujordan(solar_position['apparent_zenith'],
                           transmittance, airmass['airmass_absolute'],
                           dni_extra=dni_extra)
        irrads = irrads.fillna(0)
        return irrads
    def cloud_cover_to_irradiance(self, cloud_cover, how='clearsky_scaling',
                                  **kwargs):
        """
        Convert cloud cover to irradiance. A wrapper method.
        Parameters
        ----------
        cloud_cover : Series
        how : str, default 'clearsky_scaling'
            Selects the method for conversion. Can be one of
            clearsky_scaling or liujordan.
        **kwargs
            Passed to the selected method.
        Returns
        -------
        irradiance : DataFrame
            Columns include ghi, dni, dhi
        """
        how = how.lower()
        if how == 'clearsky_scaling':
            irrads = self.cloud_cover_to_irradiance_clearsky_scaling(
                cloud_cover, **kwargs)
        elif how == 'liujordan':
            irrads = self.cloud_cover_to_irradiance_liujordan(
                cloud_cover, **kwargs)
        else:
            raise ValueError('invalid how argument')
        return irrads
    def kelvin_to_celsius(self, temperature):
        """
        Converts Kelvin to celsius.
        Parameters
        ----------
        temperature: numeric
            Temperature in K.
        Returns
        -------
        temperature: numeric
            Temperature in C.
        """
        return temperature - 273.15
    def isobaric_to_ambient_temperature(self, data):
        """
        Calculates temperature from isobaric temperature.
        Parameters
        ----------
        data: DataFrame
            Must contain columns pressure, temperature_iso,
            temperature_dew_iso. Input temperature in K.
        Returns
        -------
        temperature : Series
            Temperature in K
        """
        # NOTE(review): pressure converted Pa -> hPa here; assumes the
        # model reports pressure in Pa — confirm against query variables.
        P = data['pressure'] / 100.0  # noqa: N806
        Tiso = data['temperature_iso']  # noqa: N806
        Td = data['temperature_dew_iso'] - 273.15  # noqa: N806
        # saturation water vapor pressure
        e = 6.11 * 10**((7.5 * Td) / (Td + 273.3))
        # saturation water vapor mixing ratio
        w = 0.622 * (e / (P - e))
        temperature = Tiso - ((2.501 * 10.**6) / 1005.7) * w
        return temperature
    def uv_to_speed(self, data):
        """
        Computes wind speed from wind components.
        Parameters
        ----------
        data : DataFrame
            Must contain the columns 'wind_speed_u' and 'wind_speed_v'.
        Returns
        -------
        wind_speed : Series
        """
        # magnitude of the (u, v) wind vector
        wind_speed = np.sqrt(data['wind_speed_u']**2 + data['wind_speed_v']**2)
        return wind_speed
    def gust_to_speed(self, data, scaling=1/1.4):
        """
        Computes standard wind speed from gust.
        Very approximate and location dependent.
        Parameters
        ----------
        data : DataFrame
            Must contain the column 'wind_speed_gust'.
        scaling : numeric, default 1/1.4
            Empirical gust-to-sustained-speed factor.
        Returns
        -------
        wind_speed : Series
        """
        wind_speed = data['wind_speed_gust'] * scaling
        return wind_speed
class GFS(ForecastModel):
    """
    Subclass of the ForecastModel class representing GFS
    forecast model.
    Model data corresponds to 0.5 or 0.25 degree resolution forecasts,
    depending on the ``resolution`` argument.
    Parameters
    ----------
    resolution: string, default 'half'
        Resolution of the model, either 'half' or 'quarter' degree.
    set_type: string, default 'best'
        Type of model to pull data from.
    Attributes
    ----------
    dataframe_variables: list
        Common variables present in the final set of data.
    model: string
        Name of the UNIDATA forecast model.
    model_type: string
        UNIDATA category in which the model is located.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    """
    # Valid resolution names after str.title() normalization.
    _resolutions = ['Half', 'Quarter']
    def __init__(self, resolution='half', set_type='best'):
        model_type = 'Forecast Model Data'
        # normalize user input, e.g. 'half' -> 'Half'
        resolution = resolution.title()
        if resolution not in self._resolutions:
            # fixed grammar of error message ('must in' -> 'must be in')
            raise ValueError(
                'resolution must be in {}'.format(self._resolutions))
        model = 'GFS {} Degree Forecast'.format(resolution)
        # isobaric variables will require a vert_level to prevent
        # excessive data downloads
        self.variables = {
            'temp_air': 'Temperature_surface',
            'wind_speed_gust': 'Wind_speed_gust_surface',
            'wind_speed_u': 'u-component_of_wind_isobaric',
            'wind_speed_v': 'v-component_of_wind_isobaric',
            'total_clouds':
                'Total_cloud_cover_entire_atmosphere_Mixed_intervals_Average',
            'low_clouds':
                'Total_cloud_cover_low_cloud_Mixed_intervals_Average',
            'mid_clouds':
                'Total_cloud_cover_middle_cloud_Mixed_intervals_Average',
            'high_clouds':
                'Total_cloud_cover_high_cloud_Mixed_intervals_Average',
            'boundary_clouds': ('Total_cloud_cover_boundary_layer_cloud_'
                                'Mixed_intervals_Average'),
            'convect_clouds': 'Total_cloud_cover_convective_cloud',
            'ghi_raw': ('Downward_Short-Wave_Radiation_Flux_'
                        'surface_Mixed_intervals_Average')}
        # standardized columns returned by process_data
        self.output_variables = [
            'temp_air',
            'wind_speed',
            'ghi',
            'dni',
            'dhi',
            'total_clouds',
            'low_clouds',
            'mid_clouds',
            'high_clouds']
        super(GFS, self).__init__(model_type, model, set_type,
                                  vert_level=100000)
    def process_data(self, data, cloud_cover='total_clouds', **kwargs):
        """
        Defines the steps needed to convert raw forecast data
        into processed forecast data.
        Parameters
        ----------
        data: DataFrame
            Raw forecast data
        cloud_cover: str, default 'total_clouds'
            The type of cloud cover used to infer the irradiance.
        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        # base-class processing renames model variables to standard names
        data = super(GFS, self).process_data(data, **kwargs)
        data['temp_air'] = self.kelvin_to_celsius(data['temp_air'])
        # combine isobaric u/v components into a scalar speed
        data['wind_speed'] = self.uv_to_speed(data)
        # derive ghi/dni/dhi from the chosen cloud cover column
        irrads = self.cloud_cover_to_irradiance(data[cloud_cover], **kwargs)
        data = data.join(irrads, how='outer')
        return data[self.output_variables]
class HRRR_ESRL(ForecastModel):  # noqa: N801
    """
    Subclass of the ForecastModel class representing
    NOAA/GSD/ESRL's HRRR forecast model.

    This is not an operational product.

    Model data corresponds to NOAA/GSD/ESRL HRRR CONUS 3km resolution
    surface forecasts.

    Parameters
    ----------
    set_type: string, default 'best'
        Type of model to pull data from.

    Attributes
    ----------
    dataframe_variables: list
        Common variables present in the final set of data.
    model: string
        Name of the UNIDATA forecast model.
    model_type: string
        UNIDATA category in which the model is located.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    """

    def __init__(self, set_type='best'):
        # Warn eagerly: this experimental dataset is frequently unavailable.
        warnings.warn('HRRR_ESRL is an experimental model and is not '
                      'always available.')
        model_type = 'Forecast Model Data'
        model = 'GSD HRRR CONUS 3km surface'
        # Mapping from common pvlib names to dataset variable names. The
        # height_above_ground temperature/wind variables remain commented
        # out pending GH 702; surface fields are used instead.
        self.variables = {
            'temp_air': 'Temperature_surface',
            'wind_speed_gust': 'Wind_speed_gust_surface',
            # 'temp_air': 'Temperature_height_above_ground',  # GH 702
            # 'wind_speed_u': 'u-component_of_wind_height_above_ground',
            # 'wind_speed_v': 'v-component_of_wind_height_above_ground',
            'total_clouds': 'Total_cloud_cover_entire_atmosphere',
            # NOTE(review): the 'UnknownLevelType-2xx' suffixes mirror the
            # dataset's own variable names -- confirm against the catalog
            # before changing.
            'low_clouds': 'Low_cloud_cover_UnknownLevelType-214',
            'mid_clouds': 'Medium_cloud_cover_UnknownLevelType-224',
            'high_clouds': 'High_cloud_cover_UnknownLevelType-234',
            'ghi_raw': 'Downward_short-wave_radiation_flux_surface', }
        # Common variable names exposed after process_data(); unlike most
        # models, raw GHI is passed through as well.
        self.output_variables = [
            'temp_air',
            'wind_speed',
            'ghi_raw',
            'ghi',
            'dni',
            'dhi',
            'total_clouds',
            'low_clouds',
            'mid_clouds',
            'high_clouds']
        super(HRRR_ESRL, self).__init__(model_type, model, set_type)

    def process_data(self, data, cloud_cover='total_clouds', **kwargs):
        """
        Defines the steps needed to convert raw forecast data
        into processed forecast data.

        Parameters
        ----------
        data: DataFrame
            Raw forecast data
        cloud_cover: str, default 'total_clouds'
            The type of cloud cover used to infer the irradiance.

        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        data = super(HRRR_ESRL, self).process_data(data, **kwargs)
        # Convert Kelvin to Celsius, then derive speed from the gust field.
        data['temp_air'] = self.kelvin_to_celsius(data['temp_air'])
        data['wind_speed'] = self.gust_to_speed(data)
        # data['wind_speed'] = self.uv_to_speed(data)  # GH 702
        # Infer irradiance components from the selected cloud cover series.
        irrads = self.cloud_cover_to_irradiance(data[cloud_cover], **kwargs)
        data = data.join(irrads, how='outer')
        return data[self.output_variables]
class NAM(ForecastModel):
    """
    Subclass of the ForecastModel class representing the NAM
    forecast model.

    Model data corresponds to NAM CONUS 12km resolution forecasts
    from CONDUIT.

    Parameters
    ----------
    set_type: string, default 'best'
        Type of model to pull data from.

    Attributes
    ----------
    dataframe_variables: list
        Common variables present in the final set of data.
    model: string
        Name of the UNIDATA forecast model.
    model_type: string
        UNIDATA category in which the model is located.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    """

    def __init__(self, set_type='best'):
        model_type = 'Forecast Model Data'
        model = 'NAM CONUS 12km from CONDUIT'
        # Map pvlib's common variable names onto the NAM dataset names.
        self.variables = {
            'temp_air': 'Temperature_surface',
            'wind_speed_gust': 'Wind_speed_gust_surface',
            'total_clouds': 'Total_cloud_cover_entire_atmosphere_single_layer',
            'low_clouds': 'Low_cloud_cover_low_cloud',
            'mid_clouds': 'Medium_cloud_cover_middle_cloud',
            'high_clouds': 'High_cloud_cover_high_cloud',
            'ghi_raw': 'Downward_Short-Wave_Radiation_Flux_surface', }
        # Common variable names exposed after process_data().
        self.output_variables = ['temp_air', 'wind_speed', 'ghi', 'dni',
                                 'dhi', 'total_clouds', 'low_clouds',
                                 'mid_clouds', 'high_clouds']
        super(NAM, self).__init__(model_type, model, set_type)

    def process_data(self, data, cloud_cover='total_clouds', **kwargs):
        """Convert raw NAM forecast data into processed forecast data.

        Parameters
        ----------
        data: DataFrame
            Raw forecast data
        cloud_cover: str, default 'total_clouds'
            The type of cloud cover used to infer the irradiance.

        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        # Generic post-processing from the parent class first.
        processed = super(NAM, self).process_data(data, **kwargs)
        # Derive wind speed from the gust field; convert temperature to
        # Celsius (independent steps).
        processed['wind_speed'] = self.gust_to_speed(processed)
        processed['temp_air'] = self.kelvin_to_celsius(processed['temp_air'])
        # Infer irradiance components from the selected cloud cover series.
        irradiance = self.cloud_cover_to_irradiance(processed[cloud_cover],
                                                    **kwargs)
        processed = processed.join(irradiance, how='outer')
        return processed[self.output_variables]
class HRRR(ForecastModel):
    """
    Subclass of the ForecastModel class representing HRRR
    forecast model.

    Model data corresponds to NCEP HRRR CONUS 2.5km resolution
    forecasts.

    Parameters
    ----------
    set_type: string, default 'best'
        Type of model to pull data from.

    Attributes
    ----------
    dataframe_variables: list
        Common variables present in the final set of data.
    model: string
        Name of the UNIDATA forecast model.
    model_type: string
        UNIDATA category in which the model is located.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    """

    def __init__(self, set_type='best'):
        model_type = 'Forecast Model Data'
        model = 'NCEP HRRR CONUS 2.5km'
        # Mapping from common pvlib names to dataset variable names. Note
        # temperature and wind come from height_above_ground levels here,
        # unlike the surface fields used by most other models.
        self.variables = {
            'temp_air': 'Temperature_height_above_ground',
            'pressure': 'Pressure_surface',
            'wind_speed_gust': 'Wind_speed_gust_surface',
            'wind_speed_u': 'u-component_of_wind_height_above_ground',
            'wind_speed_v': 'v-component_of_wind_height_above_ground',
            'total_clouds': 'Total_cloud_cover_entire_atmosphere',
            'low_clouds': 'Low_cloud_cover_low_cloud',
            'mid_clouds': 'Medium_cloud_cover_middle_cloud',
            'high_clouds': 'High_cloud_cover_high_cloud'}
        # Common variable names exposed after process_data().
        self.output_variables = [
            'temp_air',
            'wind_speed',
            'ghi',
            'dni',
            'dhi',
            'total_clouds',
            'low_clouds',
            'mid_clouds',
            'high_clouds', ]
        super(HRRR, self).__init__(model_type, model, set_type)

    def process_data(self, data, cloud_cover='total_clouds', **kwargs):
        """
        Defines the steps needed to convert raw forecast data
        into processed forecast data.

        Parameters
        ----------
        data: DataFrame
            Raw forecast data
        cloud_cover: str, default 'total_clouds'
            The type of cloud cover used to infer the irradiance.

        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        data = super(HRRR, self).process_data(data, **kwargs)
        # The server returns the u/v wind columns with a trailing '_0'
        # level suffix; rename them back to the common names before use.
        wind_mapping = {
            'wind_speed_u': 'u-component_of_wind_height_above_ground_0',
            'wind_speed_v': 'v-component_of_wind_height_above_ground_0',
        }
        data = self.rename(data, variables=wind_mapping)
        data['temp_air'] = self.kelvin_to_celsius(data['temp_air'])
        data['wind_speed'] = self.uv_to_speed(data)
        # Infer irradiance components from the selected cloud cover series.
        irrads = self.cloud_cover_to_irradiance(data[cloud_cover], **kwargs)
        data = data.join(irrads, how='outer')
        # Drop the final row; the last forecast point is unreliable.
        # NOTE(review): upstream comment only says 'issue with last point' --
        # confirm the root cause before removing this workaround.
        data = data.iloc[:-1, :]  # issue with last point
        return data[self.output_variables]
class NDFD(ForecastModel):
    """
    Subclass of the ForecastModel class representing the NDFD forecast
    model.

    Model data corresponds to NWS CONUS CONDUIT forecasts.

    Parameters
    ----------
    set_type: string, default 'best'
        Type of model to pull data from.

    Attributes
    ----------
    dataframe_variables: list
        Common variables present in the final set of data.
    model: string
        Name of the UNIDATA forecast model.
    model_type: string
        UNIDATA category in which the model is located.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    """

    def __init__(self, set_type='best'):
        model_type = 'Forecast Products and Analyses'
        model = 'National Weather Service CONUS Forecast Grids (CONDUIT)'
        # NDFD exposes far fewer fields than the numerical models: just
        # temperature, wind speed and a single total cloud cover series.
        self.variables = {
            'temp_air': 'Temperature_height_above_ground',
            'wind_speed': 'Wind_speed_height_above_ground',
            'total_clouds': 'Total_cloud_cover_surface', }
        # Common variable names exposed after process_data().
        self.output_variables = ['temp_air', 'wind_speed', 'ghi', 'dni',
                                 'dhi', 'total_clouds']
        super(NDFD, self).__init__(model_type, model, set_type)

    def process_data(self, data, **kwargs):
        """Convert raw NDFD forecast data into processed forecast data.

        Parameters
        ----------
        data: DataFrame
            Raw forecast data

        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        # NDFD provides only total cloud cover, so the irradiance source
        # is fixed rather than user-selectable.
        cloud_cover = 'total_clouds'
        processed = super(NDFD, self).process_data(data, **kwargs)
        processed['temp_air'] = self.kelvin_to_celsius(processed['temp_air'])
        irradiance = self.cloud_cover_to_irradiance(processed[cloud_cover],
                                                    **kwargs)
        processed = processed.join(irradiance, how='outer')
        return processed[self.output_variables]
class RAP(ForecastModel):
    """
    Subclass of the ForecastModel class representing RAP forecast model.

    Model data corresponds to Rapid Refresh CONUS 20km resolution
    forecasts.

    Parameters
    ----------
    resolution: string or int, default '20'
        The model resolution, either '20' or '40' (km)
    set_type: string, default 'best'
        Type of model to pull data from.

    Attributes
    ----------
    dataframe_variables: list
        Common variables present in the final set of data.
    model: string
        Name of the UNIDATA forecast model.
    model_type: string
        UNIDATA category in which the model is located.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    """

    # Supported resolutions (km) as strings, matching the catalog names.
    _resolutions = ['20', '40']

    def __init__(self, resolution='20', set_type='best'):
        # Accept ints as well as strings for convenience.
        resolution = str(resolution)
        if resolution not in self._resolutions:
            # Fixed message wording: previously read 'resolution must in ...'.
            raise ValueError(
                'resolution must be in {}'.format(self._resolutions))
        model_type = 'Forecast Model Data'
        model = 'Rapid Refresh CONUS {}km'.format(resolution)
        # Map pvlib's common variable names onto the RAP dataset names.
        self.variables = {
            'temp_air': 'Temperature_surface',
            'wind_speed_gust': 'Wind_speed_gust_surface',
            'total_clouds': 'Total_cloud_cover_entire_atmosphere',
            'low_clouds': 'Low_cloud_cover_low_cloud',
            'mid_clouds': 'Medium_cloud_cover_middle_cloud',
            'high_clouds': 'High_cloud_cover_high_cloud', }
        # Common variable names exposed after process_data().
        self.output_variables = [
            'temp_air',
            'wind_speed',
            'ghi',
            'dni',
            'dhi',
            'total_clouds',
            'low_clouds',
            'mid_clouds',
            'high_clouds', ]
        super(RAP, self).__init__(model_type, model, set_type)

    def process_data(self, data, cloud_cover='total_clouds', **kwargs):
        """
        Defines the steps needed to convert raw forecast data
        into processed forecast data.

        Parameters
        ----------
        data: DataFrame
            Raw forecast data
        cloud_cover: str, default 'total_clouds'
            The type of cloud cover used to infer the irradiance.

        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        data = super(RAP, self).process_data(data, **kwargs)
        # Convert Kelvin to Celsius and derive speed from the gust field.
        data['temp_air'] = self.kelvin_to_celsius(data['temp_air'])
        data['wind_speed'] = self.gust_to_speed(data)
        # Infer irradiance components from the selected cloud cover series.
        irrads = self.cloud_cover_to_irradiance(data[cloud_cover], **kwargs)
        data = data.join(irrads, how='outer')
        return data[self.output_variables]
| bsd-3-clause | 9,113,747,094,163,487,000 | 31.602752 | 79 | 0.57341 | false |
MorganR/gaussian-processes | main.py | 1 | 2611 | # Main test file
import numpy as np
import matplotlib.pyplot as plt
from model_tester import ModelTester, import_model_tester
from data_holder import DataHolder, get_mnist_data
from svgp_tester import SvgpTester
from vgp_tester import VgpTester
from mcmc_tester import McmcTester
# Experiment sweep: dataset sizes (images per digit) x inducing-point counts.
num_per_digits = [0]
num_inducing_inputs = [50]
# All ten MNIST digit classes; invariant across the sweep, so hoisted.
digits = np.arange(0, 10)

for num_per_digit in num_per_digits:
    # Load MNIST for the selected digits, then reduce to 50 PCA components.
    data = get_mnist_data(digits, num_per_digit).get_pca_data(50)
    # m_test = VgpTester(data, 'rbf')
    # m_test.train()
    # m_test.test(10000)
    for num_inducing_input in num_inducing_inputs:
        # Train and evaluate an MCMC-based GP classifier with a free
        # (optimised) set of inducing inputs.
        m_test = McmcTester(data, 'rbf', num_inducing_input, is_z_fixed=False)
        m_test.train()
        m_test.test()
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.cm as cm
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# colors = cm.rainbow(np.linspace(0, 1, num_digits))
# for i,c in zip(np.unique(Y), colors):
# ax.plot(X[Y==i,0], X[Y==i,1], zs=X[Y==i,2], linestyle='', marker='o', label=i, c=c)
# num_inducing=100
# m_test = SvgpTester(data, num_inducing)
# m_test.plot_z(axes, num_rows, num_cols)
# num_cols = min([len(digits), 5])
# num_rows = int(np.ceil(len(digits)/num_cols))
# fig, axes = plt.subplots(num_rows, num_cols)
# if num_rows == 1 and num_cols == 1:
# data.plot_pca_data(axes, num_per_digit)
# m_test.visualize_density(axes, 0)
# axes.legend()
# elif num_rows > 1:
# for r in np.arange(0, num_rows):
# for col in np.arange(0, num_cols):
# data.plot_pca_data(axes[r,col], 15)
# m_test.visualize_density(axes[r,col], r*num_cols + col)
# axes[r,col].set_title('{}'.format(digits[r*num_cols + col]))
# else:
# for col in np.arange(0, num_cols):
# data.plot_pca_data(axes[col], 15)
# m_test.visualize_density(axes[col], col)
# axes[col].set_title('{}'.format(digits[col]))
# fig.suptitle('GP Classification Using {} Images and {} Inducing Inputs'.format(
# data.y.size, m_test.inducing_inputs.shape[0]
# ))
# fig.set_size_inches(18, 8)
# fig.subplots_adjust(top=0.91, bottom=0.05)
# fig.savefig('{}-{}.png'.format(data.y.size, m_test.inducing_inputs.shape[0]))
# fig.suptitle('GP Classification on 2D Data\nUsing {} Images with VGP'.format(
# data.y.size
# ))
# fig.set_size_inches(7, 4)
# fig.subplots_adjust(top=0.8, bottom=0.05)
# fig.savefig('vgp-{}c-{}.png'.format(len(digits), data.y.size))
# plt.show() | mit | -2,337,207,912,695,031,300 | 34.780822 | 89 | 0.613558 | false |
cggh/scikit-allel | allel/stats/diversity.py | 1 | 38740 | # -*- coding: utf-8 -*-
import logging
import numpy as np
from allel.model.ndarray import SortedIndex, AlleleCountsArray
from allel.model.util import locate_fixed_differences
from allel.util import asarray_ndim, ignore_invalid, check_dim0_aligned, \
ensure_dim1_aligned, mask_inaccessible
from allel.stats.window import windowed_statistic, per_base, moving_statistic
# Module-level logger; ``debug`` is aliased for brevity at call sites.
logger = logging.getLogger(__name__)
debug = logger.debug
def mean_pairwise_difference(ac, an=None, fill=np.nan):
    """Compute, for each variant, the mean number of pairwise differences
    between chromosomes sampled from within a single population.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    an : array_like, int, shape (n_variants,), optional
        Allele numbers. If not provided, will be calculated from `ac`.
    fill : float
        Use this value where there are no pairs to compare (e.g.,
        all allele calls are missing).

    Returns
    -------
    mpd : ndarray, float, shape (n_variants,)

    Notes
    -----
    The values returned by this function can be summed over a genome
    region and divided by the number of accessible bases to estimate
    nucleotide diversity, a.k.a. *pi*.

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
    ...                           [0, 0, 0, 1],
    ...                           [0, 0, 1, 1],
    ...                           [0, 1, 1, 1],
    ...                           [1, 1, 1, 1],
    ...                           [0, 0, 1, 2],
    ...                           [0, 1, 1, 2],
    ...                           [0, 1, -1, -1]])
    >>> ac = h.count_alleles()
    >>> allel.mean_pairwise_difference(ac)
    array([0.        , 0.5       , 0.66666667, 0.5       , 0.        ,
           0.83333333, 0.83333333, 1.        ])

    See Also
    --------
    sequence_diversity, windowed_diversity

    """
    # Works for any number of alleles: all haplotype pairs within a single
    # population are compared using allele counts alone.

    # validate inputs
    ac = asarray_ndim(ac, 2)

    # total number of haplotypes per variant
    if an is None:
        an = np.sum(ac, axis=1)
    else:
        an = asarray_ndim(an, 1)
        check_dim0_aligned(ac, an)

    # number of distinct haplotype pairs per variant: (an choose 2)
    pairs_total = an * (an - 1) / 2

    # pairs where both haplotypes carry the same allele:
    # sum over alleles of (ac choose 2)
    pairs_same = np.sum(ac * (ac - 1) / 2, axis=1)

    # pairs that differ
    pairs_diff = pairs_total - pairs_same

    # average over pairs, substituting `fill` where no pairs exist
    with ignore_invalid():
        result = np.where(pairs_total > 0, pairs_diff / pairs_total, fill)

    return result
def mean_pairwise_difference_between(ac1, ac2, an1=None, an2=None,
                                     fill=np.nan):
    """Compute, for each variant, the mean number of pairwise differences
    between chromosomes sampled from two different populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    an1 : array_like, int, shape (n_variants,), optional
        Allele numbers for the first population. If not provided, will be
        calculated from `ac1`.
    an2 : array_like, int, shape (n_variants,), optional
        Allele numbers for the second population. If not provided, will be
        calculated from `ac2`.
    fill : float
        Use this value where there are no pairs to compare (e.g.,
        all allele calls are missing).

    Returns
    -------
    mpd : ndarray, float, shape (n_variants,)

    Notes
    -----
    The values returned by this function can be summed over a genome
    region and divided by the number of accessible bases to estimate
    nucleotide divergence between two populations, a.k.a. *Dxy*.

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
    ...                           [0, 0, 0, 1],
    ...                           [0, 0, 1, 1],
    ...                           [0, 1, 1, 1],
    ...                           [1, 1, 1, 1],
    ...                           [0, 0, 1, 2],
    ...                           [0, 1, 1, 2],
    ...                           [0, 1, -1, -1]])
    >>> ac1 = h.count_alleles(subpop=[0, 1])
    >>> ac2 = h.count_alleles(subpop=[2, 3])
    >>> allel.mean_pairwise_difference_between(ac1, ac2)
    array([0.  , 0.5 , 1.  , 0.5 , 0.  , 1.  , 0.75,  nan])

    See Also
    --------
    sequence_divergence, windowed_divergence

    """
    # Works for any number of alleles: all cross-population haplotype pairs
    # are compared using allele counts alone.

    # validate and harmonise inputs
    ac1 = asarray_ndim(ac1, 2)
    ac2 = asarray_ndim(ac2, 2)
    check_dim0_aligned(ac1, ac2)
    ac1, ac2 = ensure_dim1_aligned(ac1, ac2)

    # total number of haplotypes sampled from each population
    if an1 is None:
        an1 = np.sum(ac1, axis=1)
    else:
        an1 = asarray_ndim(an1, 1)
        check_dim0_aligned(ac1, an1)
    if an2 is None:
        an2 = np.sum(ac2, axis=1)
    else:
        an2 = asarray_ndim(an2, 1)
        check_dim0_aligned(ac2, an2)

    # number of cross-population haplotype pairs per variant
    pairs_total = an1 * an2

    # pairs where both haplotypes carry the same allele:
    # sum over alleles of (ac1 * ac2)
    pairs_same = np.sum(ac1 * ac2, axis=1)

    # pairs that differ
    pairs_diff = pairs_total - pairs_same

    # average over pairs, substituting `fill` where no pairs exist
    with ignore_invalid():
        result = np.where(pairs_total > 0, pairs_diff / pairs_total, fill)

    return result
def sequence_diversity(pos, ac, start=None, stop=None,
                       is_accessible=None):
    """Estimate nucleotide diversity within a given region, which is the
    average proportion of sites (including monomorphic sites not present in the
    data) that differ between randomly chosen pairs of chromosomes.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    pi : float
        Nucleotide diversity.

    Notes
    -----
    If start and/or stop are not provided, uses the difference between the
    last and the first position as a proxy for the total number of sites,
    which can overestimate the sequence diversity.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> pi = allel.sequence_diversity(pos, ac, start=1, stop=31)
    >>> pi
    0.13978494623655915

    """
    # coerce positions to a sorted index and validate the other inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    ac = asarray_ndim(ac, 2)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # drop sites flagged as inaccessible
    pos, ac = mask_inaccessible(is_accessible, pos, ac)

    # restrict to the requested subregion, if any
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac = ac[loc]
    # default region bounds to the span of the remaining variants
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # total pairwise differences summed across variants
    diff_total = np.sum(mean_pairwise_difference(ac, fill=0))

    # normalise by the number of (accessible) bases in the region;
    # positions are 1-based, hence the start-1 offset
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    return diff_total / n_bases
def sequence_divergence(pos, ac1, ac2, an1=None, an2=None, start=None,
                        stop=None, is_accessible=None):
    """Estimate nucleotide divergence between two populations within a
    given region, which is the average proportion of sites (including
    monomorphic sites not present in the data) that differ between randomly
    chosen pairs of chromosomes, one from each population.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    an1 : array_like, int, shape (n_variants,), optional
        Allele numbers for the first population. If not provided, will be
        calculated from `ac1`.
    an2 : array_like, int, shape (n_variants,), optional
        Allele numbers for the second population. If not provided, will be
        calculated from `ac2`.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    Dxy : float
        Nucleotide divergence.

    Examples
    --------
    Simplest case, two haplotypes in each population::

        >>> import allel
        >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
        ...                           [0, 0, 0, 1],
        ...                           [0, 0, 1, 1],
        ...                           [0, 1, 1, 1],
        ...                           [1, 1, 1, 1],
        ...                           [0, 0, 1, 2],
        ...                           [0, 1, 1, 2],
        ...                           [0, 1, -1, -1],
        ...                           [-1, -1, -1, -1]])
        >>> ac1 = h.count_alleles(subpop=[0, 1])
        >>> ac2 = h.count_alleles(subpop=[2, 3])
        >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
        >>> dxy = allel.sequence_divergence(pos, ac1, ac2, start=1, stop=31)
        >>> dxy
        0.12096774193548387

    """
    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    ac1 = asarray_ndim(ac1, 2)
    ac2 = asarray_ndim(ac2, 2)
    # allele numbers are optional; validate only when supplied
    if an1 is not None:
        an1 = asarray_ndim(an1, 1)
    if an2 is not None:
        an2 = asarray_ndim(an2, 1)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # masking inaccessible sites from pos and ac
    pos, ac1, ac2 = mask_inaccessible(is_accessible, pos, ac1, ac2)

    # handle start/stop; subset every supplied array consistently
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac1 = ac1[loc]
        ac2 = ac2[loc]
        if an1 is not None:
            an1 = an1[loc]
        if an2 is not None:
            an2 = an2[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # calculate mean pairwise difference between the two populations
    mpd = mean_pairwise_difference_between(ac1, ac2, an1=an1, an2=an2, fill=0)

    # sum differences over variants
    mpd_sum = np.sum(mpd)

    # calculate value per base, N.B., expect pos is 1-based
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    dxy = mpd_sum / n_bases

    return dxy
def windowed_diversity(pos, ac, size=None, start=None, stop=None, step=None,
                       windows=None, is_accessible=None, fill=np.nan):
    """Estimate nucleotide diversity in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    pi : ndarray, float, shape (n_windows,)
        Nucleotide diversity in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> pi, windows, n_bases, counts = allel.windowed_diversity(
    ...     pos, ac, size=10, start=1, stop=31
    ... )
    >>> pi
    array([0.11666667, 0.21666667, 0.09090909])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 31]])
    >>> n_bases
    array([10, 10, 11])
    >>> counts
    array([3, 4, 2])

    """
    # coerce positions to a sorted index and validate accessibility mask
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # drop sites flagged as inaccessible
    pos, ac = mask_inaccessible(is_accessible, pos, ac)

    # per-variant mean pairwise difference within the population
    per_variant = mean_pairwise_difference(ac, fill=0)

    # aggregate differences within each window
    diff_sums, windows, counts = windowed_statistic(
        pos, values=per_variant, statistic=np.sum, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=0
    )

    # normalise by the number of (accessible) bases per window
    pi, n_bases = per_base(diff_sums, windows, is_accessible=is_accessible,
                           fill=fill)

    return pi, windows, n_bases, counts
def windowed_divergence(pos, ac1, ac2, size=None, start=None, stop=None,
                        step=None, windows=None, is_accessible=None,
                        fill=np.nan):
    """Estimate nucleotide divergence between two populations in windows
    over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    Dxy : ndarray, float, shape (n_windows,)
        Nucleotide divergence in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    Simplest case, two haplotypes in each population::

        >>> import allel
        >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
        ...                           [0, 0, 0, 1],
        ...                           [0, 0, 1, 1],
        ...                           [0, 1, 1, 1],
        ...                           [1, 1, 1, 1],
        ...                           [0, 0, 1, 2],
        ...                           [0, 1, 1, 2],
        ...                           [0, 1, -1, -1],
        ...                           [-1, -1, -1, -1]])
        >>> ac1 = h.count_alleles(subpop=[0, 1])
        >>> ac2 = h.count_alleles(subpop=[2, 3])
        >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
        >>> dxy, windows, n_bases, counts = allel.windowed_divergence(
        ...     pos, ac1, ac2, size=10, start=1, stop=31
        ... )
        >>> dxy
        array([0.15 , 0.225, 0.   ])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 31]])
        >>> n_bases
        array([10, 10, 11])
        >>> counts
        array([3, 4, 2])

    """
    # coerce positions to a sorted index and validate accessibility mask
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # drop sites flagged as inaccessible
    pos, ac1, ac2 = mask_inaccessible(is_accessible, pos, ac1, ac2)

    # per-variant mean pairwise difference between the two populations
    per_variant = mean_pairwise_difference_between(ac1, ac2, fill=0)

    # aggregate differences within each window
    diff_sums, windows, counts = windowed_statistic(
        pos, values=per_variant, statistic=np.sum, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=0
    )

    # normalise by the number of (accessible) bases per window
    dxy, n_bases = per_base(diff_sums, windows, is_accessible=is_accessible,
                            fill=fill)

    return dxy, windows, n_bases, counts
def windowed_df(pos, ac1, ac2, size=None, start=None, stop=None, step=None,
                windows=None, is_accessible=None, fill=np.nan):
    """Calculate the density of fixed differences between two populations
    in windows over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    df : ndarray, float, shape (n_windows,)
        Per-base density of fixed differences in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    See Also
    --------
    allel.model.locate_fixed_differences

    """
    # coerce positions to a sorted index and validate accessibility mask
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # drop sites flagged as inaccessible
    pos, ac1, ac2 = mask_inaccessible(is_accessible, pos, ac1, ac2)

    # boolean locator for variants fixed for different alleles
    fixed_loc = locate_fixed_differences(ac1, ac2)

    # count fixed differences within each window
    fixed_counts, windows, counts = windowed_statistic(
        pos, values=fixed_loc, statistic=np.count_nonzero, size=size,
        start=start, stop=stop, step=step, windows=windows, fill=0
    )

    # convert counts to per-base density
    df, n_bases = per_base(fixed_counts, windows, is_accessible=is_accessible,
                           fill=fill)

    return df, windows, n_bases, counts
# noinspection PyPep8Naming
def watterson_theta(pos, ac, start=None, stop=None,
                    is_accessible=None):
    """Compute Watterson's estimator (theta hat per base) over a given
    region.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    theta_hat_w : float
        Watterson's estimator (theta hat per base).

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31)
    >>> theta_hat_w
    0.10557184750733138

    """
    # Normalise inputs to the expected container types.
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # Drop variants located at inaccessible positions.
    pos, ac = mask_inaccessible(is_accessible, pos, ac)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # Restrict to the requested subregion, if any.
    if not (start is None and stop is None):
        region = pos.locate_range(start, stop)
        pos = pos[region]
        ac = ac[region]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # Number of segregating sites.
    n_seg = ac.count_segregating()

    # Number of chromosomes sampled, assumed constant across variants.
    n_chroms = ac.sum(axis=1).max()

    # Harmonic number H_{n-1}, the standard Watterson denominator.
    a1 = np.sum(1 / np.arange(1, n_chroms))

    # Absolute (region-wide) estimate.
    theta_abs = n_seg / a1

    # Normalise per (accessible) base; is_accessible uses 0-based indexing,
    # hence the start-1 offset from the 1-based coordinates.
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start - 1:stop])
    return theta_abs / n_bases
# noinspection PyPep8Naming
def windowed_watterson_theta(pos, ac, size=None, start=None, stop=None,
                             step=None, windows=None, is_accessible=None,
                             fill=np.nan):
    """Calculate the value of Watterson's estimator in windows over a single
    chromosome/contig.
    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in the
        chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.
    Returns
    -------
    theta_hat_w : ndarray, float, shape (n_windows,)
        Watterson's estimator (theta hat per base).
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop) positions,
        using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.
    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w, windows, n_bases, counts = allel.windowed_watterson_theta(
    ...     pos, ac, size=10, start=1, stop=31
    ... )
    >>> theta_hat_w
    array([0.10909091, 0.16363636, 0.04958678])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 31]])
    >>> n_bases
    array([10, 10, 11])
    >>> counts
    array([3, 4, 2])
    """ # flake8: noqa
    # check inputs; pos and ac must be aligned, i.e. pos[i] is the position
    # of the variant described by ac[i]
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    # masking inaccessible sites from pos and ac
    pos, ac = mask_inaccessible(is_accessible, pos, ac)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)
    # locate segregating variants (boolean array, one entry per variant)
    is_seg = ac.is_segregating()
    # count segregating variants in windows; S holds one count per window
    S, windows, counts = windowed_statistic(pos, is_seg,
                                            statistic=np.count_nonzero,
                                            size=size, start=start,
                                            stop=stop, step=step,
                                            windows=windows, fill=0)
    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()
    # (n-1)th harmonic number, the standard Watterson denominator
    a1 = np.sum(1 / np.arange(1, n))
    # absolute value of Watterson's theta per window (not yet per base)
    theta_hat_w_abs = S / a1
    # theta per base, normalising by (accessible) window length
    theta_hat_w, n_bases = per_base(theta_hat_w_abs, windows=windows,
                                    is_accessible=is_accessible, fill=fill)
    return theta_hat_w, windows, n_bases, counts
# noinspection PyPep8Naming
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3):
    """Compute Tajima's D over a given region.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    pos : array_like, int, shape (n_items,), optional
        Variant positions, using 1-based coordinates, in ascending order.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    min_sites : int, optional
        Minimum number of segregating sites required to compute a value;
        np.nan is returned when there are fewer. Defaults to 3.

    Returns
    -------
    D : float

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> allel.tajima_d(ac)
    3.1445848780213814
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> allel.tajima_d(ac, pos=pos, start=7, stop=25)
    3.8779735196179366

    """
    # Coerce to an allele counts array if needed.
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # Restrict to the requested subregion, if positions were supplied.
    if pos is not None and not (start is None and stop is None):
        if not isinstance(pos, SortedIndex):
            pos = SortedIndex(pos, copy=False)
        region = pos.locate_range(start, stop)
        ac = ac[region]

    # Number of segregating sites; bail out if too few for a meaningful D.
    S = ac.count_segregating()
    if S < min_sites:
        return np.nan

    # Number of chromosomes sampled, assumed constant across variants.
    n = ac.sum(axis=1).max()

    # Harmonic sums over 1..n-1 (a1 and a2 in Tajima's notation).
    a1 = np.sum(1 / np.arange(1, n))
    a2 = np.sum(1 / (np.arange(1, n)**2))

    # Absolute (not per-base) Watterson's theta.
    theta_w = S / a1

    # Absolute theta-pi: mean pairwise differences summed over variants.
    theta_pi = np.sum(mean_pairwise_difference(ac, fill=0))

    # Numerator: difference between the two theta estimates. Both would
    # normally be divided by the number of (accessible) bases, but here the
    # absolute difference is what the statistic requires.
    d = theta_pi - theta_w

    # Denominator: standard deviation of d under the neutral model, using
    # the b/c/e constants from Tajima (1989).
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)
    d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))

    return d / d_stdev
# noinspection PyPep8Naming
def windowed_tajima_d(pos, ac, size=None, start=None, stop=None,
                      step=None, windows=None, min_sites=3):
    """Calculate the value of Tajima's D in windows over a single
    chromosome/contig.
    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value. If
        there are fewer, np.nan is returned. Defaults to 3.
    Returns
    -------
    D : ndarray, float, shape (n_windows,)
        Tajima's D.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop) positions,
        using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.
    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 20, 22, 25, 27]
    >>> D, windows, counts = allel.windowed_tajima_d(pos, ac, size=20, step=10, start=1, stop=31)
    >>> D
    array([1.36521524, 4.22566622])
    >>> windows
    array([[ 1, 20],
           [11, 31]])
    >>> counts
    array([6, 6])
    """
    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)
    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()
    # calculate constants; these depend only on n, so they are computed once
    # here and captured by the per-window statistic closure below
    a1 = np.sum(1 / np.arange(1, n))
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)
    # locate segregating variants (boolean array, one entry per variant)
    is_seg = ac.is_segregating()
    # calculate mean pairwise difference per variant
    mpd = mean_pairwise_difference(ac, fill=0)
    # define statistic to compute for each window; receives the per-window
    # slices of (is_seg, mpd) and returns Tajima's D for that window
    # noinspection PyPep8Naming
    def statistic(w_is_seg, w_mpd):
        # number of segregating sites within the window
        S = np.count_nonzero(w_is_seg)
        if S < min_sites:
            return np.nan
        # absolute theta-pi for the window
        pi = np.sum(w_mpd)
        # numerator: theta-pi minus absolute Watterson's theta (S / a1)
        d = pi - (S / a1)
        # denominator: standard deviation of d under neutrality
        d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))
        wD = d / d_stdev
        return wD
    D, windows, counts = windowed_statistic(pos, values=(is_seg, mpd),
                                            statistic=statistic, size=size,
                                            start=start, stop=stop, step=step,
                                            windows=windows, fill=np.nan)
    return D, windows, counts
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
    """Compute Tajima's D in moving windows of `size` variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    d : ndarray, float, shape (n_windows,)
        Tajima's D.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> D = allel.moving_tajima_d(ac, size=4, step=2)
    >>> D
    array([0.1676558 , 2.01186954, 5.70029703])

    """
    # Delegate to the generic moving-window machinery, applying tajima_d to
    # each window of `size` variants; min_sites is forwarded through.
    return moving_statistic(values=ac, statistic=tajima_d, size=size,
                            start=start, stop=stop, step=step,
                            min_sites=min_sites)
| mit | 9,152,583,474,148,451,000 | 33.374445 | 97 | 0.549535 | false |
cosmir/dev-set-builder | audioset/util.py | 1 | 2590 | from joblib import Parallel, delayed
import numpy as np
import os
import pandas as pd
import tensorflow as tf
from audioset import AUDIO_EMBEDDING_FEATURE_NAME, LABELS
from audioset import START_TIME, TIME, VIDEO_ID
def filebase(fname):
return os.path.splitext(os.path.basename(fname))[0]
def safe_makedirs(dpath):
if not os.path.exists(dpath) and dpath:
os.makedirs(dpath)
def bytestring_to_record(example):
"""Convert a serialized tf.SequenceExample to a dict of python/numpy types.
Parameters
----------
example : str
A single serialized tf.SequenceExample
Returns
-------
features : np.array, shape=(n, 128)
Array of feature coefficients over time (axis=0).
meta : pd.DataFrame, len=n
Corresponding labels and metadata for these features.
"""
rec = tf.train.SequenceExample.FromString(example)
start_time = rec.context.feature[START_TIME].float_list.value[0]
vid_id = rec.context.feature[VIDEO_ID].bytes_list.value[0].decode('utf-8')
labels = list(rec.context.feature[LABELS].int64_list.value)
data = rec.feature_lists.feature_list[AUDIO_EMBEDDING_FEATURE_NAME]
features = [b.bytes_list.value for b in data.feature]
features = np.asarray([np.frombuffer(_[0], dtype=np.uint8)
for _ in features])
if features.ndim == 1:
raise ValueError("Caught unexpected feature shape: {}"
.format(features.shape))
rows = [{VIDEO_ID: vid_id, LABELS: labels, TIME: np.uint16(start_time + t)}
for t in range(len(features))]
return features, pd.DataFrame.from_records(data=rows)
def load_tfrecord(fname, n_jobs=1, verbose=0):
"""Transform a YouTube-8M style tfrecord file to numpy / pandas objects.
Parameters
----------
fname : str
Filepath on disk to read.
n_jobs : int, default=-2
Number of cores to use, defaults to all but one.
verbose : int, default=0
Verbosity level for loading.
Returns
-------
features : np.array, shape=(n_obs, n_coeffs)
All observations, concatenated together,
meta : pd.DataFrame
Table of metadata aligned to the features, indexed by `filebase.idx`
"""
dfx = delayed(bytestring_to_record)
pool = Parallel(n_jobs=n_jobs, verbose=verbose)
results = pool(dfx(x) for x in tf.python_io.tf_record_iterator(fname))
features = np.concatenate([xy[0] for xy in results], axis=0)
meta = pd.concat([xy[1] for xy in results], axis=0, ignore_index=True)
return features, meta
| mit | 4,450,234,088,858,610,700 | 30.975309 | 79 | 0.657529 | false |
kubeflow/pipelines | components/PyTorch/pytorch-kfp-components/pytorch_kfp_components/components/visualization/executor.py | 1 | 9980 | #!/usr/bin/env/python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization Executor Class."""
# pylint: disable=C0103
# pylint: disable=R0201
import json
import os
import tempfile
from pathlib import Path
from urllib.parse import urlparse
import pandas as pd
from sklearn.metrics import confusion_matrix
from pytorch_kfp_components.components.base.base_executor import BaseExecutor
from pytorch_kfp_components.components.minio.component import MinIO
from pytorch_kfp_components.types import standard_component_specs
class Executor(BaseExecutor): # pylint: disable=R0903
"""Visualization Executor Class."""
def __init__(self):
super(Executor, self).__init__() # pylint: disable=R1725
self.mlpipeline_ui_metadata = None
self.mlpipeline_metrics = None
def _write_ui_metadata(
self, metadata_filepath, metadata_dict, key="outputs"
):
"""Function to write the metadata to UI."""
if not os.path.exists(metadata_filepath):
metadata = {key: [metadata_dict]}
else:
with open(metadata_filepath) as fp:
metadata = json.load(fp)
metadata_outputs = metadata[key]
metadata_outputs.append(metadata_dict)
print("Writing to file: {}".format(metadata_filepath))
with open(metadata_filepath, "w") as fp:
json.dump(metadata, fp)
def _generate_markdown(self, markdown_dict):
"""Generates a markdown.
Args:
markdown_dict : dict of markdown specifications
"""
source_str = json.dumps(
markdown_dict["source"], sort_keys=True, indent=4
)
source = f"```json \n {source_str} ```"
markdown_metadata = {
"storage": markdown_dict["storage"],
"source": source,
"type": "markdown",
}
self._write_ui_metadata(
metadata_filepath=self.mlpipeline_ui_metadata,
metadata_dict=markdown_metadata,
)
def _generate_confusion_matrix_metadata(
self, confusion_matrix_path, classes
):
"""Generates the confusion matrix metadata and writes in ui."""
print("Generating Confusion matrix Metadata")
metadata = {
"type": "confusion_matrix",
"format": "csv",
"schema": [
{"name": "target", "type": "CATEGORY"},
{"name": "predicted", "type": "CATEGORY"},
{"name": "count", "type": "NUMBER"},
],
"source": confusion_matrix_path,
"labels": list(map(str, classes)),
}
self._write_ui_metadata(
metadata_filepath=self.mlpipeline_ui_metadata,
metadata_dict=metadata,
)
def _upload_confusion_matrix_to_minio(
self, confusion_matrix_url, confusion_matrix_output_path
):
parse_obj = urlparse(confusion_matrix_url, allow_fragments=False)
bucket_name = parse_obj.netloc
folder_name = str(parse_obj.path).lstrip("/")
# TODO: # pylint: disable=W0511
endpoint = "minio-service.kubeflow:9000"
MinIO(
source=confusion_matrix_output_path,
bucket_name=bucket_name,
destination=folder_name,
endpoint=endpoint,
)
def _generate_confusion_matrix(
self, confusion_matrix_dict
): # pylint: disable=R0914
"""Generates confusion matrix in minio."""
actuals = confusion_matrix_dict["actuals"]
preds = confusion_matrix_dict["preds"]
confusion_matrix_url = confusion_matrix_dict["url"]
# Generating confusion matrix
df = pd.DataFrame(
list(zip(actuals, preds)), columns=["target", "predicted"]
)
vocab = list(df["target"].unique())
cm = confusion_matrix(df["target"], df["predicted"], labels=vocab)
data = []
for target_index, target_row in enumerate(cm):
for predicted_index, count in enumerate(target_row):
data.append(
(vocab[target_index], vocab[predicted_index], count)
)
confusion_matrix_df = pd.DataFrame(
data, columns=["target", "predicted", "count"]
)
confusion_matrix_output_dir = str(tempfile.mkdtemp())
confusion_matrix_output_path = os.path.join(
confusion_matrix_output_dir, "confusion_matrix.csv"
)
# saving confusion matrix
confusion_matrix_df.to_csv(
confusion_matrix_output_path, index=False, header=False
)
self._upload_confusion_matrix_to_minio(
confusion_matrix_url=confusion_matrix_url,
confusion_matrix_output_path=confusion_matrix_output_path,
)
# Generating metadata
self._generate_confusion_matrix_metadata(
confusion_matrix_path=os.path.join(
confusion_matrix_url, "confusion_matrix.csv"
),
classes=vocab,
)
def _visualize_accuracy_metric(self, accuracy):
"""Generates the visualization for accuracy."""
metadata = {
"name": "accuracy-score",
"numberValue": accuracy,
"format": "PERCENTAGE",
}
self._write_ui_metadata(
metadata_filepath=self.mlpipeline_metrics,
metadata_dict=metadata,
key="metrics",
)
def _get_fn_args(self, input_dict: dict, exec_properties: dict):
"""Extracts the confusion matrix dict, test accuracy, markdown from the
input dict and mlpipeline ui metadata & metrics from exec_properties.
Args:
input_dict : a dictionary of inputs,
example: confusion matrix dict, markdown
exe_properties : a dict of execution properties
example : mlpipeline_ui_metadata
Returns:
confusion_matrix_dict : dict of confusion metrics
test_accuracy : model test accuracy metrics
markdown : markdown dict
mlpipeline_ui_metadata : path of ui metadata
mlpipeline_metrics : metrics to be uploaded
"""
confusion_matrix_dict = input_dict.get(
standard_component_specs.VIZ_CONFUSION_MATRIX_DICT
)
test_accuracy = input_dict.get(
standard_component_specs.VIZ_TEST_ACCURACY
)
markdown = input_dict.get(standard_component_specs.VIZ_MARKDOWN)
mlpipeline_ui_metadata = exec_properties.get(
standard_component_specs.VIZ_MLPIPELINE_UI_METADATA
)
mlpipeline_metrics = exec_properties.get(
standard_component_specs.VIZ_MLPIPELINE_METRICS
)
return (
confusion_matrix_dict,
test_accuracy,
markdown,
mlpipeline_ui_metadata,
mlpipeline_metrics,
)
def _set_defalt_mlpipeline_path(
self, mlpipeline_ui_metadata: str, mlpipeline_metrics: str
):
"""Sets the default mlpipeline path."""
if mlpipeline_ui_metadata:
Path(os.path.dirname(mlpipeline_ui_metadata)).mkdir(
parents=True, exist_ok=True
)
else:
mlpipeline_ui_metadata = "/mlpipeline-ui-metadata.json"
if mlpipeline_metrics:
Path(os.path.dirname(mlpipeline_metrics)).mkdir(
parents=True, exist_ok=True
)
else:
mlpipeline_metrics = "/mlpipeline-metrics.json"
return mlpipeline_ui_metadata, mlpipeline_metrics
def Do(self, input_dict: dict, output_dict: dict, exec_properties: dict):
"""Executes the visualization process and uploads to minio
Args:
input_dict : a dictionary of inputs,
example: confusion matrix dict, markdown
output_dict :
exec_properties : a dict of execution properties
example : mlpipeline_ui_metadata
"""
(
confusion_matrix_dict,
test_accuracy,
markdown,
mlpipeline_ui_metadata,
mlpipeline_metrics,
) = self._get_fn_args(
input_dict=input_dict, exec_properties=exec_properties
)
(
self.mlpipeline_ui_metadata,
self.mlpipeline_metrics,
) = self._set_defalt_mlpipeline_path(
mlpipeline_ui_metadata=mlpipeline_ui_metadata,
mlpipeline_metrics=mlpipeline_metrics,
)
if not (confusion_matrix_dict or test_accuracy or markdown):
raise ValueError(
"Any one of these keys should be set - "
"confusion_matrix_dict, test_accuracy, markdown"
)
if confusion_matrix_dict:
self._generate_confusion_matrix(
confusion_matrix_dict=confusion_matrix_dict,
)
if test_accuracy:
self._visualize_accuracy_metric(accuracy=test_accuracy)
if markdown:
self._generate_markdown(markdown_dict=markdown)
output_dict[
standard_component_specs.VIZ_MLPIPELINE_UI_METADATA
] = self.mlpipeline_ui_metadata
output_dict[
standard_component_specs.VIZ_MLPIPELINE_METRICS
] = self.mlpipeline_metrics
| apache-2.0 | 7,535,007,747,309,917,000 | 33.895105 | 79 | 0.594088 | false |
OpenPIV/openpiv-python | openpiv/windef.py | 2 | 32332 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 14:04:04 2019
@author: Theo
@modified: Alex, Erich
"""
import os
import numpy as np
import scipy.ndimage as scn
from scipy.interpolate import RectBivariateSpline
import matplotlib.pyplot as plt
from openpiv.tools import imread, Multiprocesser, display_vector_field, \
transform_coordinates
from openpiv import validation, filters, tools, preprocess, scaling
from openpiv.pyprocess import extended_search_area_piv, get_coordinates, \
get_field_shape
from openpiv import smoothn
from skimage.util import invert
def piv(settings):
""" the func fuction is the "frame" in which the PIV evaluation is done """
def func(args):
"""A function to process each image pair."""
# this line is REQUIRED for multiprocessing to work
# always use it in your custom function
file_a, file_b, counter = args
# counter2=str(counter2)
#####################
# Here goes you code
#####################
" read images into numpy arrays"
frame_a = imread(os.path.join(settings.filepath_images, file_a))
frame_b = imread(os.path.join(settings.filepath_images, file_b))
# Miguel: I just had a quick look, and I do not understand the reason
# for this step.
# I propose to remove it.
# frame_a = (frame_a*1024).astype(np.int32)
# frame_b = (frame_b*1024).astype(np.int32)
" crop to ROI"
if settings.ROI == "full":
frame_a = frame_a
frame_b = frame_b
else:
frame_a = frame_a[
settings.ROI[0]:settings.ROI[1],
settings.ROI[2]:settings.ROI[3]
]
frame_b = frame_b[
settings.ROI[0]:settings.ROI[1],
settings.ROI[2]:settings.ROI[3]
]
if settings.invert is True:
frame_a = invert(frame_a)
frame_b = invert(frame_b)
if settings.show_all_plots:
fig, ax = plt.subplots(1, 1)
ax.imshow(frame_a, cmap=plt.get_cmap('Reds'))
ax.imshow(frame_b, cmap=plt.get_cmap('Blues'), alpha=.5)
plt.show()
if settings.dynamic_masking_method in ("edge", "intensity"):
frame_a, mask_a = preprocess.dynamic_masking(
frame_a,
method=settings.dynamic_masking_method,
filter_size=settings.dynamic_masking_filter_size,
threshold=settings.dynamic_masking_threshold,
)
frame_b, mask_b = preprocess.dynamic_masking(
frame_b,
method=settings.dynamic_masking_method,
filter_size=settings.dynamic_masking_filter_size,
threshold=settings.dynamic_masking_threshold,
)
# "first pass"
x, y, u, v, s2n = first_pass(
frame_a,
frame_b,
settings
)
if settings.show_all_plots:
plt.figure()
plt.quiver(x, y, u, -v, color='b')
# plt.gca().invert_yaxis()
# plt.gca().set_aspect(1.)
# plt.title('after first pass, invert')
# plt.show()
# " Image masking "
if settings.image_mask:
image_mask = np.logical_and(mask_a, mask_b)
mask_coords = preprocess.mask_coordinates(image_mask)
# mark those points on the grid of PIV inside the mask
grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
# mask the velocity
u = np.ma.masked_array(u, mask=grid_mask)
v = np.ma.masked_array(v, mask=grid_mask)
else:
mask_coords = []
u = np.ma.masked_array(u, mask=np.ma.nomask)
v = np.ma.masked_array(v, mask=np.ma.nomask)
if settings.validation_first_pass:
u, v, mask = validation.typical_validation(u, v, s2n, settings)
if settings.show_all_plots:
# plt.figure()
plt.quiver(x, y, u, -v, color='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect(1.)
plt.title('after first pass validation new, inverted')
plt.show()
# "filter to replace the values that where marked by the validation"
if settings.num_iterations == 1 and settings.replace_vectors:
# for multi-pass we cannot have holes in the data
# after the first pass
u, v = filters.replace_outliers(
u,
v,
method=settings.filter_method,
max_iter=settings.max_filter_iteration,
kernel_size=settings.filter_kernel_size,
)
# don't even check if it's true or false
elif settings.num_iterations > 1:
u, v = filters.replace_outliers(
u,
v,
method=settings.filter_method,
max_iter=settings.max_filter_iteration,
kernel_size=settings.filter_kernel_size,
)
# "adding masks to add the effect of all the validations"
if settings.smoothn:
u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
u, s=settings.smoothn_p
)
v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
v, s=settings.smoothn_p
)
if settings.image_mask:
grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
u = np.ma.masked_array(u, mask=grid_mask)
v = np.ma.masked_array(v, mask=grid_mask)
else:
u = np.ma.masked_array(u, np.ma.nomask)
v = np.ma.masked_array(v, np.ma.nomask)
if settings.show_all_plots:
plt.figure()
plt.quiver(x, y, u, -v)
plt.gca().invert_yaxis()
plt.gca().set_aspect(1.)
plt.title('before multi pass, inverted')
plt.show()
if not isinstance(u, np.ma.MaskedArray):
raise ValueError("Expected masked array")
""" Multi pass """
for i in range(1, settings.num_iterations):
if not isinstance(u, np.ma.MaskedArray):
raise ValueError("Expected masked array")
x, y, u, v, s2n, mask = multipass_img_deform(
frame_a,
frame_b,
i,
x,
y,
u,
v,
settings,
mask_coords=mask_coords
)
# If the smoothing is active, we do it at each pass
# but not the last one
if settings.smoothn is True and i < settings.num_iterations-1:
u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
u, s=settings.smoothn_p
)
v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
v, s=settings.smoothn_p
)
if not isinstance(u, np.ma.MaskedArray):
raise ValueError('not a masked array anymore')
if hasattr(settings, 'image_mask') and settings.image_mask:
grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
u = np.ma.masked_array(u, mask=grid_mask)
v = np.ma.masked_array(v, mask=grid_mask)
else:
u = np.ma.masked_array(u, np.ma.nomask)
v = np.ma.masked_array(v, np.ma.nomask)
if settings.show_all_plots:
plt.figure()
plt.quiver(x, y, u, -1*v, color='r')
plt.gca().set_aspect(1.)
plt.gca().invert_yaxis()
plt.title('end of the multipass, invert')
plt.show()
if settings.show_all_plots and settings.num_iterations > 1:
plt.figure()
plt.quiver(x, y, u, -v)
plt.gca().invert_yaxis()
plt.gca().set_aspect(1.)
plt.title('after multi pass, before saving, inverted')
plt.show()
# we now use only 0s instead of the image
# masked regions.
# we could do Nan, not sure what is best
u = u.filled(0.)
v = v.filled(0.)
# "scales the results pixel-> meter"
x, y, u, v = scaling.uniform(x, y, u, v,
scaling_factor=settings.scaling_factor)
if settings.image_mask:
grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
u = np.ma.masked_array(u, mask=grid_mask)
v = np.ma.masked_array(v, mask=grid_mask)
else:
u = np.ma.masked_array(u, np.ma.nomask)
v = np.ma.masked_array(v, np.ma.nomask)
# before saving we conver to the "physically relevant"
# right-hand coordinate system with 0,0 at the bottom left
# x to the right, y upwards
# and so u,v
x, y, u, v = transform_coordinates(x, y, u, v)
# import pdb; pdb.set_trace()
# "save to a file"
tools.save(x, y, u, v, mask,
os.path.join(save_path, "field_A%03d.txt" % counter),
delimiter="\t")
# "some other stuff that one might want to use"
if settings.show_plot or settings.save_plot:
Name = os.path.join(save_path, "Image_A%03d.png" % counter)
fig, _ = display_vector_field(
os.path.join(save_path, "field_A%03d.txt" % counter),
scale=settings.scale_plot,
)
if settings.save_plot is True:
fig.savefig(Name)
if settings.show_plot is True:
plt.show()
print(f"Image Pair {counter + 1}")
print(file_a.rsplit('/')[-1], file_b.rsplit('/')[-1])
# "Below is code to read files and create a folder to store the results"
save_path = os.path.join(
settings.save_path,
"Open_PIV_results_"
+ str(settings.windowsizes[settings.num_iterations-1])
+ "_"
+ settings.save_folder_suffix,
)
if not os.path.exists(save_path):
os.makedirs(save_path)
task = Multiprocesser(
data_dir=settings.filepath_images,
pattern_a=settings.frame_pattern_a,
pattern_b=settings.frame_pattern_b,
)
task.run(func=func, n_cpus=1)
def create_deformation_field(frame, x, y, u, v, kx=3, ky=3):
"""
Deform an image by window deformation where a new grid is defined based
on the grid and displacements of the previous pass and pixel values are
interpolated onto the new grid.
Parameters
----------
frame : 2d np.ndarray, dtype=np.int32
an two dimensions array of integers containing grey levels of
the first frame.
x : 2d np.ndarray
a two dimensional array containing the x coordinates of the
interrogation window centers, in pixels.
y : 2d np.ndarray
a two dimensional array containing the y coordinates of the
interrogation window centers, in pixels.
u : 2d np.ndarray
a two dimensional array containing the u velocity component,
in pixels/seconds.
v : 2d np.ndarray
a two dimensional array containing the v velocity component,
in pixels/seconds.
interpolation_order: scalar
the degree of the frame interpolation (deformation) of the mesh
kx : scalar
the degree of the interpolation of the B-splines over the x-axis
of a rectangular mesh
ky : scalar
the degree of the interpolation of the B-splines over the
y-axis of a rectangular mesh
Returns
-------
x,y : new grid (after meshgrid)
u,v : deformation field
"""
y1 = y[:, 0] # extract first coloumn from meshgrid
x1 = x[0, :] # extract first row from meshgrid
side_x = np.arange(frame.shape[1]) # extract the image grid
side_y = np.arange(frame.shape[0])
# interpolating displacements onto a new meshgrid
ip = RectBivariateSpline(y1, x1, u, kx=kx, ky=ky)
ut = ip(side_y, side_x)
# the way how to use the interpolation functions differs from matlab
ip2 = RectBivariateSpline(y1, x1, v, kx=kx, ky=ky)
vt = ip2(side_y, side_x)
x, y = np.meshgrid(side_x, side_y)
# plt.figure()
# plt.quiver(x1,y1,u,-v,color='r')
# plt.quiver(x,y,ut,-vt)
# plt.gca().invert_yaxis()
# plt.show()
return x, y, ut, vt
def deform_windows(frame, x, y, u, v, interpolation_order=1, kx=3, ky=3,
debugging=False):
"""
Deform an image by window deformation where a new grid is defined based
on the grid and displacements of the previous pass and pixel values are
interpolated onto the new grid.
Parameters
----------
frame : 2d np.ndarray, dtype=np.int32
an two dimensions array of integers containing grey levels of
the first frame.
x : 2d np.ndarray
a two dimensional array containing the x coordinates of the
interrogation window centers, in pixels.
y : 2d np.ndarray
a two dimensional array containing the y coordinates of the
interrogation window centers, in pixels.
u : 2d np.ndarray
a two dimensional array containing the u velocity component,
in pixels/seconds.
v : 2d np.ndarray
a two dimensional array containing the v velocity component,
in pixels/seconds.
interpolation_order: scalar
the degree of the frame interpolation (deformation) of the mesh
kx : scalar
the degree of the interpolation of the B-splines over the x-axis
of a rectangular mesh
ky : scalar
the degree of the interpolation of the B-splines over the
y-axis of a rectangular mesh
Returns
-------
frame_def:
a deformed image based on the meshgrid and displacements of the
previous pass
"""
frame = frame.astype(np.float32)
x, y, ut, vt = \
create_deformation_field(frame,
x, y, u, v,
kx=kx, ky=ky)
frame_def = scn.map_coordinates(
frame, ((y - vt, x + ut,)), order=interpolation_order, mode='nearest')
if debugging:
plt.figure()
plt.quiver(x, y, ut, vt)
plt.title('new, x,y, ut,vt')
plt.show()
plt.figure()
plt.imshow(frame-frame_def)
plt.title('new deformed image')
plt.show()
return frame_def
def first_pass(frame_a, frame_b, settings):
# window_size,
# overlap,
# iterations,
# correlation_method="circular",
# normalized_correlation=False,
# subpixel_method="gaussian",
# do_sig2noise=False,
# sig2noise_method="peak2peak",
# sig2noise_mask=2,
# settings):
"""
First pass of the PIV evaluation.
This function does the PIV evaluation of the first pass. It returns
the coordinates of the interrogation window centres, the displacment
u and v for each interrogation window as well as the mask which indicates
wether the displacement vector was interpolated or not.
Parameters
----------
frame_a : 2d np.ndarray
the first image
frame_b : 2d np.ndarray
the second image
window_size : int
the size of the interrogation window
overlap : int
the overlap of the interrogation window, typically it is window_size/2
subpixel_method: string
the method used for the subpixel interpolation.
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'
Returns
-------
x : 2d np.array
array containg the x coordinates of the interrogation window centres
y : 2d np.array
array containg the y coordinates of the interrogation window centres
u : 2d np.array
array containing the u displacement for every interrogation window
u : 2d np.array
array containing the u displacement for every interrogation window
"""
# if do_sig2noise is False or iterations != 1:
# sig2noise_method = None # this indicates to get out nans
u, v, s2n = extended_search_area_piv(
frame_a,
frame_b,
window_size=settings.windowsizes[0],
overlap=settings.overlap[0],
search_area_size=settings.windowsizes[0],
width=settings.sig2noise_mask,
subpixel_method=settings.subpixel_method,
sig2noise_method=settings.sig2noise_method,
correlation_method=settings.correlation_method,
normalized_correlation=settings.normalized_correlation
)
shapes = np.array(get_field_shape(frame_a.shape,
settings.windowsizes[0],
settings.overlap[0]))
u = u.reshape(shapes)
v = v.reshape(shapes)
s2n = s2n.reshape(shapes)
x, y = get_coordinates(frame_a.shape,
settings.windowsizes[0],
settings.overlap[0])
return x, y, u, v, s2n
def multipass_img_deform(
frame_a,
frame_b,
current_iteration,
x_old,
y_old,
u_old,
v_old,
settings,
mask_coords=[],
):
# window_size,
# overlap,
# iterations,
# current_iteration,
# x_old,
# y_old,
# u_old,
# v_old,
# correlation_method="circular",
# normalized_correlation=False,
# subpixel_method="gaussian",
# deformation_method="symmetric",
# sig2noise_method="peak2peak",
# sig2noise_threshold=1.0,
# sig2noise_mask=2,
# interpolation_order=1,
"""
Multi pass of the PIV evaluation.
This function does the PIV evaluation of the second and other passes.
It returns the coordinates of the interrogation window centres,
the displacement u, v for each interrogation window as well as
the signal to noise ratio array (which is full of NaNs if opted out)
Parameters
----------
frame_a : 2d np.ndarray
the first image
frame_b : 2d np.ndarray
the second image
window_size : tuple of ints
the size of the interrogation window
overlap : tuple of ints
the overlap of the interrogation window, e.g. window_size/2
x_old : 2d np.ndarray
the x coordinates of the vector field of the previous pass
y_old : 2d np.ndarray
the y coordinates of the vector field of the previous pass
u_old : 2d np.ndarray
the u displacement of the vector field of the previous pass
in case of the image mask - u_old and v_old are MaskedArrays
v_old : 2d np.ndarray
the v displacement of the vector field of the previous pass
subpixel_method: string
the method used for the subpixel interpolation.
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'
interpolation_order : int
the order of the spline interpolation used for the image deformation
mask_coords : list of x,y coordinates (pixels) of the image mask,
default is an empty list
Returns
-------
x : 2d np.array
array containg the x coordinates of the interrogation window centres
y : 2d np.array
array containg the y coordinates of the interrogation window centres
u : 2d np.array
array containing the horizontal displacement for every interrogation
window [pixels]
u : 2d np.array
array containing the vertical displacement for every interrogation
window it returns values in [pixels]
s2n : 2D np.array of signal to noise ratio values
"""
if not isinstance(u_old, np.ma.MaskedArray):
raise ValueError('Expected masked array')
# calculate the y and y coordinates of the interrogation window centres.
# Hence, the
# edges must be extracted to provide the sufficient input. x_old and y_old
# are the coordinates of the old grid. x_int and y_int are the coordinates
# of the new grid
window_size = settings.windowsizes[current_iteration]
overlap = settings.overlap[current_iteration]
x, y = get_coordinates(frame_a.shape,
window_size,
overlap)
# The interpolation function dont like meshgrids as input.
# plus the coordinate system for y is now from top to bottom
# and RectBivariateSpline wants an increasing set
y_old = y_old[:, 0]
# y_old = y_old[::-1]
x_old = x_old[0, :]
y_int = y[:, 0]
# y_int = y_int[::-1]
x_int = x[0, :]
# interpolating the displacements from the old grid onto the new grid
# y befor x because of numpy works row major
ip = RectBivariateSpline(y_old, x_old, u_old.filled(0.))
u_pre = ip(y_int, x_int)
ip2 = RectBivariateSpline(y_old, x_old, v_old.filled(0.))
v_pre = ip2(y_int, x_int)
# if settings.show_plot:
if settings.show_all_plots:
plt.figure()
plt.quiver(x_old, y_old, u_old, -1*v_old, color='b')
plt.quiver(x_int, y_int, u_pre, -1*v_pre, color='r', lw=2)
plt.gca().set_aspect(1.)
plt.gca().invert_yaxis()
plt.title('inside deform, invert')
plt.show()
# @TKauefer added another method to the windowdeformation, 'symmetric'
# splits the onto both frames, takes more effort due to additional
# interpolation however should deliver better results
old_frame_a = frame_a.copy()
old_frame_b = frame_b.copy()
# Image deformation has to occur in image coordinates
# therefore we need to convert the results of the
# previous pass which are stored in the physical units
# and so y from the get_coordinates
if settings.deformation_method == "symmetric":
# this one is doing the image deformation (see above)
x_new, y_new, ut, vt = create_deformation_field(
frame_a, x, y, u_pre, v_pre)
frame_a = scn.map_coordinates(
frame_a, ((y_new - vt / 2, x_new - ut / 2)),
order=settings.interpolation_order, mode='nearest')
frame_b = scn.map_coordinates(
frame_b, ((y_new + vt / 2, x_new + ut / 2)),
order=settings.interpolation_order, mode='nearest')
elif settings.deformation_method == "second image":
frame_b = deform_windows(
frame_b, x, y, u_pre, -v_pre,
interpolation_order=settings.interpolation_order)
else:
raise Exception("Deformation method is not valid.")
# if settings.show_plot:
if settings.show_all_plots:
if settings.deformation_method == 'symmetric':
plt.figure()
plt.imshow(frame_a-old_frame_a)
plt.show()
plt.figure()
plt.imshow(frame_b-old_frame_b)
plt.show()
# if do_sig2noise is True
# sig2noise_method = sig2noise_method
# else:
# sig2noise_method = None
# so we use here default circular not normalized correlation:
# if we did not want to validate every step, remove the method
if settings.sig2noise_validate is False:
settings.sig2noise_method = None
u, v, s2n = extended_search_area_piv(
frame_a,
frame_b,
window_size=window_size,
overlap=overlap,
width=settings.sig2noise_mask,
subpixel_method=settings.subpixel_method,
sig2noise_method=settings.sig2noise_method,
correlation_method=settings.correlation_method,
normalized_correlation=settings.normalized_correlation,
)
shapes = np.array(get_field_shape(frame_a.shape,
window_size,
overlap))
u = u.reshape(shapes)
v = v.reshape(shapes)
s2n = s2n.reshape(shapes)
u += u_pre
v += v_pre
# reapply the image mask to the new grid
if settings.image_mask:
grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
u = np.ma.masked_array(u, mask=grid_mask)
v = np.ma.masked_array(v, mask=grid_mask)
else:
u = np.ma.masked_array(u, np.ma.nomask)
v = np.ma.masked_array(v, np.ma.nomask)
# validate in the multi-pass by default
u, v, mask = validation.typical_validation(u, v, s2n, settings)
if np.all(mask):
raise ValueError("Something happened in the validation")
if not isinstance(u, np.ma.MaskedArray):
raise ValueError('not a masked array anymore')
if settings.show_all_plots:
plt.figure()
nans = np.nonzero(mask)
plt.quiver(x[~nans], y[~nans], u[~nans], -v[~nans], color='b')
plt.quiver(x[nans], y[nans], u[nans], -v[nans], color='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect(1.)
plt.title('After sig2noise, inverted')
plt.show()
# we have to replace outliers
u, v = filters.replace_outliers(
u,
v,
method=settings.filter_method,
max_iter=settings.max_filter_iteration,
kernel_size=settings.filter_kernel_size,
)
# reapply the image mask to the new grid
if settings.image_mask:
grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
u = np.ma.masked_array(u, mask=grid_mask)
v = np.ma.masked_array(v, mask=grid_mask)
else:
u = np.ma.masked_array(u, np.ma.nomask)
v = np.ma.masked_array(v, np.ma.nomask)
if settings.show_all_plots:
plt.figure()
plt.quiver(x, y, u, -v, color='r')
plt.quiver(x, y, u_pre, -1*v_pre, color='b')
plt.gca().invert_yaxis()
plt.gca().set_aspect(1.)
plt.title(' after replaced outliers, red, invert')
plt.show()
return x, y, u, v, s2n, mask
class FrozenClass(object):
__isfrozen = False
def __setattr__(self, key, value):
if self.__isfrozen and not hasattr(self, key):
raise TypeError("%r is a frozen class" % self)
object.__setattr__(self, key, value)
def _freeze(self):
self.__isfrozen = True
class Settings(FrozenClass):
def __init__(self):
# "Data related settings"
# Folder with the images to process
self.filepath_images = "."
# Folder for the outputs
self.save_path = "./res"
# Root name of the output Folder for Result Files
self.save_folder_suffix = "Test_4"
# Format and Image Sequence
self.frame_pattern_a = 'exp1_001_a.bmp'
self.frame_pattern_b = 'exp1_001_b.bmp'
# "Region of interest"
# (50,300,50,300) #Region of interest: (xmin,xmax,ymin,ymax) or 'full'
# for full image
self.ROI = "full"
# "Image preprocessing"
# 'None' for no masking, 'edges' for edges masking, 'intensity' for
# intensity masking
# WARNING: This part is under development so better not to use MASKS
self.dynamic_masking_method = "None"
self.dynamic_masking_threshold = 0.005
self.dynamic_masking_filter_size = 7
# "Processing Parameters"
self.correlation_method = "circular" # 'circular' or 'linear'
self.normalized_correlation = False
# add the interroagtion window size for each pass.
# For the moment, it should be a power of 2
self.windowsizes = (
64,
32,
16,
) # if longer than n iteration the rest is ignored
# The overlap of the interroagtion window for each pass.
self.overlap = (32, 16, 8) # This is 50% overlap
# Has to be a value with base two. In general window size/2 is a good
# choice.
self.num_iterations = len(self.windowsizes) # select the number of PIV
# passes
# methode used for subpixel interpolation:
# 'gaussian','centroid','parabolic'
self.subpixel_method = "gaussian"
# 'symmetric' or 'second image', 'symmetric' splits the deformation
# both images, while 'second image' does only deform the second image.
self.deformation_method = 'symmetric' # 'symmetric' or 'second image'
# order of the image interpolation for the window deformation
self.interpolation_order = 3
self.scaling_factor = 1 # scaling factor pixel/meter
self.dt = 1 # time between to frames (in seconds)
# "Signal to noise ratio options (only for the last pass)"
# It is possible to decide if the S/N should be computed (for the last
# pass) or not
# 'True' or 'False' (only for the last pass)
# self.extract_sig2noise = False
# method used to calculate the signal to noise ratio 'peak2peak' or
# 'peak2mean'
self.sig2noise_method = "peak2peak" # or "peak2mean" or "None"
# select the width of the masked to masked out pixels next to the main
# peak
self.sig2noise_mask = 2
# If extract_sig2noise==False the values in the signal to noise ratio
# output column are set to NaN
# "vector validation options"
# choose if you want to do validation of the first pass: True or False
self.validation_first_pass = True
# only effecting the first pass of the interrogation the following
# passes
# in the multipass will be validated
# "Validation Parameters"
# The validation is done at each iteration based on three filters.
# The first filter is based on the min/max ranges. Observe that these
# values are defined in
# terms of minimum and maximum displacement in pixel/frames.
self.MinMax_U_disp = (-30, 30)
self.MinMax_V_disp = (-30, 30)
# The second filter is based on the global STD threshold
self.std_threshold = 10 # threshold of the std validation
# The third filter is the median test (not normalized at the moment)
self.median_threshold = 3 # threshold of the median validation
# On the last iteration, an additional validation can be done based on
# the S/N.
self.median_size = 1 # defines the size of the local median
# "Validation based on the signal to noise ratio"
# Note: only available when extract_sig2noise==True and only for the
# last pass of the interrogation
# Enable the signal to noise ratio validation. Options: True or False
# self.sig2noise_validate = False # This is time consuming
# minmum signal to noise ratio that is need for a valid vector
self.sig2noise_threshold = 1.0
self.sig2noise_validate = True
# "Outlier replacement or Smoothing options"
# Replacment options for vectors which are masked as invalid by the
# validation
# Choose: True or False
self.replace_vectors = True # Enable the replacement.
self.smoothn = True # Enables smoothing of the displacement field
self.smoothn_p = 0.05 # This is a smoothing parameter
# select a method to replace the outliers:
# 'localmean', 'disk', 'distance'
self.filter_method = "localmean"
# maximum iterations performed to replace the outliers
self.max_filter_iteration = 4
self.filter_kernel_size = 2 # kernel size for the localmean method
# "Output options"
# Select if you want to save the plotted vectorfield: True or False
self.save_plot = True
# Choose wether you want to see the vectorfield or not :True or False
self.show_plot = True
self.scale_plot = 100 # select a value to scale the quiver plot of
# the vectorfield run the script with the given settings
self.image_mask = False
self.show_all_plots = False
self.invert = False # for the test_invert
self._freeze()
if __name__ == "__main__":
""" Run windef.py as a script:
python windef.py
"""
settings = Settings()
piv(settings)
| gpl-3.0 | 5,857,745,627,380,246,000 | 33.17759 | 79 | 0.592973 | false |
gfyoung/pandas | pandas/core/strings/base.py | 2 | 4702 | import abc
from typing import Pattern, Union
import numpy as np
from pandas._typing import Scalar
class BaseStringArrayMethods(abc.ABC):
"""
Base class for extension arrays implementing string methods.
This is where our ExtensionArrays can override the implementation of
Series.str.<method>. We don't expect this to work with
3rd-party extension arrays.
* User calls Series.str.<method>
* pandas extracts the extension array from the Series
* pandas calls ``extension_array._str_<method>(*args, **kwargs)``
* pandas wraps the result, to return to the user.
See :ref:`Series.str` for the docstring of each method.
"""
def _str_getitem(self, key):
if isinstance(key, slice):
return self._str_slice(start=key.start, stop=key.stop, step=key.step)
else:
return self._str_get(key)
@abc.abstractmethod
def _str_count(self, pat, flags=0):
pass
@abc.abstractmethod
def _str_pad(self, width, side="left", fillchar=" "):
pass
@abc.abstractmethod
def _str_contains(self, pat, case=True, flags=0, na=None, regex=True):
pass
@abc.abstractmethod
def _str_startswith(self, pat, na=None):
pass
@abc.abstractmethod
def _str_endswith(self, pat, na=None):
pass
@abc.abstractmethod
def _str_replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
pass
@abc.abstractmethod
def _str_repeat(self, repeats):
pass
@abc.abstractmethod
def _str_match(
self,
pat: Union[str, Pattern],
case: bool = True,
flags: int = 0,
na: Scalar = np.nan,
):
pass
@abc.abstractmethod
def _str_fullmatch(
self,
pat: Union[str, Pattern],
case: bool = True,
flags: int = 0,
na: Scalar = np.nan,
):
pass
@abc.abstractmethod
def _str_encode(self, encoding, errors="strict"):
pass
@abc.abstractmethod
def _str_find(self, sub, start=0, end=None):
pass
@abc.abstractmethod
def _str_rfind(self, sub, start=0, end=None):
pass
@abc.abstractmethod
def _str_findall(self, pat, flags=0):
pass
@abc.abstractmethod
def _str_get(self, i):
pass
@abc.abstractmethod
def _str_index(self, sub, start=0, end=None):
pass
@abc.abstractmethod
def _str_rindex(self, sub, start=0, end=None):
pass
@abc.abstractmethod
def _str_join(self, sep):
pass
@abc.abstractmethod
def _str_partition(self, sep, expand):
pass
@abc.abstractmethod
def _str_rpartition(self, sep, expand):
pass
@abc.abstractmethod
def _str_len(self):
pass
@abc.abstractmethod
def _str_slice(self, start=None, stop=None, step=None):
pass
@abc.abstractmethod
def _str_slice_replace(self, start=None, stop=None, repl=None):
pass
@abc.abstractmethod
def _str_translate(self, table):
pass
@abc.abstractmethod
def _str_wrap(self, width, **kwargs):
pass
@abc.abstractmethod
def _str_get_dummies(self, sep="|"):
pass
@abc.abstractmethod
def _str_isalnum(self):
pass
@abc.abstractmethod
def _str_isalpha(self):
pass
@abc.abstractmethod
def _str_isdecimal(self):
pass
@abc.abstractmethod
def _str_isdigit(self):
pass
@abc.abstractmethod
def _str_islower(self):
pass
@abc.abstractmethod
def _str_isnumeric(self):
pass
@abc.abstractmethod
def _str_isspace(self):
pass
@abc.abstractmethod
def _str_istitle(self):
pass
@abc.abstractmethod
def _str_isupper(self):
pass
@abc.abstractmethod
def _str_capitalize(self):
pass
@abc.abstractmethod
def _str_casefold(self):
pass
@abc.abstractmethod
def _str_title(self):
pass
@abc.abstractmethod
def _str_swapcase(self):
pass
@abc.abstractmethod
def _str_lower(self):
pass
@abc.abstractmethod
def _str_upper(self):
pass
@abc.abstractmethod
def _str_normalize(self, form):
pass
@abc.abstractmethod
def _str_strip(self, to_strip=None):
pass
@abc.abstractmethod
def _str_lstrip(self, to_strip=None):
pass
@abc.abstractmethod
def _str_rstrip(self, to_strip=None):
pass
@abc.abstractmethod
def _str_split(self, pat=None, n=-1, expand=False):
pass
@abc.abstractmethod
def _str_rsplit(self, pat=None, n=-1):
pass
| bsd-3-clause | -1,347,311,179,181,504,800 | 19.897778 | 81 | 0.595917 | false |
openbermuda/karmapi | karmapi/sunny.py | 1 | 1858 | from karmapi import pigfarm
import curio
import random
from pathlib import Path
class Sunspot(pigfarm.MagicCarpet):
def compute_data(self):
pass
def plot(self):
jup = 11.86
nep = 164.8
sat = 29.4571
x = (1/jup - 1/sat)
jupsat = 1/(2 * x)
x = (1/jup - 1/nep)
jupnep = 1/(2 * x)
jupsat, jupnep
period = [jupsat, 10.87, jup, 11.07]
phase = [2000.475, 2002.364, 1999.381, 2009]
weight = [0.83, 1.0, 0.55, 0.25]
import datetime
import pandas
import math
from karmapi import base
infile = Path('~/devel/karmapi/notebooks/SN_m_tot_V2.0.csv').expanduser()
df = pandas.read_csv(
infile,
names=['year', 'month', 'time', 'sunspot', 'sd', 'status'],
sep=';',
header=None,
index_col=False)
def add_date(x):
# FIXME -- turn time into day
return datetime.date(int(x.year), int(x.month), 1)
df.index = df.apply(add_date, axis=1)
df.index = pandas.date_range(
datetime.date(int(df.index[0].year), int(df.index[0].month), 1),
periods=len(df), freq='M')
df['year2'] = pandas.np.linspace(1749, 2016.67, 3212)
pi = math.pi
cos = pandas.np.cos
for ii in range(4):
df['h{}'.format(ii + 1)] = weight[ii] * cos((2 * pi) * ((df.year2 - phase[ii]) / period[ii]))
df['model'] = df.h1 + df.h2 + df.h3 + df.h4
df['guess'] = df.model.clip_lower(0.0) * 150
self.axes.hold(True)
self.axes.plot(df['guess'] / 2.0, 'b')
self.axes.plot((df.h3 * 20) -10, 'g')
self.axes.plot((df.h2 * 20) -40,'k')
self.axes.plot((df.sunspot / 2) + 100,'r')
| gpl-3.0 | -1,704,182,033,431,231,000 | 19.876404 | 105 | 0.492465 | false |
schae234/Camoco | camoco/__init__.py | 1 | 1676 | """
Camoco Library - CoAnalysis of Molecular Components
CacheMoneyCorn
"""
__license__ = """
The "MIT" License
Copyright (c) 2017-2019 Robert Schaefer
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__version__ = '0.6.2'
import sys
import os
import numpy
import pyximport
pyximport.install(setup_args={
"include_dirs":numpy.get_include()
})
import matplotlib
from .Config import cf
from .Camoco import Camoco
from .Expr import Expr
from .COB import COB
from .RefGen import RefGen
from .Ontology import Ontology,Term
from .GWAS import GWAS
from .Locus import Locus
from .GOnt import GOnt
from .Overlap import Overlap
# Create yourself
Camoco.create('Camoco','Mother Database')
| mit | -8,831,807,908,612,434,000 | 26.032258 | 79 | 0.78043 | false |
NicWayand/xray | xarray/test/test_dataset.py | 1 | 113381 | # -*- coding: utf-8 -*-
from copy import copy, deepcopy
from textwrap import dedent
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import dask.array as da
except ImportError:
pass
import numpy as np
import pandas as pd
import pytest
from xarray import (align, broadcast, concat, merge, conventions, backends,
Dataset, DataArray, Variable, Coordinate, auto_combine,
open_dataset, set_options, MergeError)
from xarray.core import indexing, utils
from xarray.core.pycompat import iteritems, OrderedDict, unicode_type
from . import (TestCase, unittest, InaccessibleArray, UnexpectedDataAccess,
requires_dask, source_ndarray)
def create_test_data(seed=None):
    """Build a small example Dataset with deterministic coords and
    seeded-random data variables.

    ``seed`` feeds a private RandomState, so repeated calls with the same
    seed produce identical data.
    """
    rng = np.random.RandomState(seed)
    sizes = {'dim1': 8, 'dim2': 9, 'dim3': 10}
    var_dims = {'var1': ['dim1', 'dim2'],
                'var2': ['dim1', 'dim2'],
                'var3': ['dim3', 'dim1']}

    ds = Dataset()
    ds['time'] = ('time', pd.date_range('2000-01-01', periods=20))
    ds['dim1'] = ('dim1', np.arange(sizes['dim1'], dtype='int64'))
    ds['dim2'] = ('dim2', 0.5 * np.arange(sizes['dim2']))
    ds['dim3'] = ('dim3', list('abcdefghij'))
    # sorted() keeps the order of random draws stable across runs
    for name, dims in sorted(var_dims.items()):
        shape = tuple(sizes[d] for d in dims)
        ds[name] = (dims, rng.normal(size=shape), {'foo': 'variable'})
    ds.coords['numbers'] = ('dim3', np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3],
                                             dtype='int64'))
    # every underlying array must stay writeable for in-place test edits
    assert all(v.data.flags.writeable for v in ds.values())
    return ds
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
    """In-memory data store whose variables raise if their values are
    eagerly accessed, for testing lazy-loading code paths."""

    def get_variables(self):
        def wrap(var):
            # guard the values so any eager read raises from
            # InaccessibleArray, while metadata stays available
            guarded = indexing.LazilyIndexedArray(
                InaccessibleArray(var.values))
            return Variable(var.dims, guarded, var.attrs)

        return dict((name, wrap(var))
                    for name, var in iteritems(self._variables))
class TestDataset(TestCase):
    def test_repr(self):
        """repr() of a Dataset matches the documented fixed-width layout,
        honors display_width, collapses empty sections and truncates long
        attributes."""
        data = create_test_data(seed=123)
        data.attrs['foo'] = 'bar'
        # need to insert str dtype at runtime to handle both Python 2 & 3
        expected = dedent("""\
        <xarray.Dataset>
        Dimensions:  (dim1: 8, dim2: 9, dim3: 10, time: 20)
        Coordinates:
          * time     (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
          * dim1     (dim1) int64 0 1 2 3 4 5 6 7
          * dim2     (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
          * dim3     (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
            numbers  (dim3) int64 0 1 2 0 0 1 1 2 2 3
        Data variables:
            var1     (dim1, dim2) float64 -1.086 0.9973 0.283 -1.506 -0.5786 1.651 ...
            var2     (dim1, dim2) float64 1.162 -1.097 -2.123 1.04 -0.4034 -0.126 ...
            var3     (dim3, dim1) float64 0.5565 -0.2121 0.4563 1.545 -0.2397 0.1433 ...
        Attributes:
            foo: bar""") % data['dim3'].dtype
        # strip trailing whitespace so the comparison ignores padding
        actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
        print(actual)
        self.assertEqual(expected, actual)

        # widening display_width should widen (but still bound) the repr lines
        with set_options(display_width=100):
            max_len = max(map(len, repr(data).split('\n')))
            assert 90 < max_len < 100

        # an empty Dataset shows '*empty*' placeholders
        expected = dedent("""\
        <xarray.Dataset>
        Dimensions:  ()
        Coordinates:
            *empty*
        Data variables:
            *empty*""")
        actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
        print(actual)
        self.assertEqual(expected, actual)

        # verify that ... doesn't appear for scalar coordinates
        data = Dataset({'foo': ('x', np.ones(10))}).mean()
        expected = dedent("""\
        <xarray.Dataset>
        Dimensions:  ()
        Coordinates:
            *empty*
        Data variables:
            foo      float64 1.0""")
        actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
        print(actual)
        self.assertEqual(expected, actual)

        # verify long attributes are truncated
        data = Dataset(attrs={'foo': 'bar' * 1000})
        self.assertTrue(len(repr(data)) < 1000)
def test_repr_period_index(self):
data = create_test_data(seed=456)
data.coords['time'] = pd.period_range('2000-01-01', periods=20, freq='B')
# check that creating the repr doesn't raise an error #GH645
repr(data)
    def test_unicode_data(self):
        """Non-ASCII variable names, values and attrs survive repr()."""
        # regression test for GH834
        data = Dataset({u'foø': [u'ba®']}, attrs={u'å': u'∑'})
        repr(data)  # should not raise
        expected = dedent(u"""\
        <xarray.Dataset>
        Dimensions:  (foø: 1)
        Coordinates:
          * foø      (foø) <U3 %r
        Data variables:
            *empty*
        Attributes:
            å: ∑""" % u'ba®')
        # unicode_type is unicode on Py2 and str on Py3
        actual = unicode_type(data)
        self.assertEqual(expected, actual)
def test_constructor(self):
x1 = ('x', 2 * np.arange(100))
x2 = ('x', np.arange(1000))
z = (['x', 'y'], np.arange(1000).reshape(100, 10))
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': x1, 'b': x2})
with self.assertRaisesRegexp(ValueError,
"variable 'x' has the same name"):
Dataset({'a': x1, 'x': z})
with self.assertRaisesRegexp(TypeError, 'tuples to convert'):
Dataset({'x': (1, 2, 3, 4, 5, 6, 7)})
with self.assertRaisesRegexp(ValueError, 'already exists as a scalar'):
Dataset({'x': 0, 'y': ('x', [1, 2, 3])})
# verify handling of DataArrays
expected = Dataset({'x': x1, 'z': z})
actual = Dataset({'z': expected['z']})
self.assertDatasetIdentical(expected, actual)
def test_constructor_kwargs(self):
x1 = ('x', 2 * np.arange(100))
with self.assertRaises(TypeError):
Dataset(data_vars={'x1': x1}, invalid_kwarg=42)
import warnings
# this can be removed once the variables keyword is fully removed
with warnings.catch_warnings(record=False):
ds = Dataset(variables={'x1': x1})
# but assert dataset is still created
self.assertDatasetEqual(ds, Dataset(data_vars={'x1': x1}))
def test_constructor_1d(self):
expected = Dataset({'x': (['x'], 5.0 + np.arange(5))})
actual = Dataset({'x': 5.0 + np.arange(5)})
self.assertDatasetIdentical(expected, actual)
actual = Dataset({'x': [5, 6, 7, 8, 9]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_0d(self):
expected = Dataset({'x': ([], 1)})
for arg in [1, np.array(1), expected['x']]:
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
class Arbitrary(object):
pass
d = pd.Timestamp('2000-01-01T12')
args = [True, None, 3.4, np.nan, 'hello', u'uni', b'raw',
np.datetime64('2000-01-01T00'), d, d.to_datetime(),
Arbitrary()]
for arg in args:
print(arg)
expected = Dataset({'x': ([], arg)})
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
def test_constructor_auto_align(self):
a = DataArray([1, 2], [('x', [0, 1])])
b = DataArray([3, 4], [('x', [1, 2])])
# verify align uses outer join
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
actual = Dataset({'a': a, 'b': b})
self.assertDatasetIdentical(expected, actual)
# regression test for GH346
self.assertIsInstance(actual.variables['x'], Coordinate)
# variable with different dimensions
c = ('y', [3, 4])
expected2 = expected.merge({'c': c})
actual = Dataset({'a': a, 'b': b, 'c': c})
self.assertDatasetIdentical(expected2, actual)
# variable that is only aligned against the aligned variables
d = ('x', [3, 2, 1])
expected3 = expected.merge({'d': d})
actual = Dataset({'a': a, 'b': b, 'd': d})
self.assertDatasetIdentical(expected3, actual)
e = ('x', [0, 0])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': a, 'b': b, 'e': e})
def test_constructor_pandas_sequence(self):
ds = self.make_example_math_dataset()
pandas_objs = OrderedDict(
(var_name, ds[var_name].to_pandas()) for var_name in ['foo','bar']
)
ds_based_on_pandas = Dataset(data_vars=pandas_objs, coords=ds.coords, attrs=ds.attrs)
self.assertDatasetEqual(ds, ds_based_on_pandas)
# reindex pandas obj, check align works
rearranged_index = reversed(pandas_objs['foo'].index)
pandas_objs['foo'] = pandas_objs['foo'].reindex(rearranged_index)
ds_based_on_pandas = Dataset(variables=pandas_objs, coords=ds.coords, attrs=ds.attrs)
self.assertDatasetEqual(ds, ds_based_on_pandas)
    def test_constructor_pandas_single(self):
        """A single pandas Series/DataFrame/Panel converts back faithfully."""
        das = [
            DataArray(np.random.rand(4), dims=['a']),  # series
            DataArray(np.random.rand(4,3), dims=['a', 'b']),  # df
            DataArray(np.random.rand(4,3,2), dims=['a','b','c']),  # panel
            ]
        for da in das:
            pandas_obj = da.to_pandas()
            ds_based_on_pandas = Dataset(pandas_obj)
            for dim in ds_based_on_pandas.data_vars:
                self.assertArrayEqual(ds_based_on_pandas[dim], pandas_obj[dim])
    def test_constructor_compat(self):
        """The constructor's compat= argument controls merge strictness."""
        data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
                            ('y', ('z', [1, 1, 1]))])
        # 'equals' rejects the conflicting scalar/array 'y' values
        with self.assertRaises(MergeError):
            Dataset(data, compat='equals')
        expected = Dataset({'x': 0}, {'y': ('z', [1, 1, 1])})
        actual = Dataset(data)
        self.assertDatasetIdentical(expected, actual)
        actual = Dataset(data, compat='broadcast_equals')
        self.assertDatasetIdentical(expected, actual)
        # result should not depend on the order of the input items
        data = OrderedDict([('y', ('z', [1, 1, 1])),
                            ('x', DataArray(0, coords={'y': 1}))])
        actual = Dataset(data)
        self.assertDatasetIdentical(expected, actual)
        original = Dataset({'a': (('x', 'y'), np.ones((2, 3)))},
                           {'c': (('x', 'y'), np.zeros((2, 3)))})
        expected = Dataset({'a': ('x', np.ones(2)),
                            'b': ('y', np.ones(3))},
                           {'c': (('x', 'y'), np.zeros((2, 3)))})
        # use an OrderedDict to ensure test results are reproducible; otherwise
        # the order of appearance of x and y matters for the order of
        # dimensions in 'c'
        actual = Dataset(OrderedDict([('a', original['a'][:, 0].drop('y')),
                                      ('b', original['a'][0].drop('x'))]))
        self.assertDatasetIdentical(expected, actual)
        # scalar coord conflicting with a data variable of the same name
        data = {'x': DataArray(0, coords={'y': 3}), 'y': ('z', [1, 1, 1])}
        with self.assertRaises(MergeError):
            Dataset(data)
        # broadcast_equals allows a repeated value to match a scalar
        data = {'x': DataArray(0, coords={'y': 1}), 'y': [1, 1]}
        actual = Dataset(data)
        expected = Dataset({'x': 0}, {'y': [1, 1]})
        self.assertDatasetIdentical(expected, actual)
    def test_constructor_with_coords(self):
        """A name may not appear in both data_vars and coords; coords-only works."""
        with self.assertRaisesRegexp(ValueError, 'found in both data_vars and'):
            Dataset({'a': ('x', [1])}, {'a': ('x', [1])})
        ds = Dataset({}, {'a': ('x', [1])})
        self.assertFalse(ds.data_vars)
        self.assertItemsEqual(ds.coords.keys(), ['x', 'a'])
    def test_properties(self):
        """Basic Dataset properties: dims, variables, data_vars, indexes, coords."""
        ds = create_test_data()
        self.assertEqual(ds.dims,
                         {'dim1': 8, 'dim2': 9, 'dim3': 10, 'time': 20})
        self.assertEqual(list(ds.dims), sorted(ds.dims))
        # These exact types aren't public API, but this makes sure we don't
        # change them inadvertently:
        self.assertIsInstance(ds.dims, utils.Frozen)
        self.assertIsInstance(ds.dims.mapping, utils.SortedKeysDict)
        self.assertIs(type(ds.dims.mapping.mapping), dict)
        self.assertItemsEqual(ds, list(ds.variables))
        self.assertItemsEqual(ds.keys(), list(ds.variables))
        self.assertNotIn('aasldfjalskdfj', ds.variables)
        self.assertIn('dim1', repr(ds.variables))
        self.assertEqual(len(ds), 8)
        self.assertItemsEqual(ds.data_vars, ['var1', 'var2', 'var3'])
        self.assertItemsEqual(ds.data_vars.keys(), ['var1', 'var2', 'var3'])
        self.assertIn('var1', ds.data_vars)
        self.assertNotIn('dim1', ds.data_vars)
        self.assertNotIn('numbers', ds.data_vars)
        self.assertEqual(len(ds.data_vars), 3)
        self.assertItemsEqual(ds.indexes, ['dim1', 'dim2', 'dim3', 'time'])
        self.assertEqual(len(ds.indexes), 4)
        self.assertIn('dim1', repr(ds.indexes))
        self.assertItemsEqual(ds.coords,
                              ['time', 'dim1', 'dim2', 'dim3', 'numbers'])
        self.assertIn('dim1', ds.coords)
        self.assertIn('numbers', ds.coords)
        self.assertNotIn('var1', ds.coords)
        self.assertEqual(len(ds.coords), 5)
        # nbytes: one int64 scalar (8) + two float32 values (8) = 16 bytes
        self.assertEqual(Dataset({'x': np.int64(1),
                                  'y': np.float32([1, 2])}).nbytes, 16)
    def test_attr_access(self):
        """Variables and attrs are reachable as attributes; variables win ties."""
        ds = Dataset({'tmin': ('x', [42], {'units': 'Celcius'})},
                     attrs={'title': 'My test data'})
        self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
        self.assertDataArrayIdentical(ds.tmin.x, ds.x)
        self.assertEqual(ds.title, ds.attrs['title'])
        self.assertEqual(ds.tmin.units, ds['tmin'].attrs['units'])
        # attribute names show up in dir() for tab-completion
        self.assertLessEqual(set(['tmin', 'title']), set(dir(ds)))
        self.assertIn('units', set(dir(ds.tmin)))
        # should defer to variable of same name
        ds.attrs['tmin'] = -999
        self.assertEqual(ds.attrs['tmin'], -999)
        self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
    def test_variable(self):
        """Assigning (dims, data) tuples creates variables; shapes must match."""
        a = Dataset()
        d = np.random.random((10, 3))
        a['foo'] = (('time', 'x',), d)
        self.assertTrue('foo' in a.variables)
        self.assertTrue('foo' in a)
        a['bar'] = (('time', 'x',), d)
        # order of creation is preserved
        self.assertEqual(list(a), ['foo', 'time', 'x', 'bar'])
        self.assertTrue(all([a['foo'][i].values == d[i]
                             for i in np.ndindex(*d.shape)]))
        # try to add variable with dim (10,3) with data that's (3,10)
        with self.assertRaises(ValueError):
            a['qux'] = (('time', 'x'), d.T)
    def test_modify_inplace(self):
        """In-place assignment creates/overwrites coords; sizes must stay consistent."""
        a = Dataset()
        vec = np.random.random((10,))
        attributes = {'foo': 'bar'}
        a['x'] = ('x', vec, attributes)
        self.assertTrue('x' in a.coords)
        self.assertIsInstance(a.coords['x'].to_index(),
                              pd.Index)
        self.assertVariableIdentical(a.coords['x'], a.variables['x'])
        b = Dataset()
        b['x'] = ('x', vec, attributes)
        self.assertVariableIdentical(a['x'], b['x'])
        self.assertEqual(a.dims, b.dims)
        # this should work
        a['x'] = ('x', vec[:5])
        a['z'] = ('x', np.arange(5))
        with self.assertRaises(ValueError):
            # now it shouldn't, since there is a conflicting length
            a['x'] = ('x', vec[:4])
        arr = np.random.random((10, 1,))
        scal = np.array(0)
        # 2d data and 0d data cannot be assigned to a 1d dimension
        with self.assertRaises(ValueError):
            a['y'] = ('y', arr)
        with self.assertRaises(ValueError):
            a['y'] = ('y', scal)
        self.assertTrue('y' not in a.dims)
    def test_coords_properties(self):
        """Coords mapping: membership, key lookup, repr and dims."""
        # use an OrderedDict for coordinates to ensure order across python
        # versions
        # use int64 for repr consistency on windows
        data = Dataset(OrderedDict([('x', ('x', np.array([-1, -2], 'int64'))),
                                    ('y', ('y', np.array([0, 1, 2], 'int64'))),
                                    ('foo', (['x', 'y'],
                                             np.random.randn(2, 3)))]),
                       OrderedDict([('a', ('x', np.array([4, 5], 'int64'))),
                                    ('b', np.int64(-10))]))
        self.assertEqual(4, len(data.coords))
        self.assertItemsEqual(['x', 'y', 'a', 'b'], list(data.coords))
        self.assertVariableIdentical(data.coords['x'], data['x'].variable)
        self.assertVariableIdentical(data.coords['y'], data['y'].variable)
        self.assertIn('x', data.coords)
        self.assertIn('a', data.coords)
        self.assertNotIn(0, data.coords)
        self.assertNotIn('foo', data.coords)
        # data variables and positional keys are not valid coord lookups
        with self.assertRaises(KeyError):
            data.coords['foo']
        with self.assertRaises(KeyError):
            data.coords[0]
        expected = dedent("""\
        Coordinates:
          * x        (x) int64 -1 -2
          * y        (y) int64 0 1 2
            a        (x) int64 4 5
            b        int64 -10""")
        actual = repr(data.coords)
        self.assertEqual(expected, actual)
        self.assertEqual({'x': 2, 'y': 3}, data.coords.dims)
    def test_coords_modify(self):
        """Setting, deleting and updating entries of the coords mapping."""
        data = Dataset({'x': ('x', [-1, -2]),
                        'y': ('y', [0, 1, 2]),
                        'foo': (['x', 'y'], np.random.randn(2, 3))},
                       {'a': ('x', [4, 5]), 'b': -10})
        actual = data.copy(deep=True)
        actual.coords['x'] = ('x', ['a', 'b'])
        self.assertArrayEqual(actual['x'], ['a', 'b'])
        actual = data.copy(deep=True)
        actual.coords['z'] = ('z', ['a', 'b'])
        self.assertArrayEqual(actual['z'], ['a', 'b'])
        actual = data.copy(deep=True)
        with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
            actual.coords['x'] = ('x', [-1])
        self.assertDatasetIdentical(actual, data)  # should not be modified
        actual = data.copy()
        del actual.coords['b']
        expected = data.reset_coords('b', drop=True)
        self.assertDatasetIdentical(expected, actual)
        # deleting missing keys or data variables raises KeyError
        with self.assertRaises(KeyError):
            del data.coords['not_found']
        with self.assertRaises(KeyError):
            del data.coords['foo']
        actual = data.copy(deep=True)
        actual.coords.update({'c': 11})
        expected = data.merge({'c': 11}).set_coords('c')
        self.assertDatasetIdentical(expected, actual)
def test_coords_setitem_with_new_dimension(self):
actual = Dataset()
actual.coords['foo'] = ('x', [1, 2, 3])
expected = Dataset(coords={'foo': ('x', [1, 2, 3])})
self.assertDatasetIdentical(expected, actual)
    def test_coords_set(self):
        """set_coords/reset_coords move variables between data_vars and coords."""
        one_coord = Dataset({'x': ('x', [0]),
                             'yy': ('x', [1]),
                             'zzz': ('x', [2])})
        two_coords = Dataset({'zzz': ('x', [2])},
                             {'x': ('x', [0]),
                              'yy': ('x', [1])})
        all_coords = Dataset(coords={'x': ('x', [0]),
                                     'yy': ('x', [1]),
                                     'zzz': ('x', [2])})
        # promoting the index coord itself is a no-op
        actual = one_coord.set_coords('x')
        self.assertDatasetIdentical(one_coord, actual)
        actual = one_coord.set_coords(['x'])
        self.assertDatasetIdentical(one_coord, actual)
        actual = one_coord.set_coords('yy')
        self.assertDatasetIdentical(two_coords, actual)
        actual = one_coord.set_coords(['yy', 'zzz'])
        self.assertDatasetIdentical(all_coords, actual)
        # reset_coords with no arguments demotes all non-index coords
        actual = one_coord.reset_coords()
        self.assertDatasetIdentical(one_coord, actual)
        actual = two_coords.reset_coords()
        self.assertDatasetIdentical(one_coord, actual)
        actual = all_coords.reset_coords()
        self.assertDatasetIdentical(one_coord, actual)
        actual = all_coords.reset_coords(['yy', 'zzz'])
        self.assertDatasetIdentical(one_coord, actual)
        actual = all_coords.reset_coords('zzz')
        self.assertDatasetIdentical(two_coords, actual)
        # index coordinates cannot be reset
        with self.assertRaisesRegexp(ValueError, 'cannot remove index'):
            one_coord.reset_coords('x')
        actual = all_coords.reset_coords('zzz', drop=True)
        expected = all_coords.drop('zzz')
        self.assertDatasetIdentical(expected, actual)
        expected = two_coords.drop('zzz')
        self.assertDatasetIdentical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({'foo': ('y', [-1, 0, 1])}, {'x': 10, 'y': [2, 3, 4]})
expected = Dataset(coords={'x': 10, 'y': [2, 3, 4]})
actual = orig.coords.to_dataset()
self.assertDatasetIdentical(expected, actual)
    def test_coords_merge(self):
        """Coords.merge: symmetric union, conflict errors, NaN handling."""
        orig_coords = Dataset(coords={'a': ('x', [1, 2])}).coords
        other_coords = Dataset(coords={'b': ('x', ['a', 'b'])}).coords
        expected = Dataset(coords={'a': ('x', [1, 2]),
                                   'b': ('x', ['a', 'b'])})
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(expected, actual)
        # conflicting index coordinates of any size raise MergeError
        other_coords = Dataset(coords={'x': ('x', ['a'])}).coords
        with self.assertRaises(MergeError):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={'x': ('x', ['a', 'b'])}).coords
        with self.assertRaises(MergeError):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={'x': ('x', ['a', 'b', 'c'])}).coords
        with self.assertRaises(MergeError):
            orig_coords.merge(other_coords)
        # conflicting non-index coords are dropped, leaving just the index
        other_coords = Dataset(coords={'a': ('x', [8, 9])}).coords
        expected = Dataset(coords={'x': range(2)})
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(expected, actual)
        # a scalar NaN coord merges away cleanly
        other_coords = Dataset(coords={'x': np.nan}).coords
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
    def test_coords_merge_mismatched_shape(self):
        """Merging coords of different shapes broadcasts when values agree."""
        orig_coords = Dataset(coords={'a': ('x', [1, 1])}).coords
        other_coords = Dataset(coords={'a': 1}).coords
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        # merging along a new dimension broadcasts to 2d
        other_coords = Dataset(coords={'a': ('y', [1])}).coords
        expected = Dataset(coords={'a': (['x', 'y'], [[1], [1]])})
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
        actual = other_coords.merge(orig_coords)
        self.assertDatasetIdentical(expected.T, actual)
        # NaN values compare equal for the purposes of merging
        orig_coords = Dataset(coords={'a': ('x', [np.nan])}).coords
        other_coords = Dataset(coords={'a': np.nan}).coords
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        self.assertDatasetIdentical(expected, actual)
    def test_equals_and_identical(self):
        """equals ignores attrs; identical requires attrs to match too."""
        data = create_test_data(seed=42)
        self.assertTrue(data.equals(data))
        self.assertTrue(data.identical(data))
        data2 = create_test_data(seed=42)
        data2.attrs['foobar'] = 'baz'
        self.assertTrue(data.equals(data2))
        self.assertFalse(data.identical(data2))
        # missing variables break equality
        del data2['time']
        self.assertFalse(data.equals(data2))
        # datasets with a None-named variable still compare to themselves
        data = create_test_data(seed=42).rename({'var1': None})
        self.assertTrue(data.equals(data))
        self.assertTrue(data.identical(data))
        data2 = data.reset_coords()
        self.assertFalse(data2.equals(data))
        self.assertFalse(data2.identical(data))
def test_equals_failures(self):
data = create_test_data()
self.assertFalse(data.equals('foo'))
self.assertFalse(data.identical(123))
self.assertFalse(data.broadcast_equals({1: 2}))
def test_broadcast_equals(self):
data1 = Dataset(coords={'x': 0})
data2 = Dataset(coords={'x': [0]})
self.assertTrue(data1.broadcast_equals(data2))
self.assertFalse(data1.equals(data2))
self.assertFalse(data1.identical(data2))
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {'foobar': 'baz'}
self.assertTrue(data.attrs['foobar'], 'baz')
self.assertIsInstance(data.attrs, OrderedDict)
    @requires_dask
    def test_chunk(self):
        """chunk() wraps variables in dask arrays with the requested chunks."""
        data = create_test_data()
        for v in data.variables.values():
            self.assertIsInstance(v.data, np.ndarray)
        self.assertEqual(data.chunks, {})
        reblocked = data.chunk()
        for v in reblocked.variables.values():
            self.assertIsInstance(v.data, da.Array)
        # default chunking is one chunk per dimension
        expected_chunks = dict((d, (s,)) for d, s in data.dims.items())
        self.assertEqual(reblocked.chunks, expected_chunks)
        reblocked = data.chunk({'time': 5, 'dim1': 5, 'dim2': 5, 'dim3': 5})
        expected_chunks = {'time': (5,) * 4, 'dim1': (5, 3),
                           'dim2': (5, 4), 'dim3': (5, 5)}
        self.assertEqual(reblocked.chunks, expected_chunks)
        reblocked = data.chunk(expected_chunks)
        self.assertEqual(reblocked.chunks, expected_chunks)
        # reblock on already blocked data
        reblocked = reblocked.chunk(expected_chunks)
        self.assertEqual(reblocked.chunks, expected_chunks)
        self.assertDatasetIdentical(reblocked, data)
        # chunking along an unknown dimension is an error
        with self.assertRaisesRegexp(ValueError, 'some chunks'):
            data.chunk({'foo': 10})
    @requires_dask
    def test_dask_is_lazy(self):
        """Dask-backed datasets defer data access until values are requested."""
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)
        ds = open_dataset(store).chunk()
        # loading values triggers the store and must raise
        with self.assertRaises(UnexpectedDataAccess):
            ds.load()
        with self.assertRaises(UnexpectedDataAccess):
            ds['var1'].values
        # these should not raise UnexpectedDataAccess:
        ds.var1.data
        ds.isel(time=10)
        ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
        ds.transpose()
        ds.mean()
        ds.fillna(0)
        ds.rename({'dim1': 'foobar'})
        ds.set_coords('var1')
        ds.drop('var1')
    def test_isel(self):
        """Positional indexing: dims shrink as sliced, data matches numpy slicing."""
        data = create_test_data()
        slicers = {'dim1': slice(None, None, 2), 'dim2': slice(0, 2)}
        ret = data.isel(**slicers)
        # Verify that only the specified dimension was altered
        self.assertItemsEqual(data.dims, ret.dims)
        for d in data.dims:
            if d in slicers:
                self.assertEqual(ret.dims[d],
                                 np.arange(data.dims[d])[slicers[d]].size)
            else:
                self.assertEqual(data.dims[d], ret.dims[d])
        # Verify that the data is what we expect
        for v in data:
            self.assertEqual(data[v].dims, ret[v].dims)
            self.assertEqual(data[v].attrs, ret[v].attrs)
            slice_list = [slice(None)] * data[v].values.ndim
            for d, s in iteritems(slicers):
                if d in data[v].dims:
                    inds = np.nonzero(np.array(data[v].dims) == d)[0]
                    for ind in inds:
                        slice_list[ind] = s
            expected = data[v].values[slice_list]
            actual = ret[v].values
            np.testing.assert_array_equal(expected, actual)
        # indexing a nonexistent dimension is an error
        with self.assertRaises(ValueError):
            data.isel(not_a_dim=slice(0, 2))
        # integer indexing drops the dimension and its index
        ret = data.isel(dim1=0)
        self.assertEqual({'time': 20, 'dim2': 9, 'dim3': 10}, ret.dims)
        self.assertItemsEqual(data.data_vars, ret.data_vars)
        self.assertItemsEqual(data.coords, ret.coords)
        self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
        ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
        self.assertEqual({'time': 2, 'dim2': 5, 'dim3': 10}, ret.dims)
        self.assertItemsEqual(data.data_vars, ret.data_vars)
        self.assertItemsEqual(data.coords, ret.coords)
        self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
        ret = data.isel(time=0, dim1=0, dim2=slice(5))
        self.assertItemsEqual({'dim2': 5, 'dim3': 10}, ret.dims)
        self.assertItemsEqual(data.data_vars, ret.data_vars)
        self.assertItemsEqual(data.coords, ret.coords)
        self.assertItemsEqual(data.indexes,
                              list(ret.indexes) + ['dim1', 'time'])
    def test_sel(self):
        """Label-based selection matches the equivalent positional isel."""
        data = create_test_data()
        int_slicers = {'dim1': slice(None, None, 2),
                       'dim2': slice(2),
                       'dim3': slice(3)}
        loc_slicers = {'dim1': slice(None, None, 2),
                       'dim2': slice(0, 0.5),
                       'dim3': slice('a', 'c')}
        self.assertDatasetEqual(data.isel(**int_slicers),
                                data.sel(**loc_slicers))
        # datetime labels: scalars, slices (inclusive), and partial strings
        data['time'] = ('time', pd.date_range('2000-01-01', periods=20))
        self.assertDatasetEqual(data.isel(time=0),
                                data.sel(time='2000-01-01'))
        self.assertDatasetEqual(data.isel(time=slice(10)),
                                data.sel(time=slice('2000-01-01',
                                                    '2000-01-10')))
        self.assertDatasetEqual(data, data.sel(time=slice('1999', '2005')))
        times = pd.date_range('2000-01-01', periods=3)
        self.assertDatasetEqual(data.isel(time=slice(3)),
                                data.sel(time=times))
        # boolean DataArray indexers are also accepted
        self.assertDatasetEqual(data.isel(time=slice(3)),
                                data.sel(time=(data['time.dayofyear'] <= 3)))
        # timedelta labels
        td = pd.to_timedelta(np.arange(3), unit='days')
        data = Dataset({'x': ('td', np.arange(3)), 'td': td})
        self.assertDatasetEqual(data, data.sel(td=td))
        self.assertDatasetEqual(data, data.sel(td=slice('3 days')))
        self.assertDatasetEqual(data.isel(td=0), data.sel(td='0 days'))
        self.assertDatasetEqual(data.isel(td=0), data.sel(td='0h'))
        self.assertDatasetEqual(data.isel(td=slice(1, 3)),
                                data.sel(td=slice('1 days', '2 days')))
    def test_isel_points(self):
        """Pointwise indexing along a new dimension, with validation errors."""
        data = create_test_data()
        pdim1 = [1, 2, 3]
        pdim2 = [4, 5, 1]
        pdim3 = [1, 2, 3]
        actual = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
                                  dim='test_coord')
        assert 'test_coord' in actual.coords
        assert actual.coords['test_coord'].shape == (len(pdim1), )
        actual = data.isel_points(dim1=pdim1, dim2=pdim2)
        assert 'points' in actual.coords
        np.testing.assert_array_equal(pdim1, actual['dim1'])
        # test that the order of the indexers doesn't matter
        self.assertDatasetIdentical(data.isel_points(dim1=pdim1, dim2=pdim2),
                                    data.isel_points(dim2=pdim2, dim1=pdim1))
        # make sure we're raising errors in the right places
        with self.assertRaisesRegexp(ValueError,
                                     'All indexers must be the same length'):
            data.isel_points(dim1=[1, 2], dim2=[1, 2, 3])
        with self.assertRaisesRegexp(ValueError,
                                     'dimension bad_key does not exist'):
            data.isel_points(bad_key=[1, 2])
        with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
            data.isel_points(dim1=[1.5, 2.2])
        with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
            data.isel_points(dim1=[1, 2, 3], dim2=slice(3))
        with self.assertRaisesRegexp(ValueError,
                                     'Indexers must be 1 dimensional'):
            data.isel_points(dim1=1, dim2=2)
        with self.assertRaisesRegexp(ValueError,
                                     'Existing dimension names are not valid'):
            data.isel_points(dim1=[1, 2], dim2=[1, 2], dim='dim2')
        # test to be sure we keep around variables that were not indexed
        ds = Dataset({'x': [1, 2, 3, 4], 'y': 0})
        actual = ds.isel_points(x=[0, 1, 2])
        self.assertDataArrayIdentical(ds['y'], actual['y'])
        # tests using index or DataArray as a dim
        stations = Dataset()
        stations['station'] = ('station', ['A', 'B', 'C'])
        stations['dim1s'] = ('station', [1, 2, 3])
        stations['dim2s'] = ('station', [4, 5, 1])
        actual = data.isel_points(dim1=stations['dim1s'],
                                  dim2=stations['dim2s'],
                                  dim=stations['station'])
        assert 'station' in actual.coords
        assert 'station' in actual.dims
        self.assertDataArrayIdentical(actual['station'].drop(['dim1', 'dim2']),
                                      stations['station'])
        # make sure we get the default 'points' coordinate when a list is passed
        actual = data.isel_points(dim1=stations['dim1s'],
                                  dim2=stations['dim2s'],
                                  dim=['A', 'B', 'C'])
        assert 'points' in actual.coords
        assert actual.coords['points'].values.tolist() == ['A', 'B', 'C']
        # test index
        actual = data.isel_points(dim1=stations['dim1s'].values,
                                  dim2=stations['dim2s'].values,
                                  dim=pd.Index(['A', 'B', 'C'], name='letters'))
        assert 'letters' in actual.coords
        # can pass a numpy array
        data.isel_points(dim1=stations['dim1s'],
                         dim2=stations['dim2s'],
                         dim=np.array([4, 5, 6]))
    def test_sel_points(self):
        """Label-based pointwise selection matches isel_points; supports method=."""
        data = create_test_data()
        pdim1 = [1, 2, 3]
        pdim2 = [4, 5, 1]
        pdim3 = [1, 2, 3]
        expected = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
                                    dim='test_coord')
        actual = data.sel_points(dim1=data.dim1[pdim1], dim2=data.dim2[pdim2],
                                 dim3=data.dim3[pdim3], dim='test_coord')
        self.assertDatasetIdentical(expected, actual)
        # inexact labels resolved with method='pad'
        data = Dataset({'foo': (('x', 'y'), np.arange(9).reshape(3, 3))})
        expected = Dataset({'foo': ('points', [0, 4, 8])},
                           {'x': ('points', range(3)),
                            'y': ('points', range(3))})
        actual = data.sel_points(x=[0.1, 1.1, 2.5], y=[0, 1.2, 2.0],
                                 method='pad')
        self.assertDatasetIdentical(expected, actual)
        # tolerance= requires pandas >= 0.17
        if pd.__version__ >= '0.17':
            with self.assertRaises(KeyError):
                data.sel_points(x=[2.5], y=[2.0], method='pad', tolerance=1e-3)
    def test_sel_method(self):
        """sel with method='nearest'/'backfill' and the tolerance keyword."""
        data = create_test_data()
        # method= requires pandas >= 0.16; tolerance= requires >= 0.17
        if pd.__version__ >= '0.16':
            expected = data.sel(dim1=1)
            actual = data.sel(dim1=0.95, method='nearest')
            self.assertDatasetIdentical(expected, actual)
            if pd.__version__ >= '0.17':
                actual = data.sel(dim1=0.95, method='nearest', tolerance=1)
                self.assertDatasetIdentical(expected, actual)
                with self.assertRaises(KeyError):
                    actual = data.sel(dim1=0.5, method='nearest', tolerance=0)
            expected = data.sel(dim2=[1.5])
            actual = data.sel(dim2=[1.45], method='backfill')
            self.assertDatasetIdentical(expected, actual)
            # slices cannot be combined with an inexact method
            with self.assertRaisesRegexp(NotImplementedError, 'slice objects'):
                data.sel(dim2=slice(1, 3), method='ffill')
            with self.assertRaisesRegexp(TypeError, '``method``'):
                # this should not pass silently
                data.sel(data)
def test_loc(self):
data = create_test_data()
expected = data.sel(dim3='a')
actual = data.loc[dict(dim3='a')]
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'can only lookup dict'):
data.loc['a']
with self.assertRaises(TypeError):
data.loc[dict(dim3='a')] = 0
    def test_multiindex(self):
        """Selection on a pandas MultiIndex: tuples, slices, dicts, partial levels."""
        mindex = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]],
                                            names=('one', 'two', 'three'))
        mdata = Dataset(data_vars={'var': ('x', range(8))},
                        coords={'x': mindex})

        def test_sel(lab_indexer, pos_indexer, replaced_idx=False,
                     renamed_dim=None):
            # helper: sel with a label indexer must equal isel with the
            # corresponding positional indexer; replaced_idx marks partial
            # selections that replace the MultiIndex (possibly renaming the dim)
            ds = mdata.sel(x=lab_indexer)
            expected_ds = mdata.isel(x=pos_indexer)
            if not replaced_idx:
                self.assertDatasetIdentical(ds, expected_ds)
            else:
                if renamed_dim:
                    self.assertEqual(ds['var'].dims[0], renamed_dim)
                    ds = ds.rename({renamed_dim: 'x'})
                self.assertVariableIdentical(ds['var'], expected_ds['var'])
                self.assertVariableNotEqual(ds['x'], expected_ds['x'])

        test_sel(('a', 1, -1), 0)
        test_sel(('b', 2, -2), -1)
        test_sel(('a', 1), [0, 1], replaced_idx=True, renamed_dim='three')
        test_sel(('a',), range(4), replaced_idx=True)
        test_sel('a', range(4), replaced_idx=True)
        test_sel([('a', 1, -1), ('b', 2, -2)], [0, 7])
        test_sel(slice('a', 'b'), range(8))
        test_sel(slice(('a', 1), ('b', 1)), range(6))
        test_sel({'one': 'a', 'two': 1, 'three': -1}, 0)
        test_sel({'one': 'a', 'two': 1}, [0, 1], replaced_idx=True,
                 renamed_dim='three')
        test_sel({'one': 'a'}, range(4), replaced_idx=True)
        # .loc mirrors sel for MultiIndex labels
        self.assertDatasetIdentical(mdata.loc[{'x': {'one': 'a'}}],
                                    mdata.sel(x={'one': 'a'}))
        self.assertDatasetIdentical(mdata.loc[{'x': 'a'}],
                                    mdata.sel(x='a'))
        self.assertDatasetIdentical(mdata.loc[{'x': ('a', 1)}],
                                    mdata.sel(x=('a', 1)))
        self.assertDatasetIdentical(mdata.loc[{'x': ('a', 1, -1)}],
                                    mdata.sel(x=('a', 1, -1)))
        # level names are not valid top-level .loc keys
        with self.assertRaises(KeyError):
            mdata.loc[{'one': 'a'}]
    def test_reindex_like(self):
        """reindex_like matches another dataset's indexes, filling with NaN."""
        data = create_test_data()
        data['letters'] = ('dim3', 10 * ['a'])
        expected = data.isel(dim1=slice(10), time=slice(13))
        actual = data.reindex_like(expected)
        self.assertDatasetIdentical(actual, expected)
        # build the expected result by shifting dim3 two positions and
        # NaN-filling the tail (which forces object/float dtypes)
        expected = data.copy(deep=True)
        expected['dim3'] = ('dim3', list('cdefghijkl'))
        expected['var3'][:-2] = expected['var3'][2:]
        expected['var3'][-2:] = np.nan
        expected['letters'] = expected['letters'].astype(object)
        expected['letters'][-2:] = np.nan
        expected['numbers'] = expected['numbers'].astype(float)
        expected['numbers'][:-2] = expected['numbers'][2:].values
        expected['numbers'][-2:] = np.nan
        actual = data.reindex_like(expected)
        self.assertDatasetIdentical(actual, expected)
    def test_reindex(self):
        """reindex: various indexer types, errors, ordering, regressions."""
        data = create_test_data()
        self.assertDatasetIdentical(data, data.reindex())
        expected = data.isel(dim1=slice(10))
        # DataArray, ndarray and pandas Index are all accepted as targets
        actual = data.reindex(dim1=data['dim1'][:10])
        self.assertDatasetIdentical(actual, expected)
        actual = data.reindex(dim1=data['dim1'][:10].values)
        self.assertDatasetIdentical(actual, expected)
        actual = data.reindex(dim1=data['dim1'][:10].to_index())
        self.assertDatasetIdentical(actual, expected)
        # test dict-like argument
        actual = data.reindex({'dim1': data['dim1'][:10]})
        self.assertDatasetIdentical(actual, expected)
        with self.assertRaisesRegexp(ValueError, 'cannot specify both'):
            data.reindex({'x': 0}, x=0)
        with self.assertRaisesRegexp(ValueError, 'dictionary'):
            data.reindex('foo')
        # invalid dimension
        with self.assertRaisesRegexp(ValueError, 'invalid reindex dim'):
            data.reindex(invalid=0)
        # out of order
        expected = data.sel(dim1=data['dim1'][:10:-1])
        actual = data.reindex(dim1=data['dim1'][:10:-1])
        self.assertDatasetIdentical(actual, expected)
        # regression test for #279
        expected = Dataset({'x': ('time', np.random.randn(5))})
        time2 = DataArray(np.arange(5), dims="time2")
        actual = expected.reindex(time=time2)
        self.assertDatasetIdentical(actual, expected)
        # another regression test
        ds = Dataset({'foo': (['x', 'y'], np.zeros((3, 4)))})
        expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 2))),
                            'x': [0, 1, 3]})
        expected['foo'][-1] = np.nan
        actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
        self.assertDatasetIdentical(expected, actual)
    def test_reindex_method(self):
        """reindex with fill methods ('backfill'/'pad') and tolerance."""
        ds = Dataset({'x': ('y', [10, 20])})
        y = [-0.5, 0.5, 1.5]
        actual = ds.reindex(y=y, method='backfill')
        expected = Dataset({'x': ('y', [10, 20, np.nan]), 'y': y})
        self.assertDatasetIdentical(expected, actual)
        # tolerance= requires pandas >= 0.17; otherwise it must raise
        if pd.__version__ >= '0.17':
            actual = ds.reindex(y=y, method='backfill', tolerance=0.1)
            expected = Dataset({'x': ('y', 3 * [np.nan]), 'y': y})
            self.assertDatasetIdentical(expected, actual)
        else:
            with self.assertRaisesRegexp(NotImplementedError, 'tolerance'):
                ds.reindex(y=y, method='backfill', tolerance=0.1)
        actual = ds.reindex(y=y, method='pad')
        expected = Dataset({'x': ('y', [np.nan, 10, 20]), 'y': y})
        self.assertDatasetIdentical(expected, actual)
        # reindex_like forwards the method keyword
        alt = Dataset({'y': y})
        actual = ds.reindex_like(alt, method='pad')
        self.assertDatasetIdentical(expected, actual)
    def test_align(self):
        """align with join='inner'/'outer'/'left'/'right' on shifted indexes."""
        left = create_test_data()
        # build a right-hand dataset shifted two labels along dim3
        right = left.copy(deep=True)
        right['dim3'] = ('dim3', list('cdefghijkl'))
        right['var3'][:-2] = right['var3'][2:]
        right['var3'][-2:] = np.random.randn(*right['var3'][-2:].shape)
        right['numbers'][:-2] = right['numbers'][2:]
        right['numbers'][-2:] = -10
        intersection = list('cdefghij')
        union = list('abcdefghijkl')
        left2, right2 = align(left, right, join='inner')
        self.assertArrayEqual(left2['dim3'], intersection)
        self.assertDatasetIdentical(left2, right2)
        left2, right2 = align(left, right, join='outer')
        self.assertVariableEqual(left2['dim3'], right2['dim3'])
        self.assertArrayEqual(left2['dim3'], union)
        self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                    right2.sel(dim3=intersection))
        # outer join NaN-fills the non-overlapping labels on each side
        self.assertTrue(np.isnan(left2['var3'][-2:]).all())
        self.assertTrue(np.isnan(right2['var3'][:2]).all())
        left2, right2 = align(left, right, join='left')
        self.assertVariableEqual(left2['dim3'], right2['dim3'])
        self.assertVariableEqual(left2['dim3'], left['dim3'])
        self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                    right2.sel(dim3=intersection))
        self.assertTrue(np.isnan(right2['var3'][:2]).all())
        left2, right2 = align(left, right, join='right')
        self.assertVariableEqual(left2['dim3'], right2['dim3'])
        self.assertVariableEqual(left2['dim3'], right['dim3'])
        self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                    right2.sel(dim3=intersection))
        self.assertTrue(np.isnan(left2['var3'][-2:]).all())
        # invalid join values and unknown keywords are rejected
        with self.assertRaisesRegexp(ValueError, 'invalid value for join'):
            align(left, right, join='foobar')
        with self.assertRaises(TypeError):
            align(left, right, foo='bar')
    def test_align_exclude(self):
        """align(exclude=...) leaves the excluded dimension unaligned."""
        x = Dataset({'foo': DataArray([[1, 2],[3, 4]], dims=['x', 'y'], coords={'x': [1, 2], 'y': [3, 4]})})
        y = Dataset({'bar': DataArray([[1, 2],[3, 4]], dims=['x', 'y'], coords={'x': [1, 3], 'y': [5, 6]})})
        x2, y2 = align(x, y, exclude=['y'], join='outer')
        # 'x' is outer-joined to [1, 2, 3]; 'y' keeps its differing labels
        expected_x2 = Dataset({'foo': DataArray([[1, 2], [3, 4], [np.nan, np.nan]], dims=['x', 'y'], coords={'x': [1, 2, 3], 'y': [3, 4]})})
        expected_y2 = Dataset({'bar': DataArray([[1, 2], [np.nan, np.nan], [3, 4]], dims=['x', 'y'], coords={'x': [1, 2, 3], 'y': [5, 6]})})
        self.assertDatasetIdentical(expected_x2, x2)
        self.assertDatasetIdentical(expected_y2, y2)
    def test_align_nocopy(self):
        """copy=False reuses the underlying buffers when no reindex is needed."""
        x = Dataset({'foo': DataArray([1, 2, 3], coords={'x': [1, 2, 3]})})
        y = Dataset({'foo': DataArray([1, 2], coords={'x': [1, 2]})})
        expected_x2 = x
        expected_y2 = Dataset({'foo': DataArray([1, 2, np.nan], coords={'x': [1, 2, 3]})})
        x2, y2 = align(x, y, copy=False, join='outer')
        self.assertDatasetIdentical(expected_x2, x2)
        self.assertDatasetIdentical(expected_y2, y2)
        # same ndarray object: no copy was made for the already-aligned input
        assert source_ndarray(x['foo'].data) is source_ndarray(x2['foo'].data)
        x2, y2 = align(x, y, copy=True, join='outer')
        self.assertDatasetIdentical(expected_x2, x2)
        self.assertDatasetIdentical(expected_y2, y2)
        # copy=True must produce a distinct buffer even when values match
        assert source_ndarray(x['foo'].data) is not source_ndarray(x2['foo'].data)
def test_align_indexes(self):
x = Dataset({'foo': DataArray([1, 2, 3], coords={'x': [1, 2, 3]})})
x2, = align(x, indexes={'x': [2, 3, 1]})
expected_x2 = Dataset({'foo': DataArray([2, 3, 1], coords={'x': [2, 3, 1]})})
self.assertDatasetIdentical(expected_x2, x2)
    def test_broadcast(self):
        """broadcast() expands all arguments to the union of dimensions."""
        ds = Dataset({'foo': 0, 'bar': ('x', [1]), 'baz': ('y', [2, 3])},
                     {'c': ('x', [4])})
        expected = Dataset({'foo': (('x', 'y'), [[0, 0]]),
                            'bar': (('x', 'y'), [[1, 1]]),
                            'baz': (('x', 'y'), [[2, 3]])},
                           {'c': ('x', [4])})
        actual, = broadcast(ds)
        self.assertDatasetIdentical(expected, actual)
        # two datasets with disjoint dims broadcast against each other
        ds_x = Dataset({'foo': ('x', [1])})
        ds_y = Dataset({'bar': ('y', [2, 3])})
        expected_x = Dataset({'foo': (('x', 'y'), [[1, 1]])})
        expected_y = Dataset({'bar': (('x', 'y'), [[2, 3]])})
        actual_x, actual_y = broadcast(ds_x, ds_y)
        self.assertDatasetIdentical(expected_x, actual_x)
        self.assertDatasetIdentical(expected_y, actual_y)
        # mixing a Dataset and a DataArray preserves each type
        array_y = ds_y['bar']
        expected_y = expected_y['bar']
        actual_x, actual_y = broadcast(ds_x, array_y)
        self.assertDatasetIdentical(expected_x, actual_x)
        self.assertDataArrayIdentical(expected_y, actual_y)
    def test_broadcast_nocopy(self):
        """broadcast() reuses buffers for inputs that already have all dims."""
        # Test that data is not copied if not needed
        x = Dataset({'foo': (('x', 'y'), [[1, 1]])})
        y = Dataset({'bar': ('y', [2, 3])})
        actual_x, = broadcast(x)
        self.assertDatasetIdentical(x, actual_x)
        assert source_ndarray(actual_x['foo'].data) is source_ndarray(x['foo'].data)
        actual_x, actual_y = broadcast(x, y)
        self.assertDatasetIdentical(x, actual_x)
        assert source_ndarray(actual_x['foo'].data) is source_ndarray(x['foo'].data)
    def test_broadcast_exclude(self):
        """broadcast(exclude=...) leaves the excluded dimension unbroadcast."""
        x = Dataset({
            'foo': DataArray([[1, 2],[3, 4]], dims=['x', 'y'], coords={'x': [1, 2], 'y': [3, 4]}),
            'bar': DataArray(5),
        })
        y = Dataset({
            'foo': DataArray([[1, 2]], dims=['z', 'y'], coords={'z': [1], 'y': [5, 6]}),
        })
        x2, y2 = broadcast(x, y, exclude=['y'])
        # 'x' and 'z' are broadcast; 'y' keeps its conflicting labels
        expected_x2 = Dataset({
            'foo': DataArray([[[1, 2]], [[3, 4]]], dims=['x', 'z', 'y'], coords={'z': [1], 'x': [1, 2], 'y': [3, 4]}),
            'bar': DataArray([[5], [5]], dims=['x', 'z'], coords={'x': [1, 2], 'z': [1]}),
        })
        expected_y2 = Dataset({
            'foo': DataArray([[[1, 2]], [[1, 2]]], dims=['x', 'z', 'y'], coords={'z': [1], 'x': [1, 2], 'y': [5, 6]}),
        })
        self.assertDatasetIdentical(expected_x2, x2)
        self.assertDatasetIdentical(expected_y2, y2)
    def test_broadcast_misaligned(self):
        """broadcast() aligns (outer join) before expanding dimensions."""
        x = Dataset({'foo': DataArray([1, 2, 3], coords={'x': [-1, -2, -3]})})
        y = Dataset({'bar': DataArray([[1, 2], [3, 4]], dims=['y', 'x'], coords={'y': [1, 2], 'x': [10, -3]})})
        x2, y2 = broadcast(x, y)
        # 'x' becomes the sorted union [-3, -2, -1, 10], NaN-filled as needed
        expected_x2 = Dataset({'foo': DataArray([[3, 3], [2, 2], [1, 1], [np.nan, np.nan]], dims=['x', 'y'], coords={'y': [1, 2], 'x': [-3, -2, -1, 10]})})
        expected_y2 = Dataset({'bar': DataArray([[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]], dims=['x', 'y'], coords={'y': [1, 2], 'x': [-3, -2, -1, 10]})})
        self.assertDatasetIdentical(expected_x2, x2)
        self.assertDatasetIdentical(expected_y2, y2)
def test_variable_indexing(self):
    """Variables support indexing by arrays, coordinates, booleans and .loc."""
    data = create_test_data()
    v = data['var1']
    d1 = data['dim1']
    d2 = data['dim2']
    self.assertVariableEqual(v, v[d1.values])
    self.assertVariableEqual(v, v[d1])
    self.assertVariableEqual(v[:3], v[d1 < 3])
    self.assertVariableEqual(v[:, 3:], v[:, d2 >= 1.5])
    self.assertVariableEqual(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
    self.assertVariableEqual(v[:3, :2], v[range(3), range(2)])
    self.assertVariableEqual(v[:3, :2], v.loc[d1[:3], d2[:2]])
def test_drop_variables(self):
    """drop() removes variables (and dependent variables when a dim is dropped)."""
    data = create_test_data()
    self.assertDatasetIdentical(data, data.drop([]))
    expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
    actual = data.drop('time')
    self.assertDatasetIdentical(expected, actual)
    actual = data.drop(['time'])
    self.assertDatasetIdentical(expected, actual)
    # dropping a dimension coordinate also drops the variables along it
    expected = Dataset(dict((k, data[k]) for
                            k in ['dim2', 'dim3', 'time', 'numbers']))
    actual = data.drop('dim1')
    self.assertDatasetIdentical(expected, actual)
    with self.assertRaisesRegexp(ValueError, 'cannot be found'):
        data.drop('not_found_here')
def test_drop_index_labels(self):
    """drop(labels, dim) removes index labels along a dimension."""
    data = Dataset({'A': (['x', 'y'], np.random.randn(2, 3)),
                    'x': ['a', 'b']})
    actual = data.drop(1, 'y')
    expected = data.isel(y=[0, 2])
    self.assertDatasetIdentical(expected, actual)
    actual = data.drop(['a'], 'x')
    expected = data.isel(x=[1])
    self.assertDatasetIdentical(expected, actual)
    # dropping every label leaves an empty dimension
    actual = data.drop(['a', 'b'], 'x')
    expected = data.isel(x=slice(0, 0))
    self.assertDatasetIdentical(expected, actual)
    with self.assertRaises(ValueError):
        # not contained in axis
        data.drop(['c'], dim='x')
def test_copy(self):
    """Shallow copies share variable objects; deep copies do not."""
    data = create_test_data()
    for copied in [data.copy(deep=False), copy(data)]:
        self.assertDatasetIdentical(data, copied)
        for k in data:
            v0 = data.variables[k]
            v1 = copied.variables[k]
            # shallow copy: same variable objects
            self.assertIs(v0, v1)
        # mutating the copy must not leak into the original
        copied['foo'] = ('z', np.arange(5))
        self.assertNotIn('foo', data)
    for copied in [data.copy(deep=True), deepcopy(data)]:
        self.assertDatasetIdentical(data, copied)
        for k in data:
            v0 = data.variables[k]
            v1 = copied.variables[k]
            # deep copy: distinct variable objects
            self.assertIsNot(v0, v1)
def test_rename(self):
    """rename() renames variables and dimensions, preserving data, attrs,
    encoding and variable type, without eagerly loading data."""
    data = create_test_data()
    newnames = {'var1': 'renamed_var1', 'dim2': 'renamed_dim2'}
    renamed = data.rename(newnames)
    # build the expected mapping by applying the renames by hand
    variables = OrderedDict(data.variables)
    for k, v in iteritems(newnames):
        variables[v] = variables.pop(k)
    for k, v in iteritems(variables):
        dims = list(v.dims)
        for name, newname in iteritems(newnames):
            if name in dims:
                dims[dims.index(name)] = newname
        self.assertVariableEqual(Variable(dims, v.values, v.attrs),
                                 renamed[k])
        self.assertEqual(v.encoding, renamed[k].encoding)
        self.assertEqual(type(v), type(renamed.variables[k]))
    self.assertTrue('var1' not in renamed)
    self.assertTrue('dim2' not in renamed)
    with self.assertRaisesRegexp(ValueError, "cannot rename 'not_a_var'"):
        data.rename({'not_a_var': 'nada'})
    with self.assertRaisesRegexp(ValueError, "'var1' already exists"):
        data.rename({'var2': 'var1'})
    # verify that we can rename a variable without accessing the data
    var1 = data['var1']
    data['var1'] = (var1.dims, InaccessibleArray(var1.values))
    renamed = data.rename(newnames)
    with self.assertRaises(UnexpectedDataAccess):
        renamed['renamed_var1'].values
def test_rename_same_name(self):
    """Renaming variables/dimensions to their existing names is a no-op."""
    ds = create_test_data()
    identity_map = {'var1': 'var1', 'dim2': 'dim2'}
    self.assertDatasetIdentical(ds.rename(identity_map), ds)
def test_rename_inplace(self):
    """rename(..., inplace=True) mutates the dataset and matches the result
    of an out-of-place rename, including derived virtual variables."""
    times = pd.date_range('2000-01-01', periods=3)
    data = Dataset({'z': ('x', [2, 3, 4]), 't': ('t', times)})
    copied = data.copy()
    renamed = data.rename({'x': 'y'})
    data.rename({'x': 'y'}, inplace=True)
    self.assertDatasetIdentical(data, renamed)
    # the pre-rename copy must no longer compare equal
    self.assertFalse(data.equals(copied))
    # use assertEqual: assertEquals is a deprecated alias removed in
    # Python 3.12
    self.assertEqual(data.dims, {'y': 3, 't': 3})
    # check virtual variables
    self.assertArrayEqual(data['t.dayofyear'], [1, 2, 3])
def test_swap_dims(self):
    """swap_dims() exchanges a dimension coordinate with another variable."""
    original = Dataset({'x': [1, 2, 3], 'y': ('x', list('abc')), 'z': 42})
    expected = Dataset({'z': 42}, {'x': ('y', [1, 2, 3]), 'y': list('abc')})
    actual = original.swap_dims({'x': 'y'})
    self.assertDatasetIdentical(expected, actual)
    # the new dimension becomes a Coordinate; the old one a plain Variable
    self.assertIsInstance(actual.variables['y'], Coordinate)
    self.assertIsInstance(actual.variables['x'], Variable)
    roundtripped = actual.swap_dims({'y': 'x'})
    self.assertDatasetIdentical(original.set_coords('y'), roundtripped)
    actual = original.copy()
    actual.swap_dims({'x': 'y'}, inplace=True)
    self.assertDatasetIdentical(expected, actual)
    with self.assertRaisesRegexp(ValueError, 'cannot swap'):
        original.swap_dims({'y': 'x'})
    with self.assertRaisesRegexp(ValueError, 'replacement dimension'):
        original.swap_dims({'x': 'z'})
def test_stack(self):
    """stack() collapses dimensions into a MultiIndex, respecting dim order."""
    ds = Dataset({'a': ('x', [0, 1]),
                  'b': (('x', 'y'), [[0, 1], [2, 3]]),
                  'y': ['a', 'b']})
    exp_index = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],
                                           names=['x', 'y'])
    expected = Dataset({'a': ('z', [0, 0, 1, 1]),
                        'b': ('z', [0, 1, 2, 3]),
                        'z': exp_index})
    actual = ds.stack(z=['x', 'y'])
    self.assertDatasetIdentical(expected, actual)
    # reversing the stacking order reverses the index level order too
    exp_index = pd.MultiIndex.from_product([['a', 'b'], [0, 1]],
                                           names=['y', 'x'])
    expected = Dataset({'a': ('z', [0, 1, 0, 1]),
                        'b': ('z', [0, 2, 1, 3]),
                        'z': exp_index})
    actual = ds.stack(z=['y', 'x'])
    self.assertDatasetIdentical(expected, actual)
def test_unstack(self):
    """unstack() expands a MultiIndex dimension back into separate dims."""
    index = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],
                                       names=['x', 'y'])
    ds = Dataset({'b': ('z', [0, 1, 2, 3]), 'z': index})
    expected = Dataset({'b': (('x', 'y'), [[0, 1], [2, 3]]),
                        'y': ['a', 'b']})
    actual = ds.unstack('z')
    self.assertDatasetIdentical(actual, expected)
def test_unstack_errors(self):
    """Unstacking a missing dimension or a non-MultiIndex one must raise."""
    dataset = Dataset({'x': [1, 2, 3]})
    for dim, message in [('foo', 'invalid dimension'),
                         ('x', 'does not have a MultiIndex')]:
        with self.assertRaisesRegexp(ValueError, message):
            dataset.unstack(dim)
def test_stack_unstack(self):
    """stack() followed by unstack() round-trips the dataset."""
    ds = Dataset({'a': ('x', [0, 1]),
                  'b': (('x', 'y'), [[0, 1], [2, 3]]),
                  'y': ['a', 'b']})
    # broadcast_equals because 'a' gains the 'y' dimension in the round trip
    actual = ds.stack(z=['x', 'y']).unstack('z')
    assert actual.broadcast_equals(ds)
    actual = ds[['b']].stack(z=['x', 'y']).unstack('z')
    assert actual.identical(ds[['b']])
def test_update(self):
    """update() merges variables in place (or returns a new dataset) and
    keeps the original's attributes."""
    data = create_test_data(seed=0)
    expected = data.copy()
    var2 = Variable('dim1', np.arange(8))
    actual = data.update({'var2': var2})
    expected['var2'] = var2
    self.assertDatasetIdentical(expected, actual)
    actual = data.copy()
    actual_result = actual.update(data, inplace=True)
    # inplace=True returns the same object
    self.assertIs(actual_result, actual)
    self.assertDatasetIdentical(expected, actual)
    actual = data.update(data, inplace=False)
    expected = data
    self.assertIsNot(actual, expected)
    self.assertDatasetIdentical(expected, actual)
    # updating from a dataset with only attrs does not overwrite variables
    other = Dataset(attrs={'new': 'attr'})
    actual = data.copy()
    actual.update(other)
    self.assertDatasetIdentical(expected, actual)
def test_update_auto_align(self):
    """update() aligns Dataset arguments to the target's indexes, but raw
    dicts with mismatched sizes raise."""
    ds = Dataset({'x': ('t', [3, 4])})
    expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan, 5])})
    actual = ds.copy()
    other = {'y': ('t', [5]), 't': [1]}
    with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
        actual.update(other)
    # wrapping in a Dataset enables alignment; missing values become NaN
    actual.update(Dataset(other))
    self.assertDatasetIdentical(expected, actual)
    actual = ds.copy()
    other = Dataset({'y': ('t', [5]), 't': [100]})
    actual.update(other)
    # 't' label 100 does not overlap, so 'y' is all-NaN after alignment
    expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan] * 2)})
    self.assertDatasetIdentical(expected, actual)
def test_getitem(self):
    """__getitem__ supports names, lists of names, and indexer dicts."""
    data = create_test_data()
    self.assertIsInstance(data['var1'], DataArray)
    self.assertVariableEqual(data['var1'], data.variables['var1'])
    with self.assertRaises(KeyError):
        data['notfound']
    with self.assertRaises(KeyError):
        data[['var1', 'notfound']]
    # list selection returns a sub-dataset
    actual = data[['var1', 'var2']]
    expected = Dataset({'var1': data['var1'], 'var2': data['var2']})
    self.assertDatasetEqual(expected, actual)
    actual = data['numbers']
    expected = DataArray(data['numbers'].variable,
                         {'dim3': data['dim3'],
                          'numbers': data['numbers']},
                         dims='dim3', name='numbers')
    self.assertDataArrayIdentical(expected, actual)
    # a dict key acts as positional selection (isel)
    actual = data[dict(dim1=0)]
    expected = data.isel(dim1=0)
    self.assertDatasetIdentical(expected, actual)
def test_getitem_hashable(self):
    """Any hashable (e.g. a tuple) can be used as a variable name."""
    data = create_test_data()
    data[(3, 4)] = data['var1'] + 1
    expected = data['var1'] + 1
    expected.name = (3, 4)
    self.assertDataArrayIdentical(expected, data[(3, 4)])
    # a tuple of existing names is still a single (missing) key, not a list
    with self.assertRaisesRegexp(KeyError, "('var1', 'var2')"):
        data[('var1', 'var2')]
def test_virtual_variables(self):
    """Datetime components are accessible as 'time.<component>' virtual
    variables, usable in math, and promoted to coordinates on selection."""
    # access virtual variables
    data = create_test_data()
    expected = DataArray(1 + np.arange(20), coords=[data['time']],
                         dims='time', name='dayofyear')
    self.assertDataArrayIdentical(expected, data['time.dayofyear'])
    self.assertArrayEqual(data['time.month'].values,
                          data.variables['time'].to_index().month)
    self.assertArrayEqual(data['time.season'].values, 'DJF')
    # test virtual variable math
    self.assertArrayEqual(data['time.dayofyear'] + 1, 2 + np.arange(20))
    self.assertArrayEqual(np.sin(data['time.dayofyear']),
                          np.sin(1 + np.arange(20)))
    # ensure they become coordinates
    expected = Dataset({}, {'dayofyear': data['time.dayofyear']})
    actual = data[['time.dayofyear']]
    self.assertDatasetEqual(expected, actual)
    # non-coordinate variables
    ds = Dataset({'t': ('x', pd.date_range('2000-01-01', periods=3))})
    self.assertTrue((ds['t.year'] == 2000).all())
def test_virtual_variable_same_name(self):
    """Regression test for GH367: 'time.time' on a coordinate named 'time'
    yields the time-of-day components."""
    hourly = pd.date_range('2000-01-01', freq='H', periods=5)
    dataset = Dataset({'time': hourly})
    expected = DataArray(hourly.time, {'time': hourly}, name='time')
    self.assertDataArrayIdentical(dataset['time.time'], expected)
def test_time_season(self):
    """The 't.season' virtual variable maps months onto season labels."""
    months = pd.date_range('2000-01-01', periods=12, freq='M')
    ds = Dataset({'t': months})
    seasons = (['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 +
               ['SON'] * 3 + ['DJF'])
    self.assertArrayEqual(seasons, ds['t.season'])
def test_slice_virtual_variable(self):
    """Virtual variables can be sliced and scalar-indexed like real ones."""
    data = create_test_data()
    self.assertVariableEqual(data['time.dayofyear'][:10],
                             Variable(['time'], 1 + np.arange(10)))
    self.assertVariableEqual(data['time.dayofyear'][0], Variable([], 1))
def test_setitem(self):
    """__setitem__ accepts Variables, DataArrays, raw arrays and scalars,
    and rejects shape/dimension conflicts."""
    # assign a variable
    var = Variable(['dim1'], np.random.randn(8))
    data1 = create_test_data()
    data1['A'] = var
    data2 = data1.copy()
    data2['A'] = var
    self.assertDatasetIdentical(data1, data2)
    # assign a dataset array
    dv = 2 * data2['A']
    data1['B'] = dv.variable
    data2['B'] = dv
    self.assertDatasetIdentical(data1, data2)
    # can't assign an ND array without dimensions
    with self.assertRaisesRegexp(ValueError,
                                 'dimensions .* must have the same len'):
        data2['C'] = var.values.reshape(2, 4)
    # but can assign a 1D array
    data1['C'] = var.values
    data2['C'] = ('C', var.values)
    self.assertDatasetIdentical(data1, data2)
    # can assign a scalar
    data1['scalar'] = 0
    data2['scalar'] = ([], 0)
    self.assertDatasetIdentical(data1, data2)
    # can't use the same dimension name as a scalar var
    with self.assertRaisesRegexp(ValueError, 'already exists as a scalar'):
        data1['newvar'] = ('scalar', [3, 4, 5])
    # can't resize a used dimension
    with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
        data1['dim1'] = data1['dim1'][:5]
    # override an existing value
    data1['A'] = 3 * data2['A']
    self.assertVariableEqual(data1['A'], 3 * data2['A'])
    # dict-style (positional) assignment is not supported
    with self.assertRaises(NotImplementedError):
        data1[{'x': 0}] = 0
def test_setitem_pandas(self):
    """Assigning a pandas object back into a Dataset yields an equal dataset."""
    original = self.make_example_math_dataset()
    duplicate = original.copy()
    duplicate['bar'] = original['bar'].to_pandas()
    self.assertDatasetEqual(original, duplicate)
def test_setitem_auto_align(self):
    """__setitem__ aligns DataArray values to existing indexes, padding with
    NaN or truncating as needed."""
    ds = Dataset()
    ds['x'] = ('y', range(3))
    ds['y'] = 1 + np.arange(3)
    expected = Dataset({'x': ('y', range(3)), 'y': 1 + np.arange(3)})
    self.assertDatasetIdentical(ds, expected)
    # assigning a new index reindexes; the old labels no longer match
    ds['y'] = DataArray(range(3), dims='y')
    expected = Dataset({'x': ('y', range(3))})
    self.assertDatasetIdentical(ds, expected)
    # shorter arrays are padded with NaN
    ds['x'] = DataArray([1, 2], dims='y')
    expected = Dataset({'x': ('y', [1, 2, np.nan])})
    self.assertDatasetIdentical(ds, expected)
    ds['x'] = 42
    expected = Dataset({'x': 42, 'y': range(3)})
    self.assertDatasetIdentical(ds, expected)
    # longer arrays are truncated to the existing dimension size
    ds['x'] = DataArray([4, 5, 6, 7], dims='y')
    expected = Dataset({'x': ('y', [4, 5, 6])})
    self.assertDatasetIdentical(ds, expected)
def test_setitem_align_new_indexes(self):
    """Assigning a DataArray with its own index aligns it to the dataset's."""
    ds = Dataset({'foo': ('x', [1, 2, 3])}, {'x': [0, 1, 2]})
    ds['bar'] = DataArray([2, 3, 4], [('x', [1, 2, 3])])
    # x=0 has no match in the assigned array, so it becomes NaN; x=3 is dropped
    expected = Dataset({'foo': ('x', [1, 2, 3]),
                        'bar': ('x', [np.nan, 2, 3])})
    self.assertDatasetIdentical(ds, expected)
def test_assign(self):
    """assign()/assign_coords() return new datasets, accept callables, and
    work through groupby."""
    ds = Dataset()
    actual = ds.assign(x = [0, 1, 2], y = 2)
    expected = Dataset({'x': [0, 1, 2], 'y': 2})
    self.assertDatasetIdentical(actual, expected)
    self.assertEqual(list(actual), ['x', 'y'])
    # the original dataset is untouched
    self.assertDatasetIdentical(ds, Dataset())
    # callables receive the (partially assigned) dataset
    actual = actual.assign(y = lambda ds: ds.x ** 2)
    expected = Dataset({'y': ('x', [0, 1, 4])})
    self.assertDatasetIdentical(actual, expected)
    actual = actual.assign_coords(z = 2)
    expected = Dataset({'y': ('x', [0, 1, 4])}, {'z': 2})
    self.assertDatasetIdentical(actual, expected)
    ds = Dataset({'a': ('x', range(3))}, {'b': ('x', ['A'] * 2 + ['B'])})
    actual = ds.groupby('b').assign(c = lambda ds: 2 * ds.a)
    expected = ds.merge({'c': ('x', [0, 2, 4])})
    self.assertDatasetIdentical(actual, expected)
    actual = ds.groupby('b').assign(c = lambda ds: ds.a.sum())
    expected = ds.merge({'c': ('x', [1, 1, 2])})
    self.assertDatasetIdentical(actual, expected)
    actual = ds.groupby('b').assign_coords(c = lambda ds: ds.a.sum())
    expected = expected.set_coords('c')
    self.assertDatasetIdentical(actual, expected)
def test_setitem_non_unique_index(self):
    """Regression test for GH943: replacing a non-unique index works for
    all assignment styles."""
    original = Dataset({'data': ('x', np.arange(5))},
                       coords={'x': [0, 1, 2, 0, 1]})
    expected = Dataset({'data': ('x', np.arange(5))})
    actual = original.copy()
    actual['x'] = list(range(5))
    self.assertDatasetIdentical(actual, expected)
    actual = original.copy()
    actual['x'] = ('x', list(range(5)))
    self.assertDatasetIdentical(actual, expected)
    actual = original.copy()
    actual.coords['x'] = list(range(5))
    self.assertDatasetIdentical(actual, expected)
def test_delitem(self):
    """del removes variables; deleting a dim coordinate removes the dim too."""
    data = create_test_data()
    all_items = set(data)
    self.assertItemsEqual(data, all_items)
    del data['var1']
    self.assertItemsEqual(data, all_items - set(['var1']))
    del data['dim1']
    self.assertItemsEqual(data, set(['time', 'dim2', 'dim3', 'numbers']))
    # the dimension itself disappears from dims and coords
    self.assertNotIn('dim1', data.dims)
    self.assertNotIn('dim1', data.coords)
def test_squeeze(self):
    """squeeze() drops length-1 dimensions, optionally restricted to names."""
    data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
    for args in [[], [['x']], [['x', 'z']]]:
        def get_args(v):
            # only squeeze dims that this variable actually has
            return [set(args[0]) & set(v.dims)] if args else []
        expected = Dataset(dict((k, v.squeeze(*get_args(v)))
                                for k, v in iteritems(data.variables)))
        expected.set_coords(data.coords, inplace=True)
        self.assertDatasetIdentical(expected, data.squeeze(*args))
    # invalid squeeze
    with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
        data.squeeze('y')
def test_groupby(self):
    """Basic groupby: length, groups mapping, iteration, identity apply."""
    data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))},
                   {'x': ('x', list('abc')),
                    'c': ('x', [0, 1, 0])})
    groupby = data.groupby('x')
    self.assertEqual(len(groupby), 3)
    expected_groups = {'a': 0, 'b': 1, 'c': 2}
    self.assertEqual(groupby.groups, expected_groups)
    expected_items = [('a', data.isel(x=0)),
                      ('b', data.isel(x=1)),
                      ('c', data.isel(x=2))]
    for actual, expected in zip(groupby, expected_items):
        self.assertEqual(actual[0], expected[0])
        self.assertDatasetEqual(actual[1], expected[1])
    # applying the identity function should reconstruct the dataset
    identity = lambda x: x
    for k in ['x', 'c', 'y']:
        actual = data.groupby(k, squeeze=False).apply(identity)
        self.assertDatasetEqual(data, actual)
def test_groupby_returns_new_type(self):
    """apply() may change the container type (Dataset <-> DataArray)."""
    data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))})
    actual = data.groupby('x').apply(lambda ds: ds['z'])
    expected = data['z']
    self.assertDataArrayIdentical(expected, actual)
    actual = data['z'].groupby('x').apply(lambda x: x.to_dataset())
    expected = data
    self.assertDatasetIdentical(expected, actual)
def test_groupby_iter(self):
    """Iterating a groupby yields (label, sub-dataset) pairs in order."""
    data = create_test_data()
    for n, (t, sub) in enumerate(list(data.groupby('dim1'))[:3]):
        self.assertEqual(data['dim1'][n], t)
        self.assertVariableEqual(data['var1'][n], sub['var1'])
        self.assertVariableEqual(data['var2'][n], sub['var2'])
        self.assertVariableEqual(data['var3'][:, n], sub['var3'])
def test_groupby_errors(self):
    """Invalid group arguments (unnamed, mis-sized, dim-less) must raise."""
    data = create_test_data()
    with self.assertRaisesRegexp(ValueError, 'must have a name'):
        data.groupby(np.arange(10))
    with self.assertRaisesRegexp(ValueError, 'length does not match'):
        data.groupby(data['dim1'][:3])
    with self.assertRaisesRegexp(ValueError, "must have a 'dims'"):
        data.groupby(data.coords['dim1'].to_index())
def test_groupby_reduce(self):
    """Groupby reductions broadcast variables that lack the grouped dim."""
    data = Dataset({'xy': (['x', 'y'], np.random.randn(3, 4)),
                    'xonly': ('x', np.random.randn(3)),
                    'yonly': ('y', np.random.randn(4)),
                    'letters': ('y', ['a', 'a', 'b', 'b'])})
    expected = data.mean('y')
    # 'yonly' has no 'x' dim, so its mean is broadcast along 'x'
    expected['yonly'] = expected['yonly'].variable.expand_dims({'x': 3})
    actual = data.groupby('x').mean()
    self.assertDatasetAllClose(expected, actual)
    actual = data.groupby('x').mean('y')
    self.assertDatasetAllClose(expected, actual)
    letters = data['letters']
    expected = Dataset({'xy': data['xy'].groupby(letters).mean(),
                        'xonly': (data['xonly'].mean().variable
                                  .expand_dims({'letters': 2})),
                        'yonly': data['yonly'].groupby(letters).mean()})
    actual = data.groupby('letters').mean()
    self.assertDatasetAllClose(expected, actual)
def test_groupby_math(self):
    """Binary arithmetic with a groupby object broadcasts over the groups;
    invalid operand combinations must raise."""
    # grouped arithmetic may permute dims, so compare in a canonical order
    reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')
    ds = create_test_data()
    for squeeze in [True, False]:
        grouped = ds.groupby('dim1', squeeze=squeeze)
        expected = reorder_dims(ds + ds.coords['dim1'])
        actual = grouped + ds.coords['dim1']
        self.assertDatasetIdentical(expected, reorder_dims(actual))
        # the operation commutes
        actual = ds.coords['dim1'] + grouped
        self.assertDatasetIdentical(expected, reorder_dims(actual))
        ds2 = 2 * ds
        expected = reorder_dims(ds + ds2)
        actual = grouped + ds2
        self.assertDatasetIdentical(expected, reorder_dims(actual))
        actual = ds2 + grouped
        self.assertDatasetIdentical(expected, reorder_dims(actual))
    grouped = ds.groupby('numbers')
    zeros = DataArray([0, 0, 0, 0], [('numbers', range(4))])
    expected = ((ds + Variable('dim3', np.zeros(10)))
                .transpose('dim3', 'dim1', 'dim2', 'time'))
    actual = grouped + zeros
    self.assertDatasetEqual(expected, actual)
    actual = zeros + grouped
    self.assertDatasetEqual(expected, actual)
    # error cases: wrong dims, non-array operands, in-place ops
    with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
        grouped + ds
    with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
        ds + grouped
    with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
        grouped + 1
    with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
        grouped + grouped
    with self.assertRaisesRegexp(TypeError, 'in-place operations'):
        ds += grouped
    ds = Dataset({'x': ('time', np.arange(100)),
                  'time': pd.date_range('2000-01-01', periods=100)})
    with self.assertRaisesRegexp(ValueError, 'incompat.* grouped binary'):
        ds + ds.groupby('time.month')
def test_groupby_math_virtual(self):
    """Grouped arithmetic works when grouping by a virtual variable."""
    ds = Dataset({'x': ('t', [1, 2, 3])},
                 {'t': pd.date_range('20100101', periods=3)})
    grouped = ds.groupby('t.day')
    # each day is its own group, so subtracting the group mean yields zeros
    actual = grouped - grouped.mean()
    expected = Dataset({'x': ('t', [0, 0, 0])},
                       ds[['t', 't.day']])
    self.assertDatasetIdentical(actual, expected)
def test_groupby_nan(self):
    """Groups keyed by NaN are excluded from groupby results."""
    # nan should be excluded from groupby
    dataset = Dataset({'foo': ('x', [1, 2, 3, 4])},
                      {'bar': ('x', [1, 1, 2, np.nan])})
    result = dataset.groupby('bar').mean()
    expected = Dataset({'foo': ('bar', [1.5, 3]), 'bar': [1, 2]})
    self.assertDatasetIdentical(result, expected)
def test_resample_and_first(self):
    """resample(how='first') downsamples to first values; upsampling to a
    finer frequency reindexes (introducing NaN) for every `how`."""
    times = pd.date_range('2000-01-01', freq='6H', periods=10)
    ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
                  'bar': ('time', np.random.randn(10), {'meta': 'data'}),
                  'time': times})
    actual = ds.resample('1D', dim='time', how='first', keep_attrs=True)
    # daily bins of 6-hourly data start at indices 0, 4, 8
    expected = ds.isel(time=[0, 4, 8])
    self.assertDatasetIdentical(expected, actual)
    # upsampling
    expected_time = pd.date_range('2000-01-01', freq='3H', periods=19)
    expected = ds.reindex(time=expected_time)
    for how in ['mean', 'sum', 'first', 'last', np.mean]:
        actual = ds.resample('3H', 'time', how=how)
        self.assertDatasetEqual(expected, actual)
def test_resample_by_mean_with_keep_attrs(self):
    """resample(..., keep_attrs=True) preserves variable and dataset attrs."""
    times = pd.date_range('2000-01-01', freq='6H', periods=10)
    ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
                  'bar': ('time', np.random.randn(10), {'meta': 'data'}),
                  'time': times})
    ds.attrs['dsmeta'] = 'dsdata'
    resampled_ds = ds.resample('1D', dim='time', how='mean', keep_attrs=True)
    actual = resampled_ds['bar'].attrs
    expected = ds['bar'].attrs
    self.assertEqual(expected, actual)
    actual = resampled_ds.attrs
    expected = ds.attrs
    self.assertEqual(expected, actual)
def test_resample_by_mean_discarding_attrs(self):
    """resample(how='mean', keep_attrs=False) drops all attributes."""
    times = pd.date_range('2000-01-01', freq='6H', periods=10)
    ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
                  'bar': ('time', np.random.randn(10), {'meta': 'data'}),
                  'time': times})
    ds.attrs['dsmeta'] = 'dsdata'
    resampled_ds = ds.resample('1D', dim='time', how='mean', keep_attrs=False)
    assert resampled_ds['bar'].attrs == {}
    assert resampled_ds.attrs == {}
def test_resample_by_last_discarding_attrs(self):
    """resample(how='last', keep_attrs=False) drops all attributes."""
    times = pd.date_range('2000-01-01', freq='6H', periods=10)
    ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
                  'bar': ('time', np.random.randn(10), {'meta': 'data'}),
                  'time': times})
    ds.attrs['dsmeta'] = 'dsdata'
    resampled_ds = ds.resample('1D', dim='time', how='last', keep_attrs=False)
    assert resampled_ds['bar'].attrs == {}
    assert resampled_ds.attrs == {}
def test_to_array(self):
    """to_array() stacks data variables along a new 'variable' dimension,
    broadcasting scalars and keeping attrs."""
    ds = Dataset(OrderedDict([('a', 1), ('b', ('x', [1, 2, 3]))]),
                 coords={'c': 42}, attrs={'Conventions': 'None'})
    data = [[1, 1, 1], [1, 2, 3]]
    coords = {'x': range(3), 'c': 42, 'variable': ['a', 'b']}
    dims = ('variable', 'x')
    expected = DataArray(data, coords, dims, attrs=ds.attrs)
    actual = ds.to_array()
    self.assertDataArrayIdentical(expected, actual)
    # the stacking dimension and result name are customizable
    actual = ds.to_array('abc', name='foo')
    expected = expected.rename({'variable': 'abc'}).rename('foo')
    self.assertDataArrayIdentical(expected, actual)
def test_to_and_from_dataframe(self):
    """to_dataframe()/from_dataframe() round-trip, including MultiIndex,
    empty frames, and mixed-dtype regression cases."""
    x = np.random.randn(10)
    y = np.random.randn(10)
    t = list('abcdefghij')
    ds = Dataset(OrderedDict([('a', ('t', x)),
                              ('b', ('t', y)),
                              ('t', ('t', t))]))
    expected = pd.DataFrame(np.array([x, y]).T, columns=['a', 'b'],
                            index=pd.Index(t, name='t'))
    actual = ds.to_dataframe()
    # use the .equals method to check all DataFrame metadata
    assert expected.equals(actual), (expected, actual)
    # verify coords are included
    actual = ds.set_coords('b').to_dataframe()
    assert expected.equals(actual), (expected, actual)
    # check roundtrip
    self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
    # test a case with a MultiIndex
    w = np.random.randn(2, 3)
    ds = Dataset({'w': (('x', 'y'), w)})
    ds['y'] = ('y', list('abc'))
    exp_index = pd.MultiIndex.from_arrays(
        [[0, 0, 0, 1, 1, 1], ['a', 'b', 'c', 'a', 'b', 'c']],
        names=['x', 'y'])
    expected = pd.DataFrame(w.reshape(-1), columns=['w'], index=exp_index)
    actual = ds.to_dataframe()
    self.assertTrue(expected.equals(actual))
    # check roundtrip
    self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
    # check pathological cases
    df = pd.DataFrame([1])
    actual = Dataset.from_dataframe(df)
    expected = Dataset({0: ('index', [1])})
    self.assertDatasetIdentical(expected, actual)
    df = pd.DataFrame()
    actual = Dataset.from_dataframe(df)
    expected = Dataset(coords={'index':[]})
    self.assertDatasetIdentical(expected, actual)
    # GH697
    df = pd.DataFrame({'A' : []})
    actual = Dataset.from_dataframe(df)
    expected = Dataset({'A': DataArray([], dims=('index',))})
    self.assertDatasetIdentical(expected, actual)
    # regression test for GH278
    # use int64 to ensure consistent results for the pandas .equals method
    # on windows (which requires the same dtype)
    ds = Dataset({'x': pd.Index(['bar']),
                  'a': ('y', np.array([1], 'int64'))}).isel(x=0)
    # use .loc to ensure consistent results on Python 3
    actual = ds.to_dataframe().loc[:, ['a', 'x']]
    expected = pd.DataFrame([[1, 'bar']], index=pd.Index([0], name='y'),
                            columns=['a', 'x'])
    assert expected.equals(actual), (expected, actual)
    ds = Dataset({'x': np.array([0], 'int64'),
                  'y': np.array([1], 'int64')})
    actual = ds.to_dataframe()
    idx = pd.MultiIndex.from_arrays([[0], [1]], names=['x', 'y'])
    expected = pd.DataFrame([[]], index=idx)
    assert expected.equals(actual), (expected, actual)
def test_from_dataframe_non_unique_columns(self):
    """Regression test for GH449: duplicate column names must be rejected."""
    frame = pd.DataFrame(np.zeros((2, 2)))
    frame.columns = ['foo', 'foo']
    with self.assertRaisesRegexp(ValueError, 'non-unique columns'):
        Dataset.from_dataframe(frame)
def test_convert_dataframe_with_many_types_and_multiindex(self):
    """Regression test for GH737: round-trip a MultiIndex frame with many
    dtypes, matching np.asarray's coercion."""
    df = pd.DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.Categorical(list('abc')),
                       'g': pd.date_range('20130101', periods=3),
                       'h': pd.date_range('20130101',
                                          periods=3,
                                          tz='US/Eastern')})
    df.index = pd.MultiIndex.from_product([['a'], range(3)],
                                          names=['one', 'two'])
    roundtripped = Dataset.from_dataframe(df).to_dataframe()
    # we can't do perfectly, but we should be at least as faithful as
    # np.asarray
    expected = df.apply(np.asarray)
    if pd.__version__ < '0.17':
        # datetime with timezone dtype is not consistent on old pandas
        roundtripped = roundtripped.drop(['h'], axis=1)
        expected = expected.drop(['h'], axis=1)
    assert roundtripped.equals(expected)
def test_to_and_from_dict(self):
    """to_dict()/from_dict() round-trip; from_dict() also tolerates
    partially-specified dicts but requires 'dims' per variable."""
    # <xarray.Dataset>
    # Dimensions:  (t: 10)
    # Coordinates:
    #   * t        (t) <U1 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
    # Data variables:
    #     a        (t) float64 0.6916 -1.056 -1.163 0.9792 -0.7865 ...
    #     b        (t) float64 1.32 0.1954 1.91 1.39 0.519 -0.2772 ...
    x = np.random.randn(10)
    y = np.random.randn(10)
    t = list('abcdefghij')
    ds = Dataset(OrderedDict([('a', ('t', x)),
                              ('b', ('t', y)),
                              ('t', ('t', t))]))
    expected = {'coords': {'t': {'dims': ('t',),
                                 'data': t,
                                 'attrs': {}}},
                'attrs': {},
                'dims': {'t': 10},
                'data_vars': {'a': {'dims': ('t',),
                                    'data': x.tolist(),
                                    'attrs': {}},
                              'b': {'dims': ('t',),
                                    'data': y.tolist(),
                                    'attrs': {}}}}
    actual = ds.to_dict()
    # check that they are identical
    self.assertEqual(expected, actual)
    # check roundtrip
    self.assertDatasetIdentical(ds, Dataset.from_dict(actual))
    # verify coords are included roundtrip
    expected = ds.set_coords('b')
    actual = Dataset.from_dict(expected.to_dict())
    self.assertDatasetIdentical(expected, actual)
    # test some incomplete dicts:
    # this one has no attrs field, the dims are strings, and x, y are
    # np.arrays
    d = {'coords': {'t': {'dims': 't', 'data': t}},
         'dims': 't',
         'data_vars': {'a': {'dims': 't', 'data': x},
                       'b': {'dims': 't', 'data': y}}}
    self.assertDatasetIdentical(ds, Dataset.from_dict(d))
    # this is kind of a flattened version with no coords, or data_vars
    d = {'a': {'dims': 't', 'data': x},
         't': {'data': t, 'dims': 't'},
         'b': {'dims': 't', 'data': y}}
    self.assertDatasetIdentical(ds, Dataset.from_dict(d))
    # this one is missing some necessary information
    d = {'a': {'data': x},
         't': {'data': t, 'dims': 't'},
         'b': {'dims': 't', 'data': y}}
    with self.assertRaisesRegexp(ValueError, "cannot convert dict "
                                 "without the key 'dims'"):
        Dataset.from_dict(d)
def test_to_and_from_dict_with_time_dim(self):
    """to_dict()/from_dict() round-trip a dataset with a datetime dimension."""
    x = np.random.randn(10, 3)
    y = np.random.randn(10, 3)
    t = pd.date_range('20130101', periods=10)
    lat = [77.7, 83.2, 76]
    ds = Dataset(OrderedDict([('a', (['t', 'lat'], x)),
                              ('b', (['t', 'lat'], y)),
                              ('t', ('t', t)),
                              ('lat', ('lat', lat))]))
    roundtripped = Dataset.from_dict(ds.to_dict())
    self.assertDatasetIdentical(ds, roundtripped)
def test_to_and_from_dict_with_nan_nat(self):
    """to_dict()/from_dict() round-trip preserves NaN values and NaT times."""
    x = np.random.randn(10, 3)
    y = np.random.randn(10, 3)
    y[2] = np.nan
    t = pd.Series(pd.date_range('20130101', periods=10))
    t[2] = np.nan  # becomes NaT in the datetime series
    lat = [77.7, 83.2, 76]
    ds = Dataset(OrderedDict([('a', (['t', 'lat'], x)),
                              ('b', (['t', 'lat'], y)),
                              ('t', ('t', t)),
                              ('lat', ('lat', lat))]))
    roundtripped = Dataset.from_dict(ds.to_dict())
    self.assertDatasetIdentical(ds, roundtripped)
def test_pickle(self):
    """Datasets survive a pickle round-trip, including .dims (GH167)."""
    original = create_test_data()
    restored = pickle.loads(pickle.dumps(original))
    self.assertDatasetIdentical(original, restored)
    # regression test for #167:
    self.assertEqual(original.dims, restored.dims)
def test_lazy_load(self):
    """Opening from a store is lazy: data access raises until explicitly
    loaded, while pure indexing operations stay lazy."""
    store = InaccessibleVariableDataStore()
    create_test_data().dump_to_store(store)
    for decode_cf in [True, False]:
        ds = open_dataset(store, decode_cf=decode_cf)
        with self.assertRaises(UnexpectedDataAccess):
            ds.load()
        with self.assertRaises(UnexpectedDataAccess):
            ds['var1'].values
        # these should not raise UnexpectedDataAccess:
        ds.isel(time=10)
        ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
def test_dropna(self):
    """dropna() removes labels with missing values, honoring subset=, how=
    and thresh=, and validates its arguments."""
    x = np.random.randn(4, 4)
    x[::2, 0] = np.nan  # rows a=0 and a=2 each get one NaN in column b=0
    y = np.random.randn(4)
    y[-1] = np.nan  # 'bar' is NaN at its last position
    ds = Dataset({'foo': (('a', 'b'), x), 'bar': (('b', y))})
    expected = ds.isel(a=slice(1, None, 2))
    actual = ds.dropna('a')
    self.assertDatasetIdentical(actual, expected)
    expected = ds.isel(b=slice(1, 3))
    actual = ds.dropna('b')
    self.assertDatasetIdentical(actual, expected)
    actual = ds.dropna('b', subset=['foo', 'bar'])
    self.assertDatasetIdentical(actual, expected)
    expected = ds.isel(b=slice(1, None))
    actual = ds.dropna('b', subset=['foo'])
    self.assertDatasetIdentical(actual, expected)
    expected = ds.isel(b=slice(3))
    actual = ds.dropna('b', subset=['bar'])
    self.assertDatasetIdentical(actual, expected)
    actual = ds.dropna('a', subset=[])
    self.assertDatasetIdentical(actual, ds)
    actual = ds.dropna('a', subset=['bar'])
    self.assertDatasetIdentical(actual, ds)
    actual = ds.dropna('a', how='all')
    self.assertDatasetIdentical(actual, ds)
    actual = ds.dropna('b', how='all', subset=['bar'])
    expected = ds.isel(b=[0, 1, 2])
    self.assertDatasetIdentical(actual, expected)
    actual = ds.dropna('b', thresh=1, subset=['bar'])
    self.assertDatasetIdentical(actual, expected)
    actual = ds.dropna('b', thresh=2)
    self.assertDatasetIdentical(actual, ds)
    actual = ds.dropna('b', thresh=4)
    expected = ds.isel(b=[1, 2, 3])
    self.assertDatasetIdentical(actual, expected)
    # per-'a' non-null counts are (3, 4, 3, 4), all >= 3, so nothing is
    # dropped; the previous unused `expected = ds.isel(a=[1, 3])`
    # assignment was dead code and has been removed
    actual = ds.dropna('a', thresh=3)
    self.assertDatasetIdentical(actual, ds)
    with self.assertRaisesRegexp(ValueError, 'a single dataset dimension'):
        ds.dropna('foo')
    with self.assertRaisesRegexp(ValueError, 'invalid how'):
        ds.dropna('a', how='somehow')
    with self.assertRaisesRegexp(TypeError, 'must specify how or thresh'):
        ds.dropna('a', how=None)
def test_fillna(self):
    """fillna() accepts scalars, dicts, Datasets and DataArrays, aligns its
    argument, and works through groupby."""
    ds = Dataset({'a': ('x', [np.nan, 1, np.nan, 3])})
    # fill with -1
    actual = ds.fillna(-1)
    expected = Dataset({'a': ('x', [-1, 1, -1, 3])})
    self.assertDatasetIdentical(expected, actual)
    actual = ds.fillna({'a': -1})
    self.assertDatasetIdentical(expected, actual)
    other = Dataset({'a': -1})
    actual = ds.fillna(other)
    self.assertDatasetIdentical(expected, actual)
    actual = ds.fillna({'a': other.a})
    self.assertDatasetIdentical(expected, actual)
    # fill with range(4)
    b = DataArray(range(4), dims='x')
    actual = ds.fillna(b)
    expected = b.rename('a').to_dataset()
    self.assertDatasetIdentical(expected, actual)
    actual = ds.fillna(expected)
    self.assertDatasetIdentical(expected, actual)
    actual = ds.fillna(range(4))
    self.assertDatasetIdentical(expected, actual)
    actual = ds.fillna(b[:3])
    self.assertDatasetIdentical(expected, actual)
    # okay to only include some data variables
    ds['b'] = np.nan
    actual = ds.fillna({'a': -1})
    expected = Dataset({'a': ('x', [-1, 1, -1, 3]), 'b': np.nan})
    self.assertDatasetIdentical(expected, actual)
    # but new data variables is not okay
    with self.assertRaisesRegexp(ValueError, 'must be contained'):
        ds.fillna({'x': 0})
    # empty argument should be OK
    result = ds.fillna({})
    self.assertDatasetIdentical(ds, result)
    result = ds.fillna(Dataset(coords={'c': 42}))
    expected = ds.assign_coords(c=42)
    self.assertDatasetIdentical(expected, result)
    # groupby
    expected = Dataset({'a': ('x', range(4))})
    for target in [ds, expected]:
        target.coords['b'] = ('x', [0, 0, 1, 1])
    actual = ds.groupby('b').fillna(DataArray([0, 2], dims='b'))
    self.assertDatasetIdentical(expected, actual)
    actual = ds.groupby('b').fillna(Dataset({'a': ('b', [0, 2])}))
    self.assertDatasetIdentical(expected, actual)
def test_where(self):
ds = Dataset({'a': ('x', range(5))})
expected = Dataset({'a': ('x', [np.nan, np.nan, 2, 3, 4])})
actual = ds.where(ds > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a.values > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(True)
self.assertDatasetIdentical(ds, actual)
expected = ds.copy(deep=True)
expected['a'].values = [np.nan] * 5
actual = ds.where(False)
self.assertDatasetIdentical(expected, actual)
# 2d
ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])})
expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]])})
actual = ds.where(ds > 0)
self.assertDatasetIdentical(expected, actual)
# groupby
ds = Dataset({'a': ('x', range(5))}, {'c': ('x', [0, 0, 1, 1, 1])})
cond = Dataset({'a': ('c', [True, False])})
expected = ds.copy(deep=True)
expected['a'].values = [0, 1] + [np.nan] * 3
actual = ds.groupby('c').where(cond)
self.assertDatasetIdentical(expected, actual)
def test_where_drop(self):
# if drop=True
# 1d
# data array case
array = DataArray(range(5), coords=[range(5)], dims=['x'])
expected = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=['x'])
actual = array.where(array > 1, drop=True)
self.assertDatasetIdentical(expected, actual)
# dataset case
ds = Dataset({'a': array})
expected = Dataset({'a': expected})
actual = ds.where(ds > 1, drop=True)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a > 1, drop=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'must be a'):
ds.where(np.arange(5) > 1, drop=True)
# 1d with odd coordinates
array = DataArray(np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=['x'])
expected = DataArray(np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=['x'])
actual = array.where(array > 2, drop=True)
self.assertDatasetIdentical(expected, actual)
# 1d multiple variables
ds = Dataset({'a': (('x'), [0, 1, 2, 3]), 'b': (('x'), [4, 5, 6, 7])})
expected = Dataset({'a': (('x'), [np.nan, 1, 2, 3]), 'b': (('x'), [4, 5, 6, np.nan])})
actual = ds.where((ds > 0) & (ds < 7), drop=True)
self.assertDatasetIdentical(expected, actual)
# 2d
ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])})
expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]])})
actual = ds.where(ds > 0, drop=True)
self.assertDatasetIdentical(expected, actual)
# 2d with odd coordinates
ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])},
coords={'x': [4, 3], 'y': [1, 2],
'z' : (['x','y'], [[np.e, np.pi], [np.pi*np.e, np.pi*3]])})
expected = Dataset({'a': (('x', 'y'), [[3]])},
coords={'x': [3], 'y': [2],
'z' : (['x','y'], [[np.pi*3]])})
actual = ds.where(ds > 2, drop=True)
self.assertDatasetIdentical(expected, actual)
# 2d multiple variables
ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]]), 'b': (('x','y'), [[4, 5], [6, 7]])})
expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]]), 'b': (('x', 'y'), [[4, 5], [6,7]])})
actual = ds.where(ds > 0, drop=True)
self.assertDatasetIdentical(expected, actual)
def test_reduce(self):
data = create_test_data()
self.assertEqual(len(data.mean().coords), 0)
actual = data.max()
expected = Dataset(dict((k, v.max())
for k, v in iteritems(data.data_vars)))
self.assertDatasetEqual(expected, actual)
self.assertDatasetEqual(data.min(dim=['dim1']),
data.min(dim='dim1'))
for reduct, expected in [('dim2', ['dim1', 'dim3', 'time']),
(['dim2', 'time'], ['dim1', 'dim3']),
(('dim2', 'time'), ['dim1', 'dim3']),
((), ['dim1', 'dim2', 'dim3', 'time'])]:
actual = data.min(dim=reduct).dims
print(reduct, actual, expected)
self.assertItemsEqual(actual, expected)
self.assertDatasetEqual(data.mean(dim=[]), data)
def test_reduce_bad_dim(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'Dataset does not contain'):
ds = data.mean(dim='bad_dim')
def test_reduce_non_numeric(self):
data1 = create_test_data(seed=44)
data2 = create_test_data(seed=44)
add_vars = {'var4': ['dim1', 'dim2']}
for v, dims in sorted(add_vars.items()):
size = tuple(data1.dims[d] for d in dims)
data = np.random.random_integers(0, 100, size=size).astype(np.str_)
data1[v] = (dims, data, {'foo': 'variable'})
self.assertTrue('var4' not in data1.mean())
self.assertDatasetEqual(data1.mean(), data2.mean())
self.assertDatasetEqual(data1.mean(dim='dim1'),
data2.mean(dim='dim1'))
def test_reduce_strings(self):
expected = Dataset({'x': 'a'})
ds = Dataset({'x': ('y', ['a', 'b'])})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 'b'})
actual = ds.max()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 0})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1})
actual = ds.argmax()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': b'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'S1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': u'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'U1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
def test_reduce_dtypes(self):
# regression test for GH342
expected = Dataset({'x': 1})
actual = Dataset({'x': True}).sum()
self.assertDatasetIdentical(expected, actual)
# regression test for GH505
expected = Dataset({'x': 3})
actual = Dataset({'x': ('y', np.array([1, 2], 'uint16'))}).sum()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1 + 1j})
actual = Dataset({'x': ('y', [1, 1j])}).sum()
self.assertDatasetIdentical(expected, actual)
def test_reduce_keep_attrs(self):
data = create_test_data()
_attrs = {'attr1': 'value1', 'attr2': 2929}
attrs = OrderedDict(_attrs)
data.attrs = attrs
# Test dropped attrs
ds = data.mean()
self.assertEqual(ds.attrs, {})
for v in ds.data_vars.values():
self.assertEqual(v.attrs, {})
# Test kept attrs
ds = data.mean(keep_attrs=True)
self.assertEqual(ds.attrs, attrs)
for k, v in ds.data_vars.items():
self.assertEqual(v.attrs, data[k].attrs)
def test_reduce_argmin(self):
# regression test for #205
ds = Dataset({'a': ('x', [0, 1])})
expected = Dataset({'a': ([], 0)})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
actual = ds.argmin('x')
self.assertDatasetIdentical(expected, actual)
def test_reduce_scalars(self):
ds = Dataset({'x': ('a', [2, 2]), 'y': 2, 'z': ('b', [2])})
expected = Dataset({'x': 0, 'y': 0, 'z': 0})
actual = ds.var()
self.assertDatasetIdentical(expected, actual)
def test_reduce_only_one_axis(self):
def mean_only_one_axis(x, axis):
if not isinstance(axis, (int, np.integer)):
raise TypeError('non-integer axis')
return x.mean(axis)
ds = Dataset({'a': (['x', 'y'], [[0, 1, 2, 3, 4]])})
expected = Dataset({'a': ('x', [2])})
actual = ds.reduce(mean_only_one_axis, 'y')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis, ['x', 'y'])
def test_count(self):
ds = Dataset({'x': ('a', [np.nan, 1]), 'y': 0, 'z': np.nan})
expected = Dataset({'x': 1, 'y': 1, 'z': 0})
actual = ds.count()
self.assertDatasetIdentical(expected, actual)
def test_apply(self):
data = create_test_data()
data.attrs['foo'] = 'bar'
self.assertDatasetIdentical(data.apply(np.mean), data.mean())
expected = data.mean(keep_attrs=True)
actual = data.apply(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
self.assertDatasetIdentical(expected, actual)
self.assertDatasetIdentical(data.apply(lambda x: x, keep_attrs=True),
data.drop('time'))
def scale(x, multiple=1):
return multiple * x
actual = data.apply(scale, multiple=2)
self.assertDataArrayEqual(actual['var1'], 2 * data['var1'])
self.assertDataArrayIdentical(actual['numbers'], data['numbers'])
actual = data.apply(np.asarray)
expected = data.drop('time') # time is not used on a data var
self.assertDatasetEqual(expected, actual)
def make_example_math_dataset(self):
variables = OrderedDict(
[('bar', ('x', np.arange(100, 400, 100))),
('foo', (('x', 'y'), 1.0 * np.arange(12).reshape(3, 4)))])
coords = {'abc': ('x', ['a', 'b', 'c']),
'y': 10 * np.arange(4)}
ds = Dataset(variables, coords)
ds['foo'][0, 0] = np.nan
return ds
def test_dataset_number_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, +ds)
self.assertDatasetIdentical(ds, ds + 0)
self.assertDatasetIdentical(ds, 0 + ds)
self.assertDatasetIdentical(ds, ds + np.array(0))
self.assertDatasetIdentical(ds, np.array(0) + ds)
actual = ds.copy(deep=True)
actual += 0
self.assertDatasetIdentical(ds, actual)
def test_unary_ops(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds.apply(abs), abs(ds))
self.assertDatasetIdentical(ds.apply(lambda x: x + 4), ds + 4)
for func in [lambda x: x.isnull(),
lambda x: x.round(),
lambda x: x.astype(int)]:
self.assertDatasetIdentical(ds.apply(func), func(ds))
self.assertDatasetIdentical(ds.isnull(), ~ds.notnull())
# don't actually patch these methods in
with self.assertRaises(AttributeError):
ds.item
with self.assertRaises(AttributeError):
ds.searchsorted
def test_dataset_array_math(self):
ds = self.make_example_math_dataset()
expected = ds.apply(lambda x: x - ds['foo'])
self.assertDatasetIdentical(expected, ds - ds['foo'])
self.assertDatasetIdentical(expected, -ds['foo'] + ds)
self.assertDatasetIdentical(expected, ds - ds['foo'].variable)
self.assertDatasetIdentical(expected, -ds['foo'].variable + ds)
actual = ds.copy(deep=True)
actual -= ds['foo']
self.assertDatasetIdentical(expected, actual)
expected = ds.apply(lambda x: x + ds['bar'])
self.assertDatasetIdentical(expected, ds + ds['bar'])
actual = ds.copy(deep=True)
actual += ds['bar']
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'bar': ds['bar'] + np.arange(3)})
self.assertDatasetIdentical(expected, ds[['bar']] + np.arange(3))
self.assertDatasetIdentical(expected, np.arange(3) + ds[['bar']])
def test_dataset_dataset_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, ds + 0 * ds)
self.assertDatasetIdentical(ds, ds + {'foo': 0, 'bar': 0})
expected = ds.apply(lambda x: 2 * x)
self.assertDatasetIdentical(expected, 2 * ds)
self.assertDatasetIdentical(expected, ds + ds)
self.assertDatasetIdentical(expected, ds + ds.data_vars)
self.assertDatasetIdentical(expected, ds + dict(ds.data_vars))
actual = ds.copy(deep=True)
expected_id = id(actual)
actual += ds
self.assertDatasetIdentical(expected, actual)
self.assertEqual(expected_id, id(actual))
self.assertDatasetIdentical(ds == ds, ds.notnull())
subsampled = ds.isel(y=slice(2))
expected = 2 * subsampled
self.assertDatasetIdentical(expected, subsampled + ds)
self.assertDatasetIdentical(expected, ds + subsampled)
def test_dataset_math_auto_align(self):
ds = self.make_example_math_dataset()
subset = ds.isel(x=slice(2), y=[1, 3])
expected = 2 * subset
actual = ds + subset
self.assertDatasetIdentical(expected, actual)
actual = ds.isel(x=slice(1)) + ds.isel(x=slice(1, None))
expected = ds.drop(ds.x, dim='x')
self.assertDatasetEqual(actual, expected)
actual = ds + ds[['bar']]
expected = (2 * ds[['bar']]).merge(ds.coords)
self.assertDatasetIdentical(expected, actual)
self.assertDatasetIdentical(ds + Dataset(), ds.coords.to_dataset())
self.assertDatasetIdentical(Dataset() + Dataset(), Dataset())
ds2 = Dataset(coords={'bar': 42})
self.assertDatasetIdentical(ds + ds2, ds.coords.merge(ds2))
# maybe unary arithmetic with empty datasets should raise instead?
self.assertDatasetIdentical(Dataset() + 1, Dataset())
for other in [ds.isel(x=slice(2)), ds.bar.isel(x=slice(0))]:
actual = ds.copy(deep=True)
other = ds.isel(x=slice(2))
actual += other
expected = ds + other.reindex_like(ds)
self.assertDatasetIdentical(expected, actual)
def test_dataset_math_errors(self):
ds = self.make_example_math_dataset()
with self.assertRaises(TypeError):
ds['foo'] += ds
with self.assertRaises(TypeError):
ds['foo'].variable += ds
with self.assertRaisesRegexp(ValueError, 'must have the same'):
ds += ds[['bar']]
# verify we can rollback in-place operations if something goes wrong
# nb. inplace datetime64 math actually will work with an integer array
# but not floats thanks to numpy's inconsistent handling
other = DataArray(np.datetime64('2000-01-01T12'), coords={'c': 2})
actual = ds.copy(deep=True)
with self.assertRaises(TypeError):
actual += other
self.assertDatasetIdentical(actual, ds)
def test_dataset_transpose(self):
ds = Dataset({'a': (('x', 'y'), np.random.randn(3, 4)),
'b': (('y', 'x'), np.random.randn(4, 3))})
actual = ds.transpose()
expected = ds.apply(lambda x: x.transpose())
self.assertDatasetIdentical(expected, actual)
actual = ds.T
self.assertDatasetIdentical(expected, actual)
actual = ds.transpose('x', 'y')
expected = ds.apply(lambda x: x.transpose('x', 'y'))
self.assertDatasetIdentical(expected, actual)
ds = create_test_data()
actual = ds.transpose()
for k in ds:
self.assertEqual(actual[k].dims[::-1], ds[k].dims)
new_order = ('dim2', 'dim3', 'dim1', 'time')
actual = ds.transpose(*new_order)
for k in ds:
expected_dims = tuple(d for d in new_order if d in ds[k].dims)
self.assertEqual(actual[k].dims, expected_dims)
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3')
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3', 'time', 'extra_dim')
def test_dataset_retains_period_index_on_transpose(self):
ds = create_test_data()
ds['time'] = pd.period_range('2000-01-01', periods=20)
transposed = ds.transpose()
self.assertIsInstance(transposed.time.to_index(), pd.PeriodIndex)
def test_dataset_diff_n1_simple(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [1, 2, 3]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1_lower(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x', label='lower')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [0, 1, 2]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2')
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n2(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2', n=2)
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_exception_n_neg(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, 'must be non-negative'):
ds.diff('dim2', n=-1)
def test_dataset_diff_exception_label_str(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, '\'label\' argument has to'):
ds.diff('dim2', label='raise_me')
def test_shift(self):
coords = {'bar': ('x', list('abc')), 'x': [-4, 3, 2]}
attrs = {'meta': 'data'}
ds = Dataset({'foo': ('x', [1, 2, 3])}, coords, attrs)
actual = ds.shift(x=1)
expected = Dataset({'foo': ('x', [np.nan, 1, 2])}, coords, attrs)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'dimensions'):
ds.shift(foo=123)
def test_roll(self):
coords = {'bar': ('x', list('abc')), 'x': [-4, 3, 2]}
attrs = {'meta': 'data'}
ds = Dataset({'foo': ('x', [1, 2, 3])}, coords, attrs)
actual = ds.roll(x=1)
ex_coords = {'bar': ('x', list('cab')), 'x': [2, -4, 3]}
expected = Dataset({'foo': ('x', [3, 1, 2])}, ex_coords, attrs)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'dimensions'):
ds.roll(foo=123)
def test_real_and_imag(self):
attrs = {'foo': 'bar'}
ds = Dataset({'x': ((), 1 + 2j, attrs)}, attrs=attrs)
expected_re = Dataset({'x': ((), 1, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.real, expected_re)
expected_im = Dataset({'x': ((), 2, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.imag, expected_im)
def test_setattr_raises(self):
ds = Dataset({}, coords={'scalar': 1}, attrs={'foo': 'bar'})
with self.assertRaisesRegexp(AttributeError, 'cannot set attr'):
ds.scalar = 2
with self.assertRaisesRegexp(AttributeError, 'cannot set attr'):
ds.foo = 2
with self.assertRaisesRegexp(AttributeError, 'cannot set attr'):
ds.other = 2
def test_filter_by_attrs(self):
precip = dict(standard_name='convective_precipitation_flux')
temp0 = dict(standard_name='air_potential_temperature', height='0 m')
temp10 = dict(standard_name='air_potential_temperature', height='10 m')
ds = Dataset({'temperature_0': (['t'], [0], temp0),
'temperature_10': (['t'], [0], temp10),
'precipitation': (['t'], [0], precip)},
coords={'time': (['t'], [0], dict(axis='T'))})
# Test return empty Dataset.
ds.filter_by_attrs(standard_name='invalid_standard_name')
new_ds = ds.filter_by_attrs(standard_name='invalid_standard_name')
self.assertFalse(bool(new_ds.data_vars))
# Test return one DataArray.
new_ds = ds.filter_by_attrs(standard_name='convective_precipitation_flux')
self.assertEqual(new_ds['precipitation'].standard_name, 'convective_precipitation_flux')
self.assertDatasetEqual(new_ds['precipitation'], ds['precipitation'])
# Test return more than one DataArray.
new_ds = ds.filter_by_attrs(standard_name='air_potential_temperature')
self.assertEqual(len(new_ds.data_vars), 2)
for var in new_ds.data_vars:
self.assertEqual(new_ds[var].standard_name, 'air_potential_temperature')
# Test callable.
new_ds = ds.filter_by_attrs(height=lambda v: v is not None)
self.assertEqual(len(new_ds.data_vars), 2)
for var in new_ds.data_vars:
self.assertEqual(new_ds[var].standard_name, 'air_potential_temperature')
new_ds = ds.filter_by_attrs(height='10 m')
self.assertEqual(len(new_ds.data_vars), 1)
for var in new_ds.data_vars:
self.assertEqual(new_ds[var].height, '10 m')
### Py.test tests
@pytest.fixture()
def data_set(seed=None):
return create_test_data(seed)
def test_dir_expected_attrs(data_set):
some_expected_attrs = {'pipe', 'mean', 'isnull', 'var1',
'dim1', 'numbers'}
result = dir(data_set)
assert set(result) >= some_expected_attrs
def test_dir_non_string(data_set):
# add a numbered key to ensure this doesn't break dir
data_set[5] = 'foo'
result = dir(data_set)
assert not (5 in result)
def test_dir_unicode(data_set):
data_set[u'unicode'] = 'uni'
result = dir(data_set)
assert u'unicode' in result
| apache-2.0 | 8,000,172,576,457,245,000 | 40.165214 | 165 | 0.537413 | false |
rynecarbone/power_ranker | power_ranker/web/power_plot.py | 1 | 5155 | #!/usr/bin/env python
"""Create box-plot of power rankings vs points scored"""
import logging
from pathlib import Path
import pandas as pd
from plotnine import *
import warnings
__author__ = 'Ryne Carbone'
logger = logging.getLogger(__name__)
def get_team_scores(df_schedule, team, week):
"""Get all scores for a team
:param df_schedule: data frame with scores and team ids for each game
:param team: id for team
:param week: current week
:return: series of scores for team up to week
"""
return (
df_schedule
.query(f'(home_id=={team} | away_id=={team}) & (matchupPeriodId <= {week} & winner != "UNDECIDED")')
.apply(lambda x: x.home_total_points if x.home_id == team else x.away_total_points, axis=1)
)
def make_power_plot(df_ranks, df_schedule, df_teams, year, week):
"""Create plot of weekly scores and current power rankings
:param df_ranks: data frame with current power rankings
:param df_schedule: data frame with scores for each game
:param df_teams: data frame with team names
:param year: current year
:param week: current week
:return: None
"""
# Grab team id and power score, convert power to ranking
df_plot = df_ranks[['team_id', 'power', 'tier']].reset_index(drop=True)
# Add power rankings as categorical variable for plot
df_plot['power'] = pd.Categorical(
df_plot.get('power').rank(ascending=False).astype(int),
categories=[i for i in range(df_plot.team_id.size, 0, -1)],
ordered=True
)
# Add in first names for each team
df_plot['Name'] = df_plot.apply(
lambda x: df_teams.loc[df_teams.team_id == x.get('team_id'), 'firstName'].values[0],
axis=1)
# Add in weekly scores
df_plot['scores'] = df_plot.apply(
lambda x: get_team_scores(df_schedule=df_schedule, team=x.get('team_id'), week=week).values,
axis=1)
# Add in where to put labels
df_plot['label_pos'] = df_plot.scores.apply(lambda x: max(x) + 10)
# Explode list into a row for each week
df_plot = df_plot.explode('scores')
df_plot.scores = df_plot.scores.astype(float)
# noinspection PyTypeChecker
p = (
ggplot(aes(y='scores', x='factor(power)', group='factor(power)', color='factor(tier)'), data=df_plot) +
geom_boxplot(alpha=.8, outlier_alpha=0) +
geom_jitter(width=.1, alpha=.3, color='black') +
geom_text(aes(label='Name', x='factor(power)', y='label_pos'),
color='black',
nudge_y=3,
data=df_plot.groupby(['team_id']).agg(max).reset_index(drop=True)) +
coord_flip() +
labs(x='Power Ranking', y='Weekly Score') +
theme_bw() +
theme(legend_title=element_text(text='Tiers', size=10),
legend_position=(0.18, .72),
legend_background=element_rect(alpha=0),
panel_grid_major_y=element_blank())
)
# Specify where to save the plot
out_dir = Path(f'output/{year}/week{week}')
out_dir.mkdir(parents=True, exist_ok=True)
out_name = out_dir / 'power_plot.png'
warnings.filterwarnings('ignore')
p.save(out_name, width=10, height=5.6, dpi=300)
warnings.filterwarnings('default')
logger.info(f'Saved power ranking plot to local file: {out_name.resolve()}')
def save_team_weekly_ranking_plots(year, week):
"""Create plot of historic team rankings this season
:param year: current year
:param week: current week
:return: None
"""
# Read in calculated rankings for the season
input_dir = Path(f'output/{year}')
f_rankings = input_dir / 'weekly_rankings.csv'
df_ranks = pd.read_csv(f_rankings)
# Create directory to save plots
out_dir = Path(f'output/{year}/week{week}/ranking_plots')
out_dir.mkdir(parents=True, exist_ok=True)
# Convert from wide to long
df_ranks = df_ranks.melt(id_vars=['team_id', 'week'], value_vars=['overall', 'power']).reset_index(drop=True)
# Get team ids
team_ids = df_ranks.get('team_id').unique().tolist()
# Get max rank for plot
max_rank = df_ranks.get('value').max()
# Create power history plot for each team
for team_id in team_ids:
p = (ggplot(aes(x='factor(week)',
y='value',
color='variable',
group='variable'),
data=df_ranks[df_ranks.team_id == team_id]) +
geom_line(aes(linetype='variable'), alpha=0.7, size=2) +
geom_point(size=3) +
scale_y_reverse(breaks=[x for x in range(max_rank, 0, -1)],
minor_breaks=[],
limits=[max_rank, 1]) +
labs(x='Week', y='Ranking', color='Ranking', linetype='Ranking') +
theme_bw() +
theme(legend_background=element_rect(alpha=0),
plot_background=element_rect(fill='white'),
panel_background=element_rect(fill='white'),
legend_box_margin=0,
strip_margin=0,
legend_title=element_text(size=8),
legend_text=element_text(size=7))
)
out_file = out_dir / f'ranking_{int(team_id)}.png'
warnings.filterwarnings('ignore')
p.save(out_file, width=8, height=2, dpi=300)
warnings.filterwarnings('default')
logger.info(f'Saved team power ranking history plots')
| mit | -1,320,269,196,353,959,400 | 36.904412 | 111 | 0.634724 | false |
dbarbier/ot-svn | python/doc/sphinxext/numpydoc/plot_directive.py | 3 | 20539 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
    (If None or empty, file names are relative to the directory where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import glob
import shutil
import imp
import warnings
import re
import textwrap
import traceback
import sphinx
# ``io.StringIO`` is the correct import on every supported Python version;
# the old ``sys.version_info`` branch was dead code because both branches
# imported the very same name.
from io import StringIO

import warnings
warnings.warn("A plot_directive module is also available under "
              "matplotlib.sphinxext; expect this numpydoc.plot_directive "
              "module to be deprecated after relevant features have been "
              "integrated there.",
              FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
    """Sphinx extension entry point: register the ``plot`` directive and
    its configuration values on *app*."""
    # Stash the app, its config, and the conf dir on the function object so
    # the directive code (which runs in a different context) can reach them.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    # Register every config value with rebuild=True, in the same order as
    # before (table-driven to keep name/default pairs in one place).
    config_values = [
        ('plot_pre_code', ''),
        ('plot_include_source', False),
        ('plot_formats', ['png', 'hires.png', 'pdf']),
        ('plot_basedir', None),
        ('plot_html_show_formats', True),
    ]
    for option_name, default in config_values:
        app.add_config_value(option_name, default, True)
    app.add_directive('plot', plot_directive, True, (0, 1, False),
                      **plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Old-style docutils directive function: the signature is dictated by
    # docutils, but only a subset of the arguments is needed by run(),
    # which does all the actual work.
    return run(arguments, content, options, state_machine, state, lineno)
# Expose the module docstring (usage instructions) as the directive's help
# text; this is why the function body above carries no docstring of its own.
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    # Restrict the ``format`` option to the listed choices via docutils.
    # NOTE(review): run() below only distinguishes 'python' from everything
    # else (treated as doctest); 'lisp' as the alternative looks odd here --
    # confirm against the upstream matplotlib plot_directive.
    return directives.choice(arg, ('python', 'lisp'))
def _option_align(arg):
    # Restrict the ``align`` option to the alignment keywords accepted by
    # the reST ``image`` directive.
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
# Option spec for the ``plot`` directive: the standard docutils ``image``
# options plus the plot-specific ``include-source`` and ``format`` options
# (validated by the helper converters defined above).
plot_directive_options = {'alt': directives.unchanged,
                          'height': directives.length_or_unitless,
                          'width': directives.length_or_percentage_or_unitless,
                          'scale': directives.nonnegative_int,
                          'align': _option_align,
                          'class': directives.class_option,
                          'include-source': _option_boolean,
                          'format': _option_format,
                          }
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        """Render *template* (a Jinja2 template string) with *kw*."""
        return jinja2.Template(template).render(**kw)
except ImportError:
    # Fall back to the legacy Jinja 1 API when jinja2 is unavailable.
    import jinja
    def format_template(template, **kw):
        """Render *template* with the legacy Jinja 1 ``from_string`` API."""
        return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
    """Bookkeeping for one generated figure: its base name, the directory
    it lives in, and the list of output formats produced so far."""

    def __init__(self, basename, dirname):
        self.basename = basename   # file name without extension
        self.dirname = dirname     # directory holding the image files
        self.formats = []          # extensions generated so far, e.g. ['png']

    def filename(self, format):
        """Return the full path of this image for the given *format*."""
        return os.path.join(self.dirname, self.basename + '.' + format)

    def filenames(self):
        """Return the full paths for every generated format, in order."""
        return list(map(self.filename, self.formats))
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt) == 2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| gpl-3.0 | -3,953,456,890,394,788,400 | 29.932229 | 83 | 0.534544 | false |
boland1992/seissuite_iran | seissuite/sort_later/pointshape.py | 2 | 2014 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 12:28:32 2015
@author: boland
"""
import sys
sys.path.append("/home/boland/Anaconda/lib/python2.7/site-packages")
import fiona
import shapefile
from shapely import geometry
import numpy as np
import matplotlib.pyplot as plt
import pyproj
import datetime
from matplotlib.path import Path
#---------------------------------------------
#DEFINE INPUT PARAMETERS
#---------------------------------------------
#enter shapefile absolute or relative path name as string if optimal = True
#shape_path = "/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
#N = 130
def shape_(input_shape):
with fiona.open(input_shape) as fiona_collection:
# In this case, we'll assume the shapefile only has one record/layer (e.g., the shapefile
# is just for the borders of a single country, etc.).
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
shape = geometry.asShape( shapefile_record['geometry'] )
return shape
def points_in_shape(shape_path, N):
shape = shape_(shape_path)
minx, miny, maxx, maxy = shape.bounds
#print minx; print miny; print maxx; print maxy
#bounding_box = geometry.box(minx, miny, maxx, maxy)
#generate random points within bounding box!
N_total = 130**2
sf = shapefile.Reader(shape_path)
shape = sf.shapes()[0]
#find polygon nodes lat lons
verticies = shape.points
#convert to a matplotlib path class!
polygon = Path(verticies)
#points_in_shape = polygon.contains_points(coords)
#coords = coords[points_in_shape == True][0:N-1]
X = abs(maxx - minx) * np.random.rand(N_total,1) + minx
Y = abs(maxy - miny) * np.random.rand(N_total,1) + miny
coords = np.column_stack((X,Y))
points_in_shape = polygon.contains_points(coords)
coords = coords[points_in_shape == True][0:N]
return coords
| gpl-3.0 | -5,387,212,902,189,082,000 | 25.5 | 112 | 0.638034 | false |
damonge/CoLoRe | examples/simple/read_skewers.py | 1 | 1672 | import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
hdulist = fits.open('out_srcs_s1_0.fits')
# First HDU contains the source catalog
print(hdulist[1].header.keys)
plt.figure()
plt.hist(hdulist[1].data['Z_COSMO'], bins=100)
print(" ")
# Second HDU contains the density skewers as a FITS image
# The skewers have the same ordering as the sources in the catalog
# (i.e. skewer hdulist[2].data[i, :] corresponds to source hdulist[1].data[i])
id = np.argmax(hdulist[1].data['Z_COSMO'])
print(hdulist[2].header.keys)
plt.figure()
plt.plot(hdulist[4].data['R'], hdulist[2].data[id])
plt.xlabel('$r\\, \\, [{\\rm Mpc}/h]$', fontsize=18)
plt.ylabel('$\\delta$', fontsize=18)
print(" ")
# Third HDU contains the velocity skewers. The units of the velocity are
# such that the skewers contain the redshift distortion associated with
# the peculiar velocity field
print(hdulist[3].header.keys)
plt.figure()
plt.plot(hdulist[4].data['R'], hdulist[3].data[id])
plt.xlabel('$r\\, \\, [{\\rm Mpc}/h]$', fontsize=18)
plt.ylabel('$\\delta z_{\\rm RSD}$', fontsize=18)
print(" ")
# Fourth HDU is a table containing background cosmological quantities at
# the distances where the skewers are sampled (see the use of
# hdulist[4].data['R'] in the previous examples
print(hdulist[4].header.keys)
plt.figure()
plt.plot(hdulist[4].data['Z'], hdulist[4].data['R']*0.001,
label='$r(z)\\, [{\\rm Gpc}/h]$')
plt.plot(hdulist[4].data['Z'], hdulist[4].data['D'],
label='$D_\\delta(z)$')
plt.plot(hdulist[4].data['Z'], hdulist[4].data['V'],
label='$D_v(z)$')
plt.legend(loc='lower right')
plt.xlabel('$z$', fontsize=18)
print(" ")
plt.show()
| gpl-3.0 | -3,835,317,680,419,938,300 | 33.122449 | 78 | 0.675837 | false |
Procrat/som | som/basic_som.py | 1 | 8023 | #!/usr/bin/env python
# encoding: utf-8
"""A regular SOM."""
from collections import UserList
from .som import normalize
from .som import SOM, Topology, Node
from itertools import chain, islice
from random import choice
from math import exp
import numpy as np
import matplotlib.pyplot as plt
class BasicSOM(SOM):
"""A regular SOM."""
def __init__(self, data, width, height, neighbourhood=None,
init_variance=None, **kwargs):
"""Initializes a new BasicSOM object.
:data: should be a list of numerical vectors
:width and :height: should be the dimension of the map
:neighbourhood: (optional) should be a function which
decides how much influence a bmu has on a specifed node at a
certain moment in the training stage.
:init_variance: (optional) should be the initial variance of the
Gaussian distribution of the neighbourhood function (if no other
neighbourhood function is given of course)
"""
codebook = Grid(data, width, height, init_variance)
super().__init__(data, codebook, **kwargs)
def color_plot(self):
"""Shows a representation of the BasicSOM where every codebook vector
is represented as a color. Of course, this only works for 3- or
4-dimensional data.
"""
assert 3 <= self.data_vector_size <= 4
values = [[x.vector for x in row] for row in self.codebook.data]
plt.imshow(values, interpolation='none')
plt.title('Color plot')
plt.show()
def label_plot(self):
"""If there are class labels available for the data, we plot the
SOM with labels on the nodes where this class is the most frequent.
"""
assert self.labels is not None
normalized_codebook = normalize(node.vector for node in self.codebook)
raster = split_generator(normalized_codebook, self.codebook.width)
three_feats = [[vector[:3] + [.7] for vector in row] for row in raster]
plt.imshow(three_feats, interpolation='none')
for label in set(self.labels) - set([None]):
class_node = max(self.codebook, key=lambda node: node.labels[label])
plt.text(class_node.row, class_node.col, label,
horizontalalignment='center', verticalalignment='center')
plt.title('Label plot')
plt.show()
def polar_plots(self):
"""Shows for each node the attributes of the codebook vector as a polar
plot.
"""
fig, axes = plt.subplots(self.codebook.height, self.codebook.width,
subplot_kw={'polar': True})
normalized_codebook = normalize(x.vector for x in self.codebook)
for ax, codebook_vector in zip(chain(*axes), normalized_codebook):
n = len(codebook_vector)
thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)
radii = codebook_vector
bars = ax.bar(thetas, radii, width=2 * np.pi / n)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i, bar in enumerate(bars):
bar.set_facecolor(plt.cm.jet(i / n))
bar.set_alpha(0.5)
fig.suptitle('Polar plots')
plt.show()
def hit_map(self):
"""Shows a heatmap of the codebook where the heat represents how many
times the nodes were chosen as a BMU.
"""
hits = [[node.hits for node in row] for row in self.codebook.data]
plt.imshow(hits, interpolation='none')
plt.title('Hit map')
plt.show()
def distance_map(self):
"""Shows a plot of how far the vector of a node is from its
neighbours. A warmer color means it's further away.
"""
distances = []
for row, nodes in enumerate(self.codebook.data):
distance_row = []
for col, node in enumerate(nodes):
node_distances = []
for drow, dcol in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if (row + drow < 0 or row + drow >= len(self.codebook) or
col + dcol < 0 or col + dcol >= len(nodes)):
continue
neighbour = self.codebook[row + drow][col + dcol].vector
node_distances.append(node.distance(neighbour))
distance_row.append(sum(node_distances) / len(node_distances))
distances.append(distance_row)
plt.imshow(distances, interpolation='none')
plt.title('Distance map / U-matrix')
plt.show()
class Grid(Topology, UserList):
"""A grid topology which is just a wrapper around a 2D list, satisfying the
Topology interface.
"""
def __init__(self, data, width, height, init_variance=None):
"""Initializes the grid."""
self.width, self.height = width, height
# If no initial variance is given, make a guess based on the dimensions
if init_variance is None:
init_variance = ((width ** 2 + height ** 2) / 2) ** 0.5
self.init_variance = init_variance
real_list = [[GridNode(i, j, choice(data)) for j in range(width)]
for i in range(height)]
super().__init__(real_list)
def __iter__(self):
"""Returns an iterator which flattens the 2D array to one dimension."""
return chain(*self.data)
def neighbourhood(self, node1, node2, t):
"""Returns the neighbourhood influence of node1 over node2 at
iteration t. This uses a gaussian distribution.
"""
return self._gaussian(node1, node2, t)
# M-SOM
# return max(0, 1 - self.distance_squared(node1, node2))
def _gaussian(self, node1, node2, t):
"""Calculates a neighbourhood value following a Gaussian distribution.
This assumes the nodes are GridNodes.
"""
dist_sq = self.distance_squared(node1, node2)
variance = self._gaussian_variance(t)
return exp(-dist_sq / (2 * variance * variance))
def _gaussian_variance(self, t):
"""A decreasing function for the variance of the gaussian distribution.
"""
# return self.init_variance * (1 - t)
return self.init_variance / (1 + t)
# return self.init_variance ** (-t + 1)
# return self.init_variance * (.001 / self.init_variance) ** t
def distance_squared(self, node1, node2):
"""Calculates the squared distance between two nodes on the grid."""
return (node1.row - node2.row) ** 2 + (node1.col - node2.col) ** 2
def are_neighbours(self, node1, node2):
"""Checks whether two nodes are neighbouring on the grid."""
return ((node1.row == node2.row and abs(node1.col - node2.col) <= 1)
or (node1.col == node2.col and abs(node1.row - node2.row) <= 1))
class ToroidalGrid(Grid):
"""An extension of the regular grid using a torus."""
def distance_squared(self, node1, node2):
"""Calculates the squared distance between two nodes on the torus."""
dr = abs(node1.row - node2.row)
dc = abs(node1.col - node2.col)
return (min(dr, abs(dr - self.height)) ** 2
+ min(dc, abs(dc - self.width)) ** 2)
class GridNode(Node):
"""A node in the grid."""
def __init__(self, row, col, *args, **kwargs):
super().__init__(*args, **kwargs)
self.row, self.col = row, col
def __repr__(self):
"""Representation: 'row,col: label (vector)'"""
return '%d,%d: %s (%s)' % (self.row, self.col, self.get_label(),
self.vector)
def split_generator(generator, n):
"""A helper function which splits a generator into multiple generators,
cutting it off each time after n elements.
"""
while True:
part = list(islice(generator, n))
if len(part) > 0:
yield part
if len(part) < n:
break
| mit | 1,099,930,361,131,690,900 | 37.204762 | 80 | 0.589056 | false |
Ecam-Eurobot-2017/main | code/raspberrypi/graphmap/graphmap.py | 1 | 16765 | import math
import os.path
import operator
import pprint
import networkx as nx
from .utils import GraphUtils
class GraphMap:
CACHE_PATH = 'graphmap.data'
def __init__(self, nodes, triangles, cache=True):
"""
nodes represent the (x, y) address of nodes in the graph.
triangles give the 3 positions in nodes array to form a triangle.
"""
self._cache = cache
# Remove nodes and edges to simulate obstacles.
self._obstacles_cache = {'nodes': [], 'edges': []}
self._graph = None
if self._cache and os.path.exists(self.CACHE_PATH):
self._graph = self.__read_cache()
else:
self.__build_graph_from_mesh(nodes, triangles)
    def get_path(self, robot_pos, target, display=False):
        """Compute the instruction list leading the robot to *target*.

        robot_pos/target: dicts with a 'point' key ((x, y) position) and
            an 'angle' key (heading in degrees).
        display: when True, color the chosen path in green so a later
            call to display() shows it.
        Returns a list of {'action': 'move'|'turn', 'value': ...} dicts
        (see __convert_nodelist_to_instruction for the conventions).
        """
        # Reserved ids for the two virtual nodes standing for the exact
        # robot and target positions (mesh nodes use small sequential ids).
        START_NODE_ID = 1000
        END_NODE_ID = 1001
        # Remove the virtual nodes left over by a previous call before
        # looking for the shortest path; searching through them could
        # give an invalid result.
        nodes = self._graph.nodes()
        if START_NODE_ID in nodes and END_NODE_ID in nodes:
            self._graph.remove_node(START_NODE_ID)
            self._graph.remove_node(END_NODE_ID)
        # Get the nearest graph node from the robot position and from the
        # target position.  The direction hint of start_node is the
        # target (makes sense) and vice-versa for end_node.
        start_node = self.get_neirest_node_pos(robot_pos['point'], target['point'])
        end_node = self.get_neirest_node_pos(target['point'], robot_pos['point'])
        path = nx.shortest_path(self._graph, source=start_node, target=end_node, weight='weight')
        # Materialize the robot/target positions as (green) graph nodes,
        # mainly for display purposes.
        self._graph.add_node(START_NODE_ID, pos=robot_pos['point'], color='green')
        self._graph.add_node(END_NODE_ID, pos=target['point'], color='green')
        # If we display the graph map, color the path that the robot should have taken.
        if display:
            self._graph.add_edge(START_NODE_ID, path[0], color='green')
            self._graph.add_edge(path[-1], END_NODE_ID, color='green')
            # NOTE(review): .edge[u][v] is the networkx 1.x API -- confirm
            # the pinned networkx version before upgrading.
            for i in range(len(path) - 1):
                self._graph.edge[path[i]][path[i+1]]['color'] = 'green'
        path = [START_NODE_ID] + path
        path.append(END_NODE_ID)
        return self.__convert_nodelist_to_instruction(path, robot_pos['angle'], target['angle'])
    def add_obstacle(self, robot_pos, robot_dim, obstacle_dim, obstacle_position, obstacle_distance):
        """Temporarily carve a detected obstacle out of the graph.

        robot_pos: {'point': (x, y), 'angle': degrees} current robot pose.
        robot_dim: {'length': ..., 'width': ...} robot footprint.
        obstacle_dim: depth of the obstacle along the detection axis.
        obstacle_position: side of the robot that detected it: 'front',
            'left', 'right' or 'back'.
        obstacle_distance: distance between the robot and the obstacle.

        Every node inside the obstacle rectangle, and every surviving edge
        crossing it, is removed from the graph.  The removals are recorded
        in _obstacles_cache so reset_obstacles() can undo them.
        """
        obstacle_points = self.__create_obstacle_rectangle(robot_pos, robot_dim, obstacle_dim, obstacle_position,
                                                          obstacle_distance)
        # Axis-aligned bounding box of the obstacle rectangle.
        (minx, miny, maxx, maxy) = GraphUtils.get_min_max_points(obstacle_points)
        # First pass: drop every node lying inside the bounding box.
        for n in self._graph.nodes():
            p = self._graph.node[n]['pos']
            if GraphUtils.is_point_in_rectangle(minx, miny, maxx, maxy, p[0], p[1]):
                # Remove nodes and every edges in it (cache them first so
                # they can be restored later).
                self._obstacles_cache['nodes'].append({'id': n, 'attr': self._graph.node[n]})
                for neighbor in self._graph.neighbors(n):
                    self._obstacles_cache['edges'].append({'id': (n, neighbor),
                                                           'attr': self._graph.edge[n][neighbor]})
                self._graph.remove_node(n)
        # Second pass: drop the remaining edges that cross the box even
        # though both endpoints are outside it.
        for edge in self._graph.edges():
            p1 = self._graph.node[edge[0]]['pos']
            p2 = self._graph.node[edge[1]]['pos']
            if GraphUtils.is_line_cross_rectangle(minx, miny, maxx, maxy, p1[0], p1[1], p2[0], p2[1]):
                # We can remove the edge.
                self._obstacles_cache['edges'].append({'id': edge,
                                                       'attr': self._graph.edge[edge[0]][edge[1]]})
                self._graph.remove_edge(edge[0], edge[1])
def reset_obstacles(self):
# Add back the nodes.
for node in self._obstacles_cache['nodes']:
self._graph.add_node(node['id'], attr_dict=node['attr'])
# Add back the edges.
for edge in self._obstacles_cache['edges']:
self._graph.add_edge(edge['id'][0], edge['id'][1], attr_dict=edge['attr'])
def display(self):
"""
Use matplotlib to display graph.
"""
import matplotlib.pyplot as plt
node_color = list(nx.get_node_attributes(self._graph, 'color').values())
edge_color = list(nx.get_edge_attributes(self._graph, 'color').values())
_, ax = plt.subplots()
ax.axis('equal')
nx.draw_networkx(self._graph, nx.get_node_attributes(self._graph, 'pos'),
node_size=20, with_labels=True, edge_color=edge_color,
node_color=node_color, ax=ax)
plt.show()
    def save(self):
        """Pickle the current graph to CACHE_PATH so a later run can
        reload it instead of rebuilding it from the mesh."""
        nx.write_gpickle(self._graph, self.CACHE_PATH)
    def __read_cache(self):
        """Load and return the graph previously pickled at CACHE_PATH."""
        return nx.read_gpickle(self.CACHE_PATH)
def get_neirest_node_pos(self, point, direction):
"""
Get the neirest node position according to the direction.
Takes 2 nodes ID and return another one.
"""
best_matches = {'dist_point': 100}
# Get the neirest points.
for node in self._graph.nodes():
node_pos = self._graph.node[node]['pos']
dist_point = self.__distance_btw_points(point, node_pos)
if dist_point < best_matches['dist_point']:
best_matches = { 'dist_point': dist_point, 'node': node }
# dist_dest = self.__distance_btw_points(direction, node_pos)
# if dist_point < 30:
# best_matches.append({'dist_point': dist_point,
# 'dist_dest': dist_dest,
# 'node': node})
# Get the point which is the closest to the direction we need to go to.
# pprint.pprint(best_matches)
# return sorted(best_matches, key=operator.itemgetter('dist_dest'))[0]['node']
return best_matches['node']
def __simplify_turn_angle(self, angle):
"""
Convert angle [0; 360] to [-180: 180] degrees to simplify the rotation of the
robot.
"""
if abs(angle) > 180:
sign = 1 if angle > 0 else -1
angle = -sign*(360 - abs(angle))
return round(angle)
def __convert_nodelist_to_instruction(self, path, robot_angle, target_angle):
"""
Convert the node id list to instruction easily understandable for the robot control.
Return a list of dict() with a key giving the movement ("move" or "turn") and a key giving
a value (distance in cm for "move" or turning degrees for "turn"). The value can be positive
or negative.
value/movement| "move" | "turn"
----------------------------------------
positive | forward | right
----------------------------------------
negative | backward | left
----------------------------------------
"""
actions = []
# First turn is a bit specific so we don't do it in the for loop.
start_angle_constrain = self.__get_node_angle(path[1], path[0])
actions.append({'action': 'turn', 'value': self.__simplify_turn_angle(robot_angle
- start_angle_constrain)})
for i in range(len(path) - 2):
# Add the distance actions
distance = self.__distance_btw_points(
self._graph.node[path[i]]['pos'],
self._graph.node[path[i+1]]['pos']
)
# Check if have 2 moves actions successively.
if actions[-1]['action'] == 'move':
actions[-1]['value'] += distance
else:
actions.append({'action': 'move', 'value': round(distance)})
# Try to simplify the actions by removing actions making a triangle.
# A triangle means that the actions could be made by a straight line.
# if (i > 2) and (actions[-2]['action'] == 'turn') and (abs(actions[-2]['value']) < 15):
# # Begin the simplication.
# # Calculate the new distance
# a = actions[-3]['value']
# b = actions[-1]['value']
# triange_angle = 180 - actions[-2]['value']
# new_distance = a**2 + b**2 - 2*a*b*math.cos(math.radians(triange_angle))
# actions[-1]['value'] = round(math.sqrt(new_distance))
# del actions[-3]
# # Correct the angle
# node_pos = self._graph.node[path[i-3]]['pos']
# center_pos = self._graph.node[path[i-2]]['pos']
# next_node_pos = self._graph.node[path[i]]['pos']
# turn_angle = self.__calculate_turn_angle(node_pos, center_pos, next_node_pos)
# if turn_angle != 0:
# if actions[-3]['action'] == 'turn':
# actions[-3]['value'] += turn_angle
# del actions[-2]
# else:
# actions[-2] = {'action': 'turn', 'value': turn_angle}
# Add the turn actions.
node_pos = self._graph.node[path[i]]['pos']
center_pos = self._graph.node[path[i+1]]['pos']
next_node_pos = self._graph.node[path[i+2]]['pos']
turn_angle = self.__calculate_turn_angle(node_pos, center_pos, next_node_pos)
if turn_angle != 0:
actions.append({'action': 'turn', 'value': turn_angle})
# Finalize the last moving and turning.
distance = self.__distance_btw_points(
self._graph.node[path[-2]]['pos'],
self._graph.node[path[-1]]['pos']
)
actions.append({'action': 'move', 'value': int(distance)})
end_robot_angle = self.__simplify_turn_angle(self.__get_node_angle(path[-2], path[-1]))
print(end_robot_angle, target_angle)
actions.append({'action': 'turn', 'value':
self.__simplify_turn_angle(end_robot_angle + target_angle)})
return actions
def __calculate_turn_angle(self, node, center, next_node):
opposite_pos = (center[0]+(center[0]-node[0]),
center[1] + (center[1]-node[1]))
# Calculate the 2 angles needed to get the turn angle.
angle1 = self.__get_pos_angle(center, opposite_pos)
angle2 = self.__get_pos_angle(center, next_node)
angle = self.__simplify_turn_angle(angle2-angle1)
print(node, center, opposite_pos, angle, angle1, angle2)
return angle
# return self.__simplify_turn_angle(angle2 - angle1)
def __build_graph_from_mesh(self, nodes, triangle_cells):
"""
nodes: position of each nodes in the mesh.
triangle_cells: list of triangle cells list
A triangle cell is a 3 items lists with each items is a vertice
of a triangle.
"""
graph = nx.Graph()
for i, n in enumerate(nodes):
graph.add_node(i, pos=n, color='red')
for conns in triangle_cells:
for i, c in enumerate(conns):
p1 = c
p2 = conns[(i+1) % 3]
weight = self.__distance_btw_points(
graph.node[p1]['pos'],
graph.node[p2]['pos']
)
graph.add_edge(p1, p2, weight=weight, color='black')
self._graph = graph
# Not useful but could be.
self.__mark_nodes_as_border()
print('Cleaning!')
self.__clean()
self.save()
def __distance_btw_points(self, p1, p2):
x = (p1[0] - p2[0])**2
y = (p1[1] - p2[1])**2
return math.sqrt(x + y)
def __clean(self):
"""
Merges useless nodes according to the distance between 2 nodes.
"""
for i in range(1000):
for e in self._graph.edges():
if e[0] == e[1]:
continue
if self._graph[e[0]][e[1]]['weight'] < 7:
self.__merge_nodes(e[0], e[1])
break
def __merge_nodes(self, node, old_node):
"""
Takes 2 node ids. One will stay, one will be eaten.
Obvioulsy, the first will eat the second.
"""
if node == old_node:
return
out_edge_node = self._graph.neighbors(node)
out_edge_old_node = self._graph.neighbors(old_node)
for edge in out_edge_old_node:
if edge not in out_edge_node:
weight = self.__distance_btw_points(
self._graph.node[node]['pos'],
self._graph.node[edge]['pos']
)
self._graph.add_edge(node, edge, weight=weight, color='black')
self._graph.remove_node(old_node)
    def __mark_nodes_as_border(self):
        """
        Flag nodes located on the border of the mesh.
        Mostly works.  A flagged node gets 'color' set to 'blue' and
        'mesh_edge' set to True.
        Works by checking the biggest opening angle between 2 triangles:
        an interior node is surrounded by triangles, so the angular gap
        between two consecutive neighbours stays small, while a border
        node has a wide uncovered sector.
        """
        for node in self._graph.nodes():
            out_edges = self._graph.neighbors(node)
            # A node with only 1 or 2 neighbours cannot be interior.
            if len(out_edges) == 2 or len(out_edges) == 1:
                self._graph.node[node]['color'] = 'blue'
                self._graph.node[node]['mesh_edge'] = True
                continue
            # Sort the neighbours by their absolute angle, then measure
            # the angular gap between each consecutive pair, wrapping
            # around at 360 degrees.
            angles = sorted(self.__get_neighbors_angle(node),
                            key=lambda k: k['angle'])
            biggest_angle = 0
            for i in range(len(angles)):
                a1 = angles[i]['angle']
                a2 = angles[(i+1) % len(angles)]['angle']
                a_diff = (a2 - a1)
                if a_diff < 0:
                    a_diff += 360
                if abs(a_diff) > biggest_angle:
                    biggest_angle = abs(a_diff)
            # NOTE(review): 105 degrees is an empirical threshold -- tune
            # against the actual mesh density if borders are mis-flagged.
            if biggest_angle > 105:
                self._graph.node[node]['color'] = 'blue'
                self._graph.node[node]['mesh_edge'] = True
def __get_neighbors_angle(self, node_id):
neighbors = self._graph.neighbors(node_id)
data = []
for i in neighbors:
data.append({'neighbor': i, 'angle': self.__get_node_angle(node_id, i)})
return data
def __get_pos_angle(self, center, node, upper=(0, 0)):
"""
Calculate an angle between 3 points using the law of cosine.
"""
# Little hack allows to create a fake x axis.
if upper == (0, 0):
upper = (2000, center[1])
a = self.__distance_btw_points(center, node)
b = self.__distance_btw_points(center, upper)
c = self.__distance_btw_points(node, upper)
# Law of cosine
cosinelaw = (a**2 + b**2 - c**2) / (2*a*b)
# Clamp the value between [-1; 1] because of inaccuracy in
# floating point numbers.
cosinelaw = max(min(cosinelaw, 1.0), -1.0)
angle = math.degrees(math.acos(cosinelaw))
# Check if we have passed the 180 degrees or not.
xpos1 = node[1] - center[1]
if xpos1 > 0:
angle = 360 - angle
return angle
def __get_node_angle(self, center, node):
"""
The angle is the absolute angle relative to the X axis.
"""
center_pos = self._graph.node[center]['pos']
point_pos = self._graph.node[node]['pos']
return self.__get_pos_angle(center_pos, point_pos)
def __create_obstacle_rectangle(self, robot_pos, robot_dim, obstacle_dim, obstacle_pos, obstacle_distance):
direction = 1
if robot_pos['angle'] > 180:
direction = -1
if obstacle_pos == 'front':
return GraphUtils.generate_translated_rectangle(robot_pos['point'], robot_pos['angle'],
robot_dim['length']/2, robot_dim['width']/2,
obstacle_distance+obstacle_dim, 1)
elif obstacle_pos == 'left':
return GraphUtils.generate_translated_rectangle(robot_pos['point'], robot_pos['angle']+direction*90,
robot_dim['length']/2, robot_dim['width']/2,
obstacle_distance+obstacle_dim, direction)
elif obstacle_pos == 'right':
return GraphUtils.generate_translated_rectangle(robot_pos['point'], robot_pos['angle']-direction*90,
robot_dim['length']/2, robot_dim['width']/2,
obstacle_distance+obstacle_dim, direction)
elif obstacle_pos == 'back':
return GraphUtils.generate_translated_rectangle(robot_pos['point'], robot_pos['angle'] + 180,
robot_dim['length']/2, robot_dim['width']/2,
obstacle_distance+obstacle_dim, 1)
return None
| mit | 3,210,404,997,343,590,000 | 40.395062 | 113 | 0.53224 | false |
Kitware/minerva | gaia_tasks/inputs.py | 1 | 7301 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Epidemico Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
import tempfile
from base64 import b64encode
import fiona
import geopandas
import requests
from six import StringIO
from gaia.core import GaiaException
from gaia.filters import filter_pandas
from gaia.inputs import GaiaIO
from gaia import formats, types
import girder_worker
from girder.constants import PACKAGE_DIR
import girder_client
class MinervaVectorIO(GaiaIO):
"""
Interface to Minerva item geojson
"""
type = types.VECTOR
default_output = formats.JSON
def __init__(self, item_id=None, token=None, name='gaia_result.json',
uri='', **kwargs):
"""
Read and write GeoJSON data to/from Girder
:param item_id: Item id to read/write from/to
:param uri: location of temporary file
:param kwargs: Other keyword arguments
"""
self.id = item_id
self.token = token
if uri:
self.uri = uri
else:
tmpdir = tempfile.mkdtemp()
self.uri = tempfile.mkstemp(suffix='.json', dir=tmpdir)[1]
self.filename = name
girderHost = None
girderPort = None
girderScheme = None
try:
girderHost = girder_worker.config.get('minerva', 'girder_host_name')
girderPort = girder_worker.config.get('minerva', 'girder_host_port')
girderScheme = girder_worker.config.get('minerva', 'girder_host_scheme')
except:
girderHost = 'localhost'
girderPort = '8080'
girderScheme = 'http'
client = girder_client.GirderClient(host=girderHost, port=girderPort, scheme=girderScheme)
client.token = token
self.client = client
self.meta = self.client.getItem(item_id)
super(MinervaVectorIO, self).__init__(uri=self.uri, **kwargs)
def save_geojson(self):
"""
Save GeoJSON from a Minerva item
TODO: Use caching like the girder_worker.girder_io plugin
TODO: Separate methods for saving geojson from different sources
TODO: Get geojson via WFS calls for local WMS vector layers
"""
minerva = self.meta['meta']['minerva']
if 'geojson_file' in minerva:
# Uploaded GeoJSON is stored as a file in Girder
self.client.downloadFile(minerva['geojson_file']['_id'], self.uri)
elif 'geojson' in minerva:
# Mongo collection is stored in item meta
geojson = json.loads(minerva['geojson']['data'])
# TODO: Don't use mongo metadata for filename
with open(self.uri, 'w') as outjson:
json.dump(geojson, outjson)
# elif 'dataset_type' in minerva and minerva['dataset_type'] == 'wms':
# from girder.plugins.minerva.utility.minerva_utility import decryptCredentials
# servers = config.getConfig()['gaia_minerva_wms']['servers']
# if minerva['base_url'] in servers:
# params = 'srsName=EPSG:4326&typename={name}&outputFormat=json'\
# + '&version=1.0.0&service=WFS&request=GetFeature'
# url = '{base}?{params}'.format(
# base=minerva['base_url'].replace('/wms', '/wfs'),
# params=params.format(name=minerva['type_name'])
# )
# headers = {}
# if 'credentials' in minerva:
# credentials = (minerva['credentials'])
# basic_auth = 'Basic ' + b64encode(
# decryptCredentials(credentials))
# headers = {'Authorization': basic_auth}
# with open(self.uri, 'w') as outjson:
# r = requests.get(url, headers=headers)
# r.raise_for_status()
# json.dump(r.json(), outjson)
# else:
# raise GaiaException('This server {} is not supported. \n{}'.format(minerva))
else:
raise GaiaException('Unsupported data source. \n{}'.format(minerva))
def read(self, epsg=None, **kwargs):
"""
Read vector data from Girder
:param format: Format to return data in (default is GeoDataFrame)
:param epsg: EPSG code to reproject data to
:return: Data in GeoJSON
"""
if self.data is None:
self.save_geojson()
self.data = geopandas.read_file(self.uri)
if self.filters:
self.filter_data()
out_data = self.data
if epsg and self.get_epsg() != epsg:
out_data = geopandas.GeoDataFrame.copy(out_data)
out_data[out_data.geometry.name] = \
self.data.geometry.to_crs(epsg=epsg)
out_data.crs = fiona.crs.from_epsg(epsg)
if format == formats.JSON:
return out_data.to_json()
else:
return out_data
def write(self, filename=None, as_type='json'):
"""
Write data (assumed geopandas) to geojson or shapefile
:param filename: Base filename
:param as_type: json or memory
:return: file girder uri
"""
filedata = self.data.to_json()
if not filename:
filename = self.filename
if as_type == 'json':
self.uri = self.uri.replace(os.path.basename(self.uri), filename)
self.create_output_dir(self.uri)
with open(self.uri, 'w') as outfile:
outfile.write(filedata)
elif as_type == 'memory':
pass
else:
raise NotImplementedError('{} not a valid type'.format(as_type))
fd = StringIO(filedata)
upload = self.client.uploadFile(parentId=self.id, stream=fd,
size=len(filedata), name=filename)
item_meta = self.client.getItem(self.id)['meta']
item_meta['minerva']['geojson_file'] = {
'_id': upload['_id'],
'name': upload['name']
}
item_meta['minerva']['geo_render'] = {
'type': 'geojson',
'file_id': upload['_id']
}
self.client.addMetadataToItem(self.id, item_meta)
return os.path.join(
self.client.urlBase, 'file', upload['_id'], 'download')
def filter_data(self):
"""
Apply filters to the dataset
:return:
"""
self.data = filter_pandas(self.data, self.filters)
PLUGIN_CLASS_EXPORTS = [
MinervaVectorIO
]
| apache-2.0 | 1,753,931,356,030,654,700 | 36.634021 | 98 | 0.567457 | false |
FEniCS/dolfin | test/unit/python/mesh/test_mesh_quality.py | 1 | 3815 | #!/usr/bin/env py.test
"Unit tests for the MeshQuality class"
# Copyright (C) 2013 Garth N. Wells
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-10-07
# Last changed:
from __future__ import print_function
import pytest
import numpy
from dolfin import *
from dolfin_utils.test import skip_in_parallel
def test_radius_ratio_triangle():
# Create mesh and compute rations
mesh = UnitSquareMesh(12, 12)
ratios = MeshQuality.radius_ratios(mesh)
for c in cells(mesh):
assert round(ratios[c] - 0.828427124746, 7) == 0
def test_radius_ratio_tetrahedron():
# Create mesh and compute ratios
mesh = UnitCubeMesh(14, 14, 14)
ratios = MeshQuality.radius_ratios(mesh)
for c in cells(mesh):
assert round(ratios[c] - 0.717438935214, 7) == 0
def test_radius_ratio_triangle_min_max():
# Create mesh, collpase and compute min ratio
mesh = UnitSquareMesh(12, 12)
rmin, rmax = MeshQuality.radius_ratio_min_max(mesh)
assert rmax <= rmax
x = mesh.coordinates()
x[:, 0] *= 0.0
rmin, rmax = MeshQuality.radius_ratio_min_max(mesh)
assert round(rmin - 0.0, 7) == 0
assert round(rmax - 0.0, 7) == 0
def test_radius_ratio_tetrahedron_min_max():
# Create mesh, collpase and compute min ratio
mesh = UnitCubeMesh(12, 12, 12)
rmin, rmax = MeshQuality.radius_ratio_min_max(mesh)
assert rmax <= rmax
x = mesh.coordinates()
x[:, 0] *= 0.0
rmin, rmax = MeshQuality.radius_ratio_min_max(mesh)
assert round(rmax - 0.0, 7) == 0
assert round(rmax - 0.0, 7) == 0
def test_radius_ratio_matplotlib():
# Create mesh, collpase and compute min ratio
mesh = UnitCubeMesh(12, 12, 12)
test = MeshQuality.radius_ratio_matplotlib_histogram(mesh, 5)
print(test)
@skip_in_parallel
def test_radius_ratio_min_radius_ratio_max():
mesh1d = UnitIntervalMesh(4)
mesh1d.coordinates()[4] = mesh1d.coordinates()[3]
# Create 2D mesh with one equilateral triangle
mesh2d = UnitSquareMesh(1, 1, 'left')
mesh2d.coordinates()[3] += 0.5*(sqrt(3.0)-1.0)
# Create 3D mesh with regular tetrahedron and degenerate cells
mesh3d = UnitCubeMesh(1, 1, 1)
mesh3d.coordinates()[2][0] = 1.0
mesh3d.coordinates()[7][1] = 0.0
rmin, rmax = MeshQuality.radius_ratio_min_max(mesh1d)
assert round(rmin - 0.0, 7) == 0
assert round(rmax - 1.0, 7) == 0
rmin, rmax = MeshQuality.radius_ratio_min_max(mesh2d)
assert round(rmin - 2.0*sqrt(2.0)/(2.0+sqrt(2.0)), 7) == 0
assert round(rmax - 1.0, 7) == 0
rmin, rmax = MeshQuality.radius_ratio_min_max(mesh3d)
assert round(rmin - 0.0, 7) == 0
assert round(rmax - 1.0, 7) == 0
def test_dihedral_angles_min_max():
# Create 3D mesh with regular tetrahedron
mesh = UnitCubeMesh(2, 2, 2)
dang_min, dang_max = MeshQuality.dihedral_angles_min_max(mesh)
assert round(dang_min*(180/numpy.pi) - 45.0) == 0
assert round(dang_max*(180/numpy.pi) - 90.0) == 0
def test_dihedral_angles_matplotlib():
# Create mesh, collpase and compute min ratio
mesh = UnitCubeMesh(12, 12, 12)
test = MeshQuality.dihedral_angles_matplotlib_histogram(mesh, 5)
print(test)
| lgpl-3.0 | -8,000,715,879,390,831,000 | 29.52 | 77 | 0.675491 | false |
bellwethers-in-se/defects | src/tca_vs_seer.py | 1 | 1139 | """
Compares TCA with Bellwether Method (SEER to be added)
"""
from __future__ import print_function, division
import os
import sys
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
from SEER.SEER import seer_jur
from TCA.execute import tca_jur
import multiprocessing
from pdb import set_trace
from stats.effect_size import hedges_g_2
import pandas
from utils import print_pandas
# Note To Self: REFACTOR THE TRANSFER LEARNER
def compute(method):
return method()
if __name__ == "__main__":
methods = [seer_jur, tca_jur]
pool = multiprocessing.Pool(processes=2)
a = pool.map(compute, methods)
projects = a[0].keys()
for p in projects:
print("\\textbf{" + str(p) + "}")
bell = a[0][p].set_index("Name").sort_index() # Rename index and sort alphabetically
tca = a[1][p].set_index("Name").sort_index() # Rename index and sort alphabetically
both = pandas.concat([tca, bell], axis=1, join_axes=[tca.index])
all_metrics = hedges_g_2(both)
print_pandas(all_metrics.set_index("Name"))
print("\n\n")
| mit | -1,644,647,821,626,126,300 | 26.780488 | 93 | 0.656716 | false |
suriyan/ethnicolr | ethnicolr/census_ln.py | 1 | 3561 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import pandas as pd
from pkg_resources import resource_filename
from .utils import column_exists, fixup_columns
CENSUS2000 = resource_filename(__name__, "data/census/census_2000.csv")
CENSUS2010 = resource_filename(__name__, "data/census/census_2010.csv")
CENSUS_COLS = ['pctwhite', 'pctblack', 'pctapi', 'pctaian', 'pct2prace',
'pcthispanic']
class CensusLnData():
census_df = None
@classmethod
def census_ln(cls, df, namecol, year=2000):
"""Appends additional columns from Census data to the input DataFrame
based on the last name.
Removes extra space. Checks if the name is the Census data. If it is,
outputs data from that row.
Args:
df (:obj:`DataFrame`): Pandas DataFrame containing the last name
column.
namecol (str or int): Column's name or location of the name in
DataFrame.
year (int): The year of Census data to be used. (2000 or 2010)
(default is 2000)
Returns:
DataFrame: Pandas DataFrame with additional columns 'pctwhite',
'pctblack', 'pctapi', 'pctaian', 'pct2prace', 'pcthispanic'
"""
if namecol not in df.columns:
print("No column `{0!s}` in the DataFrame".format(namecol))
return df
df['__last_name'] = df[namecol].str.strip()
df['__last_name'] = df['__last_name'].str.upper()
if cls.census_df is None or cls.census_year != year:
if year == 2000:
cls.census_df = pd.read_csv(CENSUS2000, usecols=['name'] +
CENSUS_COLS)
elif year == 2010:
cls.census_df = pd.read_csv(CENSUS2010, usecols=['name'] +
CENSUS_COLS)
cls.census_df.drop(cls.census_df[cls.census_df.name.isnull()]
.index, inplace=True)
cls.census_df.columns = ['__last_name'] + CENSUS_COLS
cls.census_year = year
rdf = pd.merge(df, cls.census_df, how='left', on='__last_name')
del rdf['__last_name']
return rdf
census_ln = CensusLnData.census_ln
def main(argv=sys.argv[1:]):
title = 'Appends Census columns by last name'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('input', default=None,
help='Input file')
parser.add_argument('-y', '--year', type=int, default=2000,
choices=[2000, 2010],
help='Year of Census data (default=2000)')
parser.add_argument('-o', '--output', default='census-output.csv',
help='Output file with Census data columns')
parser.add_argument('-l', '--last', required=True,
help='Name or index location of column contains '
'the last name')
args = parser.parse_args(argv)
print(args)
if not args.last.isdigit():
df = pd.read_csv(args.input)
else:
df = pd.read_csv(args.input, header=None)
args.last = int(args.last)
if not column_exists(df, args.last):
return -1
rdf = census_ln(df, args.last, args.year)
print("Saving output to file: `{0:s}`".format(args.output))
rdf.columns = fixup_columns(rdf.columns)
rdf.to_csv(args.output, index=False)
return 0
if __name__ == "__main__":
sys.exit(main())
| mit | -221,643,584,409,173,700 | 30.794643 | 78 | 0.564448 | false |
madphysicist/numpy | numpy/fft/_pocketfft.py | 2 | 52860 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1, norm="backward")
ifft(a, n=None, axis=-1, norm="backward")
rfft(a, n=None, axis=-1, norm="backward")
irfft(a, n=None, axis=-1, norm="backward")
hfft(a, n=None, axis=-1, norm="backward")
ihfft(a, n=None, axis=-1, norm="backward")
fftn(a, s=None, axes=None, norm="backward")
ifftn(a, s=None, axes=None, norm="backward")
rfftn(a, s=None, axes=None, norm="backward")
irfftn(a, s=None, axes=None, norm="backward")
fft2(a, s=None, axes=(-2,-1), norm="backward")
ifft2(a, s=None, axes=(-2, -1), norm="backward")
rfft2(a, s=None, axes=(-2,-1), norm="backward")
irfft2(a, s=None, axes=(-2, -1), norm="backward")
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
"""
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
import functools
from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt
from . import _pocketfft_internal as pfi
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.fft')
def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
    """Dispatch a single-axis transform of length *n* to the pocketfft backend.

    ``inv_norm`` is the value the raw transform result must be divided by.
    It replaces the more intuitive multiplicative ``fct`` parameter so that
    zero-length axes do not trigger a division by zero while computing it.
    """
    axis = normalize_axis_index(axis, a.ndim)
    if n is None:
        n = a.shape[axis]
    fct = 1 / inv_norm

    m = a.shape[axis]
    if m != n:
        shape = list(a.shape)
        sl = [slice(None)] * len(shape)
        if m > n:
            # Input longer than requested: crop to the first n entries.
            sl[axis] = slice(0, n)
            a = a[tuple(sl)]
        else:
            # Input shorter than requested: zero-pad up to length n.
            sl[axis] = slice(0, m)
            shape[axis] = n
            padded = zeros(shape, a.dtype.char)
            padded[tuple(sl)] = a
            a = padded

    if axis == a.ndim - 1:
        return pfi.execute(a, is_real, is_forward, fct)
    # pocketfft always transforms the last axis; move the target axis
    # there, transform, and move it back.
    moved = swapaxes(a, axis, -1)
    result = pfi.execute(moved, is_real, is_forward, fct)
    return swapaxes(result, axis, -1)
def _get_forward_norm(n, norm):
if n < 1:
raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
if norm is None or norm == "backward":
return 1
elif norm == "ortho":
return sqrt(n)
elif norm == "forward":
return n
raise ValueError(f'Invalid norm value {norm}; should be "backward",'
'"ortho" or "forward".')
def _get_backward_norm(n, norm):
if n < 1:
raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
if norm is None or norm == "backward":
return n
elif norm == "ortho":
return sqrt(n)
elif norm == "forward":
return 1
raise ValueError(f'Invalid norm value {norm}; should be "backward", '
'"ortho" or "forward".')
_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward",
"ortho": "ortho", "forward": "backward"}
def _swap_direction(norm):
try:
return _SWAP_DIRECTION_MAP[norm]
except KeyError:
raise ValueError(f'Invalid norm value {norm}; should be "backward", '
'"ortho" or "forward".')
def _fft_dispatcher(a, n=None, axis=None, norm=None):
    # Dispatcher for the __array_function__ protocol: only the array
    # argument participates in type-based dispatch; n/axis/norm merely
    # mirror the public FFT signatures.
    return (a,)
@array_function_dispatch(_fft_dispatcher)
def fft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional discrete Fourier Transform.

    Evaluates the *n*-point discrete Fourier Transform (DFT) of the input
    along a single axis using the efficient Fast Fourier Transform (FFT)
    algorithm [CT].

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  Shorter input is
        zero-padded and longer input is cropped.  If not given, the length
        of the input along `axis` is used.
    axis : int, optional
        Axis over which to compute the FFT.  Defaults to the last axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`); default is "backward".
        Selects which direction of the forward/backward transform pair is
        scaled and by what factor.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    Raises
    ------
    IndexError
        If `axis` is not a valid axis of `a`.

    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    ifft : The inverse of `fft`.
    fft2, fftn, rfftn, fftfreq

    Notes
    -----
    The FFT computes the DFT efficiently by exploiting symmetries in the
    calculated terms; it is fastest when `n` is a power of 2.  The DFT
    conventions used here are documented in the `numpy.fft` module.  For
    purely real input the output is Hermitian-symmetric.

    References
    ----------
    .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for
            the machine calculation of complex Fourier series,"
            *Math. Comput.* 19: 297-301.
    """
    arr = asarray(a)
    length = arr.shape[axis] if n is None else n
    inv_norm = _get_forward_norm(length, norm)
    return _raw_fft(arr, length, axis, False, True, inv_norm)
@array_function_dispatch(_fft_dispatcher)
def ifft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional inverse discrete Fourier Transform.

    Inverts the one-dimensional *n*-point DFT computed by `fft`, so that
    ``ifft(fft(a)) == a`` to within numerical accuracy.  The input must be
    ordered the same way `fft` returns it:

    * ``a[0]`` holds the zero-frequency term,
    * ``a[1:n//2]`` the positive-frequency terms,
    * ``a[n//2 + 1:]`` the negative-frequency terms, from most negative
      frequency upward.

    For an even number of points, ``a[n//2]`` is the aliased sum of the
    positive and negative Nyquist components (see `numpy.fft`).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  Shorter input is
        zero-padded at the end and longer input is cropped; note that such
        zero-padding can give surprising results — pad beforehand if a
        different scheme is wanted.  If not given, the input length along
        `axis` is used.
    axis : int, optional
        Axis over which to compute the inverse DFT.  Defaults to the last
        axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`); default is "backward".

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    Raises
    ------
    IndexError
        If `axis` is not a valid axis of `a`.

    See Also
    --------
    numpy.fft : An introduction, with definitions and general explanations.
    fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse.
    ifft2, ifftn
    """
    arr = asarray(a)
    length = arr.shape[axis] if n is None else n
    inv_norm = _get_backward_norm(length, norm)
    return _raw_fft(arr, length, axis, False, False, inv_norm)
@array_function_dispatch(_fft_dispatcher)
def rfft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional discrete Fourier Transform for real input.

    Computes the *n*-point DFT of a real-valued array with the FFT
    algorithm.  Because the DFT of real input is Hermitian-symmetric, the
    redundant negative-frequency terms are not computed: the transformed
    axis of the output has length ``n//2 + 1``.

    Parameters
    ----------
    a : array_like
        Input array.  An imaginary part, if present, is silently discarded.
    n : int, optional
        Number of input points along the transformation axis to use.
        Shorter input is zero-padded and longer input is cropped.  If not
        given, the input length along `axis` is used.
    axis : int, optional
        Axis over which to compute the FFT.  Defaults to the last axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`); default is "backward".

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.
        The transformed axis has length ``(n/2)+1`` for even `n` and
        ``(n+1)/2`` for odd `n`.

    Raises
    ------
    IndexError
        If `axis` is not a valid axis of `a`.

    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    irfft : The inverse of `rfft`.
    fft, fftn, rfftn

    Notes
    -----
    With sampling frequency ``fs``, ``A[0]`` holds the zero-frequency term
    (real by Hermitian symmetry).  For even `n`, ``A[-1]`` is the purely
    real term combining the +fs/2 and -fs/2 Nyquist frequencies; for odd
    `n`, ``A[-1]`` holds the largest positive frequency ``fs/2*(n-1)/n``
    and is complex in general.
    """
    arr = asarray(a)
    length = arr.shape[axis] if n is None else n
    inv_norm = _get_forward_norm(length, norm)
    return _raw_fft(arr, length, axis, True, True, inv_norm)
@array_function_dispatch(_fft_dispatcher)
def irfft(a, n=None, axis=-1, norm=None):
    """
    Computes the inverse of `rfft`.

    Inverts the one-dimensional *n*-point DFT of real input computed by
    `rfft`, so that ``irfft(rfft(a), len(a)) == a`` to within numerical
    accuracy (see Notes for why ``len(a)`` is needed).  The input is
    expected in `rfft` order: the real zero-frequency term followed by the
    complex positive-frequency terms in increasing frequency; the
    negative-frequency terms are taken to be the complex conjugates of
    the positive-frequency terms, by Hermitian symmetry.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output.  ``n//2 + 1`` input
        points are needed for `n` output points; longer input is cropped,
        shorter input is zero-padded.  If not given, `n` is taken to be
        ``2*(m-1)`` with ``m`` the input length along `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT.  Defaults to the last
        axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`); default is "backward".

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along `axis`.
        The transformed axis has length `n`, or ``2*(m-1)`` when `n` is
        not given; to obtain an odd output length, `n` must be specified.

    Raises
    ------
    IndexError
        If `axis` is not a valid axis of `a`.

    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
    fft, irfft2, irfftn

    Notes
    -----
    `n` is the length of the result, not of the input.  Padding or
    truncating via `n` adds/removes values at high frequencies, so a
    series can be resampled to `m` points by Fourier interpolation:
    ``a_resamp = irfft(rfft(a), m)``.

    Each hermitian input shape could come from either an odd- or an
    even-length real signal; by default an even output length is assumed,
    placing the last entry at the (purely real) Nyquist frequency.  Give
    the correct original length to avoid losing information.
    """
    arr = asarray(a)
    length = 2 * (arr.shape[axis] - 1) if n is None else n
    inv_norm = _get_backward_norm(length, norm)
    return _raw_fft(arr, length, axis, True, False, inv_norm)
@array_function_dispatch(_fft_dispatcher)
def hfft(a, n=None, axis=-1, norm=None):
    """
    Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
    spectrum.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output; ``n//2 + 1`` input
        points are consumed (cropping or zero-padding as needed).  Defaults
        to ``2*(m - 1)`` where ``m`` is the input length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT.  Defaults to the last axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : ndarray
        Real result of length `n` along the transformed axis.  Because the
        default assumes an even-length original signal, `n` must be given
        explicitly (e.g. ``2*m - 1``) to obtain an odd number of points.

    See Also
    --------
    rfft : The one-dimensional FFT for real input.
    ihfft : The inverse of `hfft`.

    Notes
    -----
    `hfft`/`ihfft` mirror `rfft`/`irfft` for the opposite situation: the
    signal is Hermitian-symmetric in the time domain and real in the
    frequency domain.  The true length of the original signal must be
    supplied to disambiguate odd- from even-length inputs.
    """
    arr = asarray(a)
    if n is None:
        # Default output length corresponds to an even-length original
        # signal whose last input entry sits at the Nyquist frequency.
        n = 2 * (arr.shape[axis] - 1)
    # hfft(a) is irfft(conj(a)) with the normalization direction swapped.
    return irfft(conjugate(arr), n, axis, norm=_swap_direction(norm))
@array_function_dispatch(_fft_dispatcher)
def ihfft(a, n=None, axis=-1, norm=None):
    """
    Compute the inverse FFT of a signal that has Hermitian symmetry.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Number of input points used along the transformation axis, with
        cropping or zero-padding as needed.  Defaults to the input length
        along `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT.  Defaults to the last
        axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : complex ndarray
        The transformed input; the transformed axis has length
        ``n//2 + 1``.

    See Also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` mirror `rfft`/`irfft` for a signal that is
    Hermitian-symmetric in the time domain and real in the frequency
    domain; see `hfft` for the length conventions.
    """
    arr = asarray(a)
    if n is None:
        n = arr.shape[axis]
    # ihfft(a) is conj(rfft(a)) with the normalization direction swapped.
    return conjugate(rfft(arr, n, axis, norm=_swap_direction(norm)))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def _fftn_dispatcher(a, s=None, axes=None, norm=None):
return (a,)
@array_function_dispatch(_fftn_dispatcher)
def fftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Output length along each transformed axis (``s[i]`` pairs with
        ``axes[i]``); the input is cropped or zero-padded to match.
        Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is also omitted.  Repeated
        axes are transformed repeatedly.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : complex ndarray
        The input transformed along the requested axes, with zero
        frequency in the low-order corner of each transformed axis.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` exceeds the number of axes of `a`.

    See Also
    --------
    numpy.fft, ifftn, fft, rfftn, fft2, fftshift
    """
    # Delegate to the shared N-D driver: one 1-D FFT per requested axis.
    return _raw_fftnd(a, s, axes, function=fft, norm=norm)
@array_function_dispatch(_fftn_dispatcher)
def ifftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.

    ``ifftn(fftn(a)) == a`` to within numerical accuracy.  The input must
    be ordered as `fftn` returns it: zero frequency in the low-order
    corner of every transformed axis.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Output length along each transformed axis (``s[i]`` pairs with
        ``axes[i]``); the input is cropped or zero-padded to match.
        Defaults to the input shape along `axes`.  Note that zero-padding
        is applied in the frequency domain, which may be surprising.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is also omitted.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : complex ndarray
        The input transformed along the requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` exceeds the number of axes of `a`.

    See Also
    --------
    numpy.fft, fftn, ifft, ifft2, ifftshift
    """
    # Same driver as fftn, with the inverse 1-D transform per axis.
    return _raw_fftnd(a, s, axes, function=ifft, norm=norm)
@array_function_dispatch(_fftn_dispatcher)
def fft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional discrete Fourier Transform.

    By default the transform is computed over the last two axes of the
    input array; `fft2` is simply `fftn` with a different default for
    `axes`.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Output length along each transformed axis; the input is cropped
        or zero-padded to match.  Defaults to the input shape along
        `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last two
        axes.  A one-element sequence performs a one-dimensional FFT.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : complex ndarray
        The input transformed along the requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` exceeds the number of axes of `a`.

    See Also
    --------
    numpy.fft, ifft2, fft, fftn, fftshift
    """
    # fftn with a two-axis default.
    return _raw_fftnd(a, s, axes, function=fft, norm=norm)
@array_function_dispatch(_fftn_dispatcher)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.

    ``ifft2(fft2(a)) == a`` to within numerical accuracy; `ifft2` is
    simply `ifftn` with a different default for `axes`.  The input must
    be ordered as `fft2` returns it.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Output length along each transformed axis; the input is cropped
        or zero-padded (in the frequency domain) to match.  Defaults to
        the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last two axes.  A one-element sequence performs a
        one-dimensional inverse FFT.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : complex ndarray
        The input transformed along the requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` exceeds the number of axes of `a`.

    See Also
    --------
    numpy.fft, fft2, ifftn, fft, ifft
    """
    # ifftn with a two-axis default.
    return _raw_fftnd(a, s, axes, function=ifft, norm=norm)
@array_function_dispatch(_fftn_dispatcher)
def rfftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.

    The real transform is performed over the last transformed axis (as by
    `rfft`); the remaining axes are transformed as by `fftn`.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Input length to use along each transformed axis, with cropping or
        zero-padding as needed; the final element corresponds to `n` for
        ``rfft(x, n)``.  Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is also omitted.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : complex ndarray
        The transformed input.  The last transformed axis has length
        ``s[-1]//2 + 1``; the remaining axes follow `s` or the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` exceeds the number of axes of `a`.

    See Also
    --------
    irfftn, fft, rfft, fftn, rfft2
    """
    arr = asarray(a)
    s, axes = _cook_nd_args(arr, s, axes)
    # Real transform over the final axis first...
    out = rfft(arr, s[-1], axes[-1], norm)
    # ...then ordinary complex transforms over the remaining axes.
    for dim in range(len(axes) - 1):
        out = fft(out, s[dim], axes[dim], norm)
    return out
@array_function_dispatch(_fftn_dispatcher)
def rfft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional FFT of a real array.

    This is `rfftn` with a two-axis default; see `rfftn` for details.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last two.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : The N-dimensional FFT of real input.
    """
    return rfftn(a, s=s, axes=axes, norm=norm)
@array_function_dispatch(_fftn_dispatcher)
def irfftn(a, s=None, axes=None, norm=None):
    """
    Compute the inverse of `rfftn`.

    ``irfftn(rfftn(a), a.shape) == a`` to within numerical accuracy; the
    shape argument is needed for the same reason ``len(a)`` is needed by
    `irfft`.  The input must be ordered as `rfftn` returns it.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Output length along each transformed axis, with cropping or
        zero-padding as needed; ``s[-1]//2 + 1`` input points are used
        along the final axis.  When omitted, the input shape is used,
        except for the final axis which defaults to ``2*(m - 1)`` for an
        input of length ``m`` there.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last ``len(s)`` axes, or all axes when `s` is also omitted.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : ndarray
        Real result.  Because the default assumes an even-length final
        axis, `s` must be given explicitly to obtain an odd number of
        output points there.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` exceeds the number of axes of `a`.

    See Also
    --------
    rfftn, fft, irfft, irfft2
    """
    arr = asarray(a)
    s, axes = _cook_nd_args(arr, s, axes, invreal=1)
    # Complex inverse transforms over the leading axes first...
    for dim in range(len(axes) - 1):
        arr = ifft(arr, s[dim], axes[dim], norm)
    # ...then the complex-to-real inverse over the final axis.
    return irfft(arr, s[-1], axes[-1], norm)
@array_function_dispatch(_fftn_dispatcher)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the inverse of `rfft2`.

    This is `irfftn` with a two-axis default; see `irfftn` for details.

    Parameters
    ----------
    a : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the real output to the inverse FFT.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`).

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    rfft2, rfft, irfft, irfftn
    """
    return irfftn(a, s=s, axes=axes, norm=norm)
| bsd-3-clause | 2,631,187,690,599,615,000 | 36.172996 | 90 | 0.611313 | false |
aenon/company_10k_analysis | src/company_10k_classifier.py | 1 | 9700 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 07 22:45:13 2016
@author: Team 6
10-K Classifier
"""
#Importing required modules
import os
import re
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from nltk import stem
from sklearn.feature_selection import SelectPercentile, f_classif
from textblob import TextBlob
from sklearn.ensemble import RandomForestClassifier
#Creates the master dataframe from the text files and assigns them labels
def get_dataset(path):
    """Load every MD&A text file under *path* into a labelled DataFrame.

    Files whose names contain "POS" are labelled "pos"; all others "neg".
    Note: this changes the process working directory to *path* (kept for
    backward compatibility with the original behavior).

    Returns a DataFrame with columns ['MDA_Text', 'Sentiment'].
    """
    records = []
    try:
        os.chdir(path)
    except OSError:
        # Report the bad path, then propagate instead of swallowing the
        # error and crashing later on os.listdir (original behavior).
        print("Incorrect path name!")
        raise
    for filename in os.listdir(path):
        # Context manager closes each file; the original leaked one open
        # handle per document.
        with open(filename, 'r') as f:
            label = "pos" if re.search("POS", filename) else "neg"
            records.append([f.read(), label])
    # Naming the columns at construction time also works for an empty
    # folder, where assigning .columns afterwards would raise.
    return pd.DataFrame(records, columns=['MDA_Text', 'Sentiment'])
#Splitting into training and testing set
def split(df,test_ratio):
    """Split *df* into (train, test) with *test_ratio* held out,
    stratified so both parts keep the Sentiment class balance."""
    strata = df['Sentiment']
    return train_test_split(df, test_size=test_ratio, stratify=strata)
#Function to stem words in a string
def stemming(x):
    """Return *x* with each whitespace-delimited token reduced to its
    Snowball (English) stem, re-joined with single spaces."""
    snowball = stem.SnowballStemmer("english")
    return " ".join(snowball.stem(token) for token in x.split())
#Function to remove all non-characters from MDAs
def preprop(dataset):
    """Replace every non-alphabetic character in the MDA_Text column with
    a space, in place, and return the same DataFrame.

    ``regex=True`` is stated explicitly: modern pandas defaults
    ``str.replace`` to literal matching, which would silently stop
    treating ``[^a-zA-Z]`` as a character class.
    """
    dataset['MDA_Text'] = dataset['MDA_Text'].str.replace("[^a-zA-Z]", ' ', regex=True)
    return dataset
#Function to create features of total positive and total negative words based on Loughran McDonald Dictionary
def count_fin_words(lmd,dataset):
    """Add 'Tot_pos'/'Tot_neg' columns to *dataset*: per-document counts of
    Loughran-McDonald positive and negative financial words.

    Parameters
    ----------
    lmd : DataFrame
        The Loughran-McDonald dictionary; must contain at least the
        columns 'Word', 'Positive' and 'Negative' (positive integers mark
        sentiment membership).
    dataset : DataFrame
        Master frame with an 'MDA_Text' column; mutated in place and
        returned.
    """
    #Modifying the Dictionary: keep only sentiment-bearing words and
    #normalize their flags to +1 (positive) / -1 (negative).
    lmd=lmd[['Word','Positive','Negative']]
    # NOTE(review): chained assignment on a slice — may emit
    # SettingWithCopyWarning under modern pandas; behavior kept as-is.
    lmd['Sum']=lmd['Positive']+lmd['Negative']
    lmd=lmd[lmd.Sum != 0]
    lmd=lmd.drop(['Sum'],axis=1)
    lmd.loc[lmd['Positive']>0, 'Positive'] = 1
    lmd.loc[lmd['Negative']>0, 'Negative'] = -1
    # Lower-case to match the lower-cased vectorizer vocabulary below.
    lmd['Word']=lmd['Word'].str.lower()
    #Counting the words in the MDA.  Despite the 'tfidf' variable names,
    #these are raw term counts from CountVectorizer, not TF-IDF weights.
    tf = CountVectorizer(analyzer='word', min_df = 0, stop_words = 'english')
    tfidf_matrix = tf.fit_transform(dataset['MDA_Text'].values)
    feature_names = tf.get_feature_names()
    tfidf_array = tfidf_matrix.toarray()
    tfidf_df = pd.DataFrame(tfidf_array)
    tfidf_df.columns = [i.lower() for i in feature_names]
    # Transpose so rows are vocabulary words and columns are documents
    # (columns 0..len(dataset)-1), then expose the word as a column for
    # the merge below.
    tfidf_df = tfidf_df.T
    tfidf_df['Word']=tfidf_df.index
    #Merging the results: inner join keeps only words present in both the
    #corpus vocabulary and the trimmed dictionary.
    result_df = pd.merge(tfidf_df, lmd, how='inner',left_on='Word',right_on='Word')
    col_list=list(result_df)
    result_df_pos=result_df[result_df.Positive==1]
    result_df_neg=result_df[result_df.Negative==-1]
    # NOTE(review): result of this line is discarded — presumably leftover
    # debugging; kept to preserve the block byte-for-byte.
    result_df[col_list[0:len(dataset)]].sum(axis=0)
    #Counting the positive and negative words in a financial context per
    #document (sum down the word axis for each document column).
    pos_words_sum=result_df_pos[col_list[0:len(dataset)]].sum(axis=0)
    neg_words_sum=result_df_neg[col_list[0:len(dataset)]].sum(axis=0)
    #Adding new features to the master dataframe.
    # NOTE(review): .values assignment assumes dataset rows are in the
    # same order as the vectorizer output — true for a fresh RangeIndex.
    dataset['Tot_pos']=pos_words_sum.values
    dataset['Tot_neg']=neg_words_sum.values
    return dataset
#Function to create polarity score feature
def polarity_score(dataset):
    """Append a TextBlob sentiment-polarity column ('Polarity') to `dataset`.

    Polarity is a float in [-1.0, 1.0] computed per MD&A document.
    Mutates and returns `dataset`.
    """
    # The original also accumulated (score, sentiment) pairs into a second
    # list that was never used; that dead code is removed.
    polarity_score=[]
    for mda in dataset['MDA_Text'].values:
        blob=TextBlob(mda)
        polarity_score.append(blob.sentiment.polarity)
    dataset['Polarity']=polarity_score
    return dataset
#Function to add features to the train and test set based on vectorizer
def vect_features(vectorizer,train,test):
    """Fit `vectorizer` on the training MD&A text and append the resulting
    term columns to both `train` and `test`.

    The test set is only transformed (never fitted), so no vocabulary
    information leaks from it.  Returns the augmented (train, test) pair.
    """
    features_train_transformed = vectorizer.fit_transform(train['MDA_Text'].values)
    feature_names = vectorizer.get_feature_names()
    features_train_transformed = features_train_transformed.toarray()
    train_data = pd.DataFrame(features_train_transformed)
    train_data.columns = feature_names
    # Column-wise concat relies on both frames sharing a 0..n-1 index
    # (the caller resets indices after splitting).
    train=pd.concat([train,train_data],axis=1)
    features_test_transformed = vectorizer.transform(test['MDA_Text'].values)
    features_test_transformed = features_test_transformed.toarray()
    test_data = pd.DataFrame(features_test_transformed)
    test_data.columns = feature_names
    test=pd.concat([test,test_data],axis=1)
    return train,test
#Function to create Classification Report
def report(test,predictions):
    """Print accuracy/precision/recall/F1 for `predictions` against the
    'Sentiment' column of `test` ("pos" is the positive class)."""
    # NOTE(review): this crosstab is computed but never printed or
    # returned - presumably it was meant to be displayed; confirm intent.
    pd.crosstab(test['Sentiment'], predictions, rownames=['Actual'], colnames=['Predicted'], margins=True)
    a=accuracy_score(test['Sentiment'],predictions)
    p=precision_score(test['Sentiment'],predictions, pos_label = "pos")
    r=recall_score(test['Sentiment'].values,predictions, pos_label = "pos")
    f=f1_score(test['Sentiment'].values,predictions, pos_label = "pos")
    print "Accuracy = ",a,"\nPrecision =",p,"\nRecall = ",r,"\nF-Score = ",f
#Function to create models and print accuracies
def model(classifier,train,test,column):
    """Fit `classifier` on `train`, predict on `test`, print a report.

    `column` is the index of the first feature column; every column from
    there onwards is fed to the classifier.  Returns the predictions.
    """
    targets = train['Sentiment'].values
    train_data=train.values
    predictors = train_data[0:,column:]
    classifier.fit(predictors,targets)
    test_data=test.values
    predictions=classifier.predict(test_data[0:,column:])
    # Bug fix: the original called report(test_1, ...), silently scoring
    # against the global Model-1 test set regardless of which test set
    # was actually passed in.
    report(test,predictions)
    return predictions
#Reading the Loughran McDonald Dictionary
os.chdir("D:\Joel\UC Berkeley\Courses\IEOR 242 - Applications of Data Analysis\Project\Homework 7")
lmd = pd.read_excel("LoughranMcDonald_MasterDictionary_2014.xlsx")
#Defining the path
path = "D:\Joel\UC Berkeley\Courses\IEOR 242 - Applications of Data Analysis\Project\Homework 7\MDAs"
#Creating the master dataframe
dataset=get_dataset(path)
#Preprocessing the master dataframe
dataset=preprop(dataset)
#Adding total positive and total negative words based on Loughran McDonald Dictionary to the master dataframe
dataset=count_fin_words(lmd,dataset)
#Creating polarity score feature
dataset=polarity_score(dataset)
#Stemming the MD&A Text
stemmer = stem.SnowballStemmer("english")
dataset['MDA_Text']=dataset['MDA_Text'].apply(stemming)
#Splitting to training and testing
train, test = split(dataset,0.25)
train=train.reset_index(drop=True)
test=test.reset_index(drop=True)
#Model 1 - Baseline Model
#Algorithm: Bernoulli Naive Bayes
#Features: Contains all words
vectorizer_1 = CountVectorizer(stop_words='english')
train_1,test_1 = vect_features(vectorizer_1,train,test)
classifier = BernoulliNB(fit_prior=False)
predictions = model(classifier,train_1,test_1,5)
#Model 2
#Algorithm: Bernoulli Naive Bayes
#Features: Contains top 50 words
vectorizer_2 = CountVectorizer(stop_words='english',max_features=50)
train_2,test_2 = vect_features(vectorizer_2,train,test)
classifier = BernoulliNB(fit_prior=False)
# Bug fix: the original evaluated Model 1's feature sets (train_1/test_1)
# here, so Model 2's top-50 features were never actually used.
predictions = model(classifier,train_2,test_2,5)
#Model 3
#Algorithm: Multinomial Naive Bayes
#Features: CountVectorizer of all words
vectorizer_3 = CountVectorizer(stop_words='english')
train_3,test_3 = vect_features(vectorizer_3,train,test)
classifier = MultinomialNB(fit_prior=False)
predictions = model(classifier,train_3,test_3,5)
#Model 4
#Algorithm: Multinomial Naive Bayes
#Features: CountVectorizer of only top 50 words and 2-grams
vectorizer_4 = CountVectorizer(stop_words='english',max_features=50,ngram_range=(1,2),min_df=5,max_df=0.8)
train_4,test_4 = vect_features(vectorizer_4,train,test)
classifier = MultinomialNB(fit_prior=False)
predictions = model(classifier,train_4,test_4,5)
#Model 5
#Algorithm: Multinomial Naive Bayes
#Features: TfidfVectorizer of only top 50 words and 2-grams
vectorizer_5 = TfidfVectorizer(sublinear_tf=True,stop_words='english',max_features=50,ngram_range=(1,2),min_df=5,max_df=0.8)
train_5,test_5 = vect_features(vectorizer_5,train,test)
classifier = MultinomialNB(fit_prior=False)
predictions = model(classifier,train_5,test_5,5)
#Model 6
#Algorithm: Gaussian Naive Bayes
#Features: TfidfVectorizer of only top 50 words and 2-grams,otal positive words, total negative words, polarity score
vectorizer_6 = TfidfVectorizer(sublinear_tf=True,stop_words='english',max_features=50,ngram_range=(1,2),min_df=5,max_df=0.8)
train_6,test_6 = vect_features(vectorizer_6,train,test)
classifier = GaussianNB()
predictions = model(classifier,train_6,test_6,2)
#Model 7
#Algorithm: Uncalibrated Random Forest
#Features: TfidfVectorizer of only top 50 words and 2-grams, total positive words, total negative words, polarity score
vectorizer_7 = TfidfVectorizer(sublinear_tf=True,stop_words='english',max_features=50,ngram_range=(1,2),min_df=5,max_df=0.8)
# Bug fix: the original fitted vectorizer_5 here, leaving vectorizer_7 unused.
train_7,test_7 = vect_features(vectorizer_7,train,test)
classifier = RandomForestClassifier(n_estimators=1000)
predictions = model(classifier,train_7,test_7,2)
#Code to ingore terms of sparse matrix (Not Used)
#selector = SelectPercentile(f_classif,percentile=10)
#selector.fit(features_train_transformed,train['Sentiment'].values)
#features_train_transformed = selector.transform(features_train_transformed).toarray()
#features_test_transformed = selector.transform(features_test_transformed).toarray()
| bsd-2-clause | 3,822,367,098,476,669,000 | 37.917695 | 124 | 0.719897 | false |
JakeColtman/bartpy | tests/test_proposer.py | 1 | 2445 | import unittest
from bartpy.data import make_bartpy_data
from bartpy.samplers.unconstrainedtree.proposer import uniformly_sample_grow_mutation, uniformly_sample_prune_mutation
from bartpy.split import Split
from bartpy.tree import LeafNode, Tree, DecisionNode
import pandas as pd
import numpy as np
class TestPruneTreeMutationProposer(unittest.TestCase):

    def setUp(self):
        """Fixture tree: root `a` splits into leaf `b` and decision `c`,
        which in turn splits into leaves `d` and `e`."""
        self.data = make_bartpy_data(pd.DataFrame({"a": [1, 2]}), np.array([1, 2]), normalize=False)
        self.d = LeafNode(Split(self.data))
        self.e = LeafNode(Split(self.data))
        self.c = DecisionNode(Split(self.data), self.d, self.e)
        self.b = LeafNode(Split(self.data))
        self.a = DecisionNode(Split(self.data), self.b, self.c)
        self.tree = Tree([self.a, self.b, self.c, self.d, self.e])

    def test_proposal_isnt_mutating(self):
        # The proposer must describe the change without applying it.
        mutation = uniformly_sample_prune_mutation(self.tree)
        current_nodes = self.tree.nodes
        self.assertTrue(mutation.existing_node in current_nodes)
        self.assertTrue(mutation.updated_node not in current_nodes)

    def test_types(self):
        # Pruning swaps a decision node for a leaf.
        mutation = uniformly_sample_prune_mutation(self.tree)
        self.assertIsInstance(mutation.updated_node, LeafNode)
        self.assertIsInstance(mutation.existing_node, DecisionNode)
class TestGrowTreeMutationProposer(unittest.TestCase):

    def setUp(self):
        """Same tree shape as the prune tests, but over 1000 random rows so
        that growing a leaf always has candidate split points."""
        covariates = np.random.normal(size=1000)
        target = np.array(np.random.normal(size=1000))
        self.data = make_bartpy_data(pd.DataFrame({"a": covariates}), target)
        self.d = LeafNode(Split(self.data))
        self.e = LeafNode(Split(self.data))
        self.c = DecisionNode(Split(self.data), self.d, self.e)
        self.b = LeafNode(Split(self.data))
        self.a = DecisionNode(Split(self.data), self.b, self.c)
        self.tree = Tree([self.a, self.b, self.c, self.d, self.e])

    def test_proposal_isnt_mutating(self):
        # The proposer must describe the change without applying it.
        mutation = uniformly_sample_grow_mutation(self.tree)
        current_nodes = self.tree.nodes
        self.assertTrue(mutation.existing_node in current_nodes)
        self.assertTrue(mutation.updated_node not in current_nodes)

    def test_types(self):
        # Growing replaces a leaf with a decision node over two fresh leaves.
        mutation = uniformly_sample_grow_mutation(self.tree)
        self.assertIsInstance(mutation.existing_node, LeafNode)
        self.assertIsInstance(mutation.updated_node, DecisionNode)
        self.assertIsInstance(mutation.updated_node.left_child, LeafNode)
        self.assertIsInstance(mutation.updated_node.right_child, LeafNode)
# Allow running this test module directly: `python test_proposer.py`.
if __name__ == '__main__':
    unittest.main()
| mit | -8,221,925,809,584,134,000 | 40.440678 | 125 | 0.693252 | false |
edux300/research | script_create_inbreast_dataset.py | 1 | 6409 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 14:16:54 2017
@author: eduardo
"""
import read_inbreast as readin
import os
from matplotlib import pyplot as plt
import numpy as np
import time
import sys
import pickle
patch_size = 76       # side length (px) of each square patch
safe_padding = 40     # border added around the image before cropping
inv_resize_factor = 12    # downscale denominator applied to the mammograms
resize_factor = float(1/inv_resize_factor)  # NOTE(review): under Python 2 this
                                            # integer division yields 0.0 - confirm py3
jittering_vector = [-5,0,5]  # pixel offsets used to augment positive patches
half_patch_size = int(patch_size/2)
def separate_three_splits(set_, proportions):
    """Partition `set_` into three splits (e.g. train/valid/test).

    `proportions` gives the fractions for the first two splits; the third
    receives whatever remains.  Elements are drawn by popping from the end
    of `set_`, so the input list is consumed in place and the first two
    splits come out in reverse order.
    """
    frac_first, frac_second = proportions[0], proportions[1]
    total = len(set_)
    n_first = round(frac_first * total)
    n_second = round(frac_second * total)
    first = [set_.pop() for _ in range(n_first)]
    second = [set_.pop() for _ in range(n_second)]
    third = list(set_)
    return first, second, third
def take_patches(set_, name, buffer_cluster):
    """Extract positive (mass) and negative (background) patches from every
    image in `set_` and stream them into `buffer_cluster` under split `name`.

    Positive patches are jittered around each mass centre; negative patches
    are taken on a half-patch-stride grid over the whole (padded) image.
    """
    init_time = time.time()
    print("Started taking patches from set",name,"(",len(set_),")","\n")
    counter_outer=0
    for image in set_:
        # Patient id is encoded as the filename prefix before the first "_".
        pat = os.path.basename(image).split("_")[0]
        img_arr = readin.read_img(image)
        #img_arr = readin.resize_image(img_arr)
        img_arr = readin.preprocessing(img_arr,resize_factor)
        masses = readin.get_masses(pat,resize_factor)
        safe_zone = img_arr>0
        safe_zone = np.pad(safe_zone,safe_padding,"constant")
        # Symmetric padding so patches near borders stay in-bounds.
        img_arr = np.pad(img_arr,safe_padding,"symmetric")
        if len(masses)>0:
            points = np.zeros((len(masses),2))
            counter=0
            for mass in masses:
                # Blank mass regions in the safe zone.
                # NOTE(review): safe_zone is built but never consulted when
                # sampling negatives below - confirm this is intended.
                safe_zone[mass[0][1]-half_patch_size+safe_padding:mass[0][1]+half_patch_size+safe_padding,
                          mass[0][0]-half_patch_size+safe_padding:mass[0][0]+half_patch_size+safe_padding] = 0
                points[counter,0] = mass[0][1]+safe_padding
                points[counter,1] = mass[0][0]+safe_padding
                counter+=1
            buffer_cluster.register(name,"pos")
            for mass in masses:
                # 3x3 jitter grid around each mass centre (data augmentation).
                for offsetx in jittering_vector:
                    for offsety in jittering_vector:
                        positive_patch = img_arr[mass[0][1]-half_patch_size+safe_padding+offsetx:mass[0][1]+half_patch_size+safe_padding+offsetx
                                                 ,mass[0][0]-half_patch_size+safe_padding+offsety:mass[0][0]+half_patch_size+safe_padding+offsety]
                        buffer_cluster.registered.update(positive_patch)
        buffer_cluster.register(name,"neg")
        # Dense, half-overlapping grid of background patches.
        for i in range(half_patch_size,img_arr.shape[0]-half_patch_size,half_patch_size):
            for j in range(half_patch_size,img_arr.shape[1]-half_patch_size,half_patch_size):
                negative_patch = img_arr[i-half_patch_size:i+half_patch_size,j-half_patch_size:j+half_patch_size]
                buffer_cluster.registered.update(negative_patch)
        counter_outer+=1
        sys.stdout.write("\r \x1b[K Current progress: "+"{:2.1%}".format(counter_outer/len(set_)))
        sys.stdout.flush()
    # Flush whatever is left in both buffers for this split.
    buffer_cluster.register(name,"pos")
    buffer_cluster.registered.save()
    buffer_cluster.register(name,"neg")
    buffer_cluster.registered.save()
    print("\nFinished:",time.time()-init_time)
def run(inv_resize_factor):
    """Create the output folder for this resize factor and extract patches
    for the train/validation/test splits (module-level tr/va/te)."""
    folder = str(patch_size)+"_"+str(inv_resize_factor)+"_INbreast_patches_preprocessed"
    # NOTE(review): hard-coded user path; makedirs raises if it already exists.
    os.makedirs("/home/eduardo/tese/data/"+folder+"/")
    buffer_cluster = Buffer_Cluster(50000,"/home/eduardo/tese/data/"+folder+"/",
                                    ["tr_neg","tr_pos","va_neg","va_pos","te_neg","te_pos"])
    take_patches(tr,"tr",buffer_cluster)
    take_patches(va,"va",buffer_cluster)
    take_patches(te,"te",buffer_cluster)
class Buffer:
    """Fixed-capacity accumulator of square patches, flushed to .npy files."""

    def __init__(self, size, path, name):
        self.path = path
        self.name = name
        # Pre-allocated storage; module-level patch_size fixes the patch shape.
        self.patches = np.zeros((size, patch_size, patch_size))
        self.counter = 0   # patches currently held
        self.total = 0     # number of files written so far

    def update(self, ptch):
        """Store one patch; flush automatically once the buffer fills up."""
        self.patches[self.counter] = ptch
        self.counter += 1
        if self.needs_save():
            self.save()

    def save(self):
        """Write the held patches to `<path><name>_<total>.npy` and reset."""
        out_name = "%s%s_%d" % (self.path, self.name, self.total)
        np.save(out_name, self.patches[:self.counter])
        self.total += 1
        self.counter = 0

    def not_empty(self):
        """True when at least one patch is waiting to be flushed."""
        return self.counter != 0

    def needs_save(self):
        """True when the pre-allocated storage is completely full."""
        return self.counter == self.patches.shape[0]
class Buffer_Cluster:
    """Holds one Buffer per (split, label) pair and tracks the active one."""

    def __init__(self, sizes, paths, names):
        self.tr_neg = Buffer(sizes, paths, names[0])
        self.tr_pos = Buffer(sizes, paths, names[1])
        self.va_neg = Buffer(sizes, paths, names[2])
        self.va_pos = Buffer(sizes, paths, names[3])
        self.te_neg = Buffer(sizes, paths, names[4])
        self.te_pos = Buffer(sizes, paths, names[5])
        self.registered = None

    def register(self, set_, lbl):
        """Select the buffer for split `set_` ('tr'/'va'/'te') and label
        `lbl` ('neg'/'pos').  An unknown combination leaves the current
        selection untouched, exactly like the original if/elif chains."""
        selection = {
            ("tr", "neg"): self.tr_neg, ("tr", "pos"): self.tr_pos,
            ("va", "neg"): self.va_neg, ("va", "pos"): self.va_pos,
            ("te", "neg"): self.te_neg, ("te", "pos"): self.te_pos,
        }
        self.registered = selection.get((set_, lbl), self.registered)
# Persist the train/valid/test split so repeated runs reuse the same files.
sets_path = "/home/eduardo/tese/data/splits_info"
if not os.path.isfile(sets_path):
    with_masses,no_masses = readin.separate_sets()
    # Split images with and without masses separately so every split keeps
    # the same ratio of positive cases.
    tr,va,te = separate_three_splits(with_masses,[0.6,0.2])
    atr,ava,ate = separate_three_splits(no_masses,[0.6,0.2])
    tr = tr + atr
    va = va + ava
    te = te + ate
    splits = tr,va,te
    # NOTE(review): the open() handles below are never explicitly closed.
    pickle.dump(splits,open(sets_path,"wb"))
else:
    print("loaded splits")
    splits = pickle.load(open(sets_path,"rb"))
    tr,va,te = splits
run(inv_resize_factor)
"""
for inv_resize_factor in [12,16,20,24,28,32]:
readin.resize_factor = float(1/inv_resize_factor)
print("\n\n -------------------------------- \n Running Inverse Factor:",inv_resize_factor)
run(inv_resize_factor)
"""
| apache-2.0 | -8,263,579,726,650,916,000 | 30.571429 | 141 | 0.565455 | false |
cggh/scikit-allel | allel/stats/ld.py | 1 | 9253 | # -*- coding: utf-8 -*-
import numpy as np
from allel.stats.window import windowed_statistic
from allel.util import asarray_ndim, ensure_square
from allel.chunked import get_blen_array
from allel.compat import memoryview_safe
from allel.opt.stats import gn_pairwise_corrcoef_int8, gn_pairwise2_corrcoef_int8, \
gn_locate_unlinked_int8
def rogers_huff_r(gn):
    """Estimate the linkage disequilibrium parameter *r* for each pair of
    variants using the method of Rogers and Huff (2008).

    Parameters
    ----------
    gn : array_like, int8, shape (n_variants, n_samples)
        Diploid genotypes at biallelic variants, coded as the number of
        alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).

    Returns
    -------
    r : ndarray, float, shape (n_variants * (n_variants - 1) // 2,)
        Matrix in condensed form.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]],
    ...                          [[0, 0], [1, 1], [0, 0]],
    ...                          [[1, 1], [0, 0], [1, 1]],
    ...                          [[0, 0], [0, 1], [-1, -1]]], dtype='i1')
    >>> gn = g.to_n_alt(fill=-1)
    >>> gn
    array([[ 0,  2,  0],
           [ 0,  2,  0],
           [ 2,  0,  2],
           [ 0,  1, -1]], dtype=int8)
    >>> r = allel.rogers_huff_r(gn)
    >>> r  # doctest: +ELLIPSIS
    array([ 1.       , -1.0000001,  1.       , -1.0000001,  1.       ,
           -1.       ], dtype=float32)
    >>> r ** 2  # doctest: +ELLIPSIS
    array([1.       , 1.0000002, 1.       , 1.0000002, 1.       , 1.       ],
          dtype=float32)
    >>> from scipy.spatial.distance import squareform
    >>> squareform(r ** 2)
    array([[0.       , 1.       , 1.0000002, 1.       ],
           [1.       , 0.       , 1.0000002, 1.       ],
           [1.0000002, 1.0000002, 0.       , 1.       ],
           [1.       , 1.       , 1.       , 0.       ]], dtype=float32)

    """
    # Validate dimensionality/dtype and hand the cython kernel a safe view.
    gn = memoryview_safe(asarray_ndim(gn, 2, dtype='i1'))
    r = gn_pairwise_corrcoef_int8(gn)
    # A single variant pair comes back as a length-1 array; unwrap it.
    return r[0] if r.size == 1 else r
def rogers_huff_r_between(gna, gnb):
    """Estimate the linkage disequilibrium parameter *r* for each pair of
    variants between the two input arrays, using the method of Rogers and
    Huff (2008).

    Parameters
    ----------
    gna, gnb : array_like, int8, shape (n_variants, n_samples)
        Diploid genotypes at biallelic variants, coded as the number of
        alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).

    Returns
    -------
    r : ndarray, float, shape (m_variants, n_variants )
        Matrix in rectangular form.

    """
    # Validate both inputs and hand the cython kernel safe views.
    gna = memoryview_safe(asarray_ndim(gna, 2, dtype='i1'))
    gnb = memoryview_safe(asarray_ndim(gnb, 2, dtype='i1'))
    r = gn_pairwise2_corrcoef_int8(gna, gnb)
    # A single variant pair comes back as a 1x1 matrix; unwrap the scalar.
    return r[0, 0] if r.size == 1 else r
def locate_unlinked(gn, size=100, step=20, threshold=.1, blen=None):
    """Locate variants in approximate linkage equilibrium, where r**2 is
    below the given `threshold`.

    Parameters
    ----------
    gn : array_like, int8, shape (n_variants, n_samples)
        Diploid genotypes at biallelic variants, coded as the number of
        alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).
    size : int
        Window size (number of variants).
    step : int
        Number of variants to advance to the next window.
    threshold : float
        Maximum value of r**2 to include variants.
    blen : int, optional
        Block length to use for chunked computation.

    Returns
    -------
    loc : ndarray, bool, shape (n_variants)
        Boolean array where True items locate variants in approximate
        linkage equilibrium.

    Notes
    -----
    The value of r**2 between each pair of variants is calculated using the
    method of Rogers and Huff (2008).

    """

    # check inputs
    if not hasattr(gn, 'shape') or not hasattr(gn, 'dtype'):
        gn = np.asarray(gn, dtype='i1')
    if gn.ndim != 2:
        raise ValueError('gn must have two dimensions')

    # setup output; u1 so the cython kernel can write into it in place
    loc = np.ones(gn.shape[0], dtype='u1')

    # compute in chunks to avoid loading big arrays into memory
    blen = get_blen_array(gn, blen)
    blen = max(blen, 10*size)  # avoid too small chunks
    n_variants = gn.shape[0]
    for i in range(0, n_variants, blen):
        # N.B., ensure overlap with next window
        j = min(n_variants, i+blen+size)
        gnb = np.asarray(gn[i:j], dtype='i1')
        gnb = memoryview_safe(gnb)
        # slice view: the kernel mutates this section of loc directly
        locb = loc[i:j]
        gn_locate_unlinked_int8(gnb, locb, size, step, threshold)

    return loc.astype('b1')
def windowed_r_squared(pos, gn, size=None, start=None, stop=None, step=None,
                       windows=None, fill=np.nan, percentile=50):
    """Summarise linkage disequilibrium in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        The item positions in ascending order, using 1-based coordinates..
    gn : array_like, int8, shape (n_variants, n_samples)
        Diploid genotypes at biallelic variants, coded as the number of
        alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt).
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    fill : object, optional
        The value to use where a window is empty, i.e., contains no items.
    percentile : int or sequence of ints, optional
        The percentile or percentiles to calculate within each window.

    Returns
    -------
    out : ndarray, shape (n_windows,)
        The value of the statistic for each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop) positions,
        using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        The number of items in each window.

    Notes
    -----
    Linkage disequilibrium (r**2) is calculated using the method of Rogers
    and Huff (2008).

    See Also
    --------
    allel.stats.window.windowed_statistic

    """

    # define the statistic function; two variants are needed because a
    # sequence of percentiles must also expand `fill` to a matching list
    if isinstance(percentile, (list, tuple)):
        fill = [fill for _ in percentile]

        def statistic(gnw):
            r_squared = rogers_huff_r(gnw) ** 2
            return [np.percentile(r_squared, p) for p in percentile]

    else:
        def statistic(gnw):
            r_squared = rogers_huff_r(gnw) ** 2
            return np.percentile(r_squared, percentile)

    return windowed_statistic(pos, gn, statistic, size, start=start,
                              stop=stop, step=step, windows=windows, fill=fill)
def plot_pairwise_ld(m, colorbar=True, ax=None, imshow_kwargs=None):
    """Plot a matrix of genotype linkage disequilibrium values between
    all pairs of variants.

    Parameters
    ----------
    m : array_like
        Array of linkage disequilibrium values in condensed form.
    colorbar : bool, optional
        If True, add a colorbar to the current figure.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    imshow_kwargs : dict-like, optional
        Additional keyword arguments passed through to
        :func:`matplotlib.pyplot.imshow`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    # deferred import so matplotlib is only required when plotting
    import matplotlib.pyplot as plt

    # check inputs; expands condensed form to a square matrix
    m_square = ensure_square(m)

    # blank out lower triangle and flip up/down
    m_square = np.tril(m_square)[::-1, :]

    # set up axes
    if ax is None:
        # make a square figure with enough pixels to represent each variant
        x = m_square.shape[0] / plt.rcParams['figure.dpi']
        x = max(x, plt.rcParams['figure.figsize'][0])
        fig, ax = plt.subplots(figsize=(x, x))
        fig.tight_layout(pad=0)

    # setup imshow arguments; caller-provided values take precedence
    if imshow_kwargs is None:
        imshow_kwargs = dict()
    imshow_kwargs.setdefault('interpolation', 'none')
    imshow_kwargs.setdefault('cmap', 'Greys')
    imshow_kwargs.setdefault('vmin', 0)
    imshow_kwargs.setdefault('vmax', 1)

    # plot as image
    im = ax.imshow(m_square, **imshow_kwargs)

    # tidy up
    ax.set_xticks([])
    ax.set_yticks([])
    for s in 'bottom', 'right':
        ax.spines[s].set_visible(False)
    if colorbar:
        plt.gcf().colorbar(im, shrink=.5, pad=0)

    return ax
| mit | -6,201,206,898,779,084,000 | 31.017301 | 84 | 0.590727 | false |
ecotox/pacfm | pacfm/controller/tools/circos/building/assembler.py | 1 | 3304 | from collections import OrderedDict
from pandas import DataFrame
from pacfm.model import Coordinate, Chromosome, Ideogram
from pacfm.model import LinkCoordinate
class Assembler(object):
    """
    assembles the circos abundance map structure.

    biodb_selector: biodb.Selector instance
    abundance: dictionary of keys= accessions, values= abundances
    """
    def __init__(self, biodb_selector, abundance):
        self.biodb_selector= biodb_selector
        self.abundance= abundance
        self.n_levels= self.biodb_selector.getLevelCount()
        # One ideogram per hierarchy level except the deepest one.
        n_of_ideograms= self.n_levels - 1
        # NOTE: `assembly` and `ideograms` alias the same list on purpose.
        self.assembly= self.ideograms= [None] * n_of_ideograms
        self._init_ideograms()

    def _init_ideograms(self):
        # Ideogram levels are 1-based.
        for i in range(len(self.ideograms)):
            self.ideograms[i] = Ideogram(i+1)

    def construct_base_ideogram(self):
        """Populate the deepest ideogram from the abundance accessions and
        return it; also records per-feature link coordinates on it."""
        enzymes= self.abundance.keys()
        ide= self.ideograms[self.biodb_selector.getLevelCount()-2]
        link_coordinates= {}
        for e in enzymes:
            biodb = self.biodb_selector.getFeatureByAccession(unicode(e))
            # Accessions unknown to the database are silently skipped.
            if not biodb:
                continue
            parents= self.biodb_selector.getParentsByChildID(biodb.id)
            lc= LinkCoordinate(biodb)
            for parent in parents:
                c= Coordinate(biodb)
                c.set_value(self.abundance[e])
                # Lazily create one chromosome per distinct parent feature.
                if parent.name not in ide.names:
                    ide.append(Chromosome(parent, self.biodb_selector))
                lc.add(c)
                chromosome= ide[parent.name]
                chromosome.append(c)
                #print chromosome.name
            link_coordinates[biodb.id]= lc
        ide.set_link_coordinates(link_coordinates)
        return ide

    def assemble_ideograms(self):
        """Propagate the base ideogram's coordinates up the hierarchy,
        building parent chromosomes level by level (deepest first)."""
        base_ide=self.construct_base_ideogram()
        self.ideograms[base_ide.level-1]= base_ide
        for i in range(len(self.ideograms)-1, 0, -1):
            ide= self.ideograms[i]
            chromosomes= ide.get_all()
            for chrom in chromosomes:
                # Only the first parent is used at each level.
                parents= [self.biodb_selector.getParentsByChildID(chrom.id)[0]]
                for parent in parents:
                    curIde= self.ideograms[parent.level -1]
                    if parent.name not in curIde.names:
                        newChrom= Chromosome( parent, self.biodb_selector )
                        newChrom.append_coordinates( chrom.get_coordinates() )
                        curIde.append( newChrom )
                        chrom.append_parent(newChrom)
                    else:
                        myChrom= curIde[parent.name]
                        myChrom.append_coordinates( chrom.get_coordinates() )
                        chrom.append_parent(myChrom)

    def proof_read_coordinates(self):
        # Debug helper: print coordinate counts per level (Python 2 print).
        for ide in self.ideograms:
            coors= ide.get_all_coordinates()
            print ide.level, len(coors)

    def to_data_frame(self):
        """Export the top-level (last) ideogram as a pandas DataFrame."""
        ideogram= self.ideograms[-1]
        return ideogram.to_dataframe()
| mit | -4,182,199,072,962,714,000 | 29.036364 | 79 | 0.537228 | false |
foreversand/QSTK | Examples/Validation.py | 1 | 5524 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on February, 9, 2013
@author: Sourabh Bajaj
@contact: sourabhbajaj@gatech.edu
@summary: Python Validation Script
'''
# NOTE: this is a Python 2 script - `print` statement syntax is used
# throughout, matching QSTK's supported interpreter (2.7.x).
# Printing what Python Version is installed : QSTK uses 2.7
import sys
import platform
print "Python Details : "
print sys.version
print "Your Python Version is : ", platform.python_version()
print "QSTK uses Python 2.7.X (2.7.3 recommended and supported)"
print "Please make sure you're using the correct python version."
print

# Printing the directory you are in
import os
print "Current Directory : ", os.path.abspath('.')
print

# Printing files in the current directory.
print "Files in the current directory"
ls_files = os.listdir('.')
for s_file in ls_files:
    print s_file
print

# Testing the dependencies
# Testing numpy
try:
    import numpy
    print "Numpy is installed and the version used is : ", numpy.__version__
    print "Please make sure you're using version >= 1.6.1"
except:
    sys.exit("Error : Numpy can not be imported or not installed.")
print

# Testing matplotlib
try:
    import matplotlib
    print "Matplotlib is installed and version is : ", matplotlib.__version__
    print "Please make sure you're using version >= 1.1.0"
except:
    sys.exit("Error : Matplotlib can not be imported or not installed.")
print

# Testing Pandas
try:
    import pandas
    print "Pandas is installed and the version used is : ", pandas.__version__
    print "Please make sure you're using version == 0.7.3"
    print "IMPORTANT: No other pandas version is supported except 0.7.3"
    s_pd_version = pandas.__version__
    # Hard pin: grading assumes pandas 0.7.3 behaviour exactly.
    if s_pd_version[:5] != '0.7.3':
        sys.exit("Error : Pandas version should be 0.7.3")
except:
    print "Error : Please install Pandas 0.7.3"
    sys.exit("Error : Pandas can not be imported or not installed.")
print

# Testing Scipy
try:
    import scipy
    print "Scipy is installed and the version used is : ", scipy.__version__
    print "Please make sure you're using version >= 0.9.0"
except:
    sys.exit("Error : Scipy can not be imported or not installed.")
print

# Testing Dateutil
try:
    import dateutil
    print "Dateutil is installed and the version used is : ", dateutil.__version__
    print "Please make sure you're using version == 1.5"
except:
    sys.exit("Error : Dateutil can not be imported or not installed.")
print

# Testing Setuptools
try:
    import setuptools
    print "Setuptools is installed and the version used is : ", setuptools.__version__
    print "Please make sure you're using version >= 0.6"
except:
    sys.exit("Error : Setuptools can not be imported or not installed.")
print

# # Testing CVXOPT
# try:
#     import cvxopt
#     print "CVXOPT is installed and can be imported"
# except:
#     sys.exit("Error : CVXOPT can not be imported or not installed.")
# print

# Testing datetime
# `dt` imported here is used further below for the data check.
try:
    import datetime as dt
    print "datetime is installed and can be imported"
except:
    sys.exit("Error : datetime can not be imported or not installed.")
print

# All dependencies are installed and working
print "All dependencies are installed and working\n"

# Testing import of QSTK
# Testing QSTK
try:
    import QSTK
    print "QSTK is installed and can be imported"
except:
    sys.exit("Error : QSTK can not be imported or not installed.")
print

# Testing QSTK.qstkutil
try:
    import QSTK.qstkutil.tsutil as tsu
    import QSTK.qstkutil.qsdateutil as du
    import QSTK.qstkutil.DataAccess as da
    print "QSTK.qstkutil is installed and can be imported"
except:
    # NOTE(review): builtin exit() used here instead of sys.exit() as above.
    exit("Error : QSTK.qstkutil can not be imported.")
print

# Testing QSTK.qstkstudy
try:
    import QSTK.qstkstudy.EventProfiler
    print "QSTK.qstkstudy is installed and can be imported"
except:
    exit("Error : QSTK.qstkstudy can not be imported.")
print

# Checking that the data installed is correct.
# Start and End date of the charts
dt_start = dt.datetime(2012, 2, 10)
dt_end = dt.datetime(2012, 2, 24)
dt_timeofday = dt.timedelta(hours=16)

# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
ls_symbols = ['MSFT', 'GOOG']

# Creating an object of the dataaccess class with Yahoo as the source.
c_dataobj = da.DataAccess('Yahoo')

# Reading adjusted_close prices
df_close = c_dataobj.get_data(ldt_timestamps, ls_symbols, "close")
print df_close
print

print "\nCorrect Output using the Default Data should be : "
print "Assignments use this data for grading"
print "                     MSFT    GOOG"
print "2012-02-10 16:00:00  29.90  605.91"
print "2012-02-13 16:00:00  29.98  612.20"
print "2012-02-14 16:00:00  29.86  609.76"
print "2012-02-15 16:00:00  29.66  605.56"
print "2012-02-16 16:00:00  30.88  606.52"
print "2012-02-17 16:00:00  30.84  604.64"
print "2012-02-21 16:00:00  31.03  614.00"
print "2012-02-22 16:00:00  30.86  607.94"
print "2012-02-23 16:00:00  30.96  606.11"
print

# Spot-check one known close price to verify the bundled data is intact.
dt_test = dt.datetime(2012, 2, 15, 16)
print "Close price of MSFT on 2012/2/15 is : ", df_close['MSFT'].ix[dt_test]
if df_close['MSFT'].ix[dt_test] == 29.66:
    print "Data looks correct as the close price in default data is 29.66"
else:
    print "Default data used in the assisgnments has close price as 29.66"
    sys.exit("Error : Data has changed so does not match data used in Assignments")
print

print "Everything works fine: You're all set."
| bsd-3-clause | 5,958,295,362,151,716,000 | 29.351648 | 86 | 0.708726 | false |
bsautermeister/machine-learning-examples | visualization/keras/vgg16_visualize_convnet_activation_heatmap.py | 1 | 4158 | import argparse
import os
import sys
import cv2
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.contrib.keras import applications
from tensorflow.contrib.keras import backend as K
from tensorflow.contrib.keras import preprocessing
import cnn_classification.keras.dogs_cats_dataset as dataset
import cnn_classification.keras.utils as utils
def show_top_predictions(preds, top=3):
    """Print the `top` ImageNet classes with probabilities, one per line."""
    ranked = applications.vgg16.decode_predictions(preds, top=top)[0]
    for rank, entry in enumerate(ranked):
        _, clazz, prob = entry
        print('{}. {:20s}: {:.4f}'.format(rank + 1, clazz, prob))
def get_top_prediction(preds):
    """Return (class_name, probability) of the single best prediction."""
    best = applications.vgg16.decode_predictions(preds, top=1)[0][0]
    _, clazz, prob = best
    return clazz, prob
def normalize_heatmap(heatmap):
    """Clip negatives to zero, then scale so the maximum becomes 1."""
    clipped = np.maximum(heatmap, 0)
    clipped /= np.max(clipped)
    return clipped
def superimpose_image_with_heatmap(dest_img_path, src_img_path, heatmap, strength=0.4):
    """Overlay `heatmap` (values in [0, 1]) on the image at `src_img_path`
    and write the blended result to `dest_img_path`.

    `strength` scales the heatmap's contribution relative to the image.
    """
    img = cv2.imread(src_img_path)
    # Resize to the source image (cv2 takes (width, height)).
    heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
    heatmap = np.uint8(255 * heatmap)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    superimposed_img = heatmap * strength + img
    cv2.imwrite(dest_img_path, superimposed_img)
def gradCAM(model, dog_x, class_idx):
    """Compute a Grad-CAM heatmap for class `class_idx` on input `dog_x`.

    Gradients of the class score w.r.t. the last VGG16 conv block are
    pooled per filter and used to weight that block's activations; the
    weighted mean over filters is the (normalized) heatmap.
    """
    model_class_output = model.output[:, class_idx]
    print('model_class_output shape', model_class_output.shape)
    # Hard-coded to VGG16's final conv layer.
    last_conv_layer = model.get_layer('block5_conv3')
    grads_list = K.gradients(model_class_output, last_conv_layer.output)
    grads = grads_list[0]
    # grads shape: (?, 14, 14, 512)
    # Average gradients over batch and spatial axes -> one weight per filter.
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
    # pooled_grads shape: (512,)
    iterate = K.function(inputs=[model.input],
                         outputs=[pooled_grads, last_conv_layer.output[0]])
    pooled_grads_value, conv_layer_output_value = iterate([dog_x])
    # conv_layer_output_value shape: (14, 14, 512)
    # Weight each activation map by its pooled gradient (in place).
    for i in range(last_conv_layer.filters):
        conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
    heatmap = np.mean(conv_layer_output_value, axis=-1)
    # heatmap shape: (14, 14)
    heatmap = normalize_heatmap(heatmap)
    return heatmap
def run(i, model, train_dogs_dir, tag):
img_filename = utils.listdir(train_dogs_dir, recursive=True)[i]
img_path = os.path.join(train_dogs_dir, img_filename)
img_dog = preprocessing.image.load_img(img_path, target_size=(224, 224))
os.makedirs('tmp/output', exist_ok=True)
processed_img_path = 'tmp/output/processed-{}-{}.png'.format(tag, i)
img_dog.save(processed_img_path)
dog_x = preprocessing.image.img_to_array(img_dog)
dog_x = np.expand_dims(dog_x, axis=0)
dog_x = applications.vgg16.preprocess_input(dog_x)
preds = model.predict(dog_x)
show_top_predictions(preds, top=3)
clazz, prob = get_top_prediction(preds)
class_idx = np.argmax(preds[0])
heatmap = gradCAM(model, dog_x, class_idx)
if FLAGS.show_intermediate_heatmap:
plt.matshow(heatmap)
plt.show()
superimposed_output_path = 'tmp/output/superimposed-{}-{}-{}-{}.png'.format(tag, i, clazz, int(round(prob * 100)))
superimpose_image_with_heatmap(superimposed_output_path, img_path, heatmap, strength=0.4)
def main(_):
model = applications.VGG16(weights='imagenet')
# load images for visualization
train_dir, _, _ = dataset.prepare(train_size=2 * FLAGS.n_per_class, valid_size=0, test_size=0)
train_dogs_dir = os.path.join(train_dir, 'dogs')
train_cats_dir = os.path.join(train_dir, 'cats')
for i in range(FLAGS.n_per_class):
run(i, model, train_dogs_dir, 'dog')
run(i, model, train_cats_dir, 'cat')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--n_per_class', type=int, default=10,
help='The number of animals per class')
parser.add_argument('--show_intermediate_heatmap', type=bool, default=False,
help='Show intermediate heatmap with matplotlib')
FLAGS, unparsed = parser.parse_known_args()
main([sys.argv[0]] + unparsed)
| mit | -6,352,257,524,204,803,000 | 33.363636 | 118 | 0.669072 | false |
huazhisong/race_code | baidu_xijiao/codes/input_helper.py | 1 | 8436 | # 100中图片
# %%
from sklearn.preprocessing import label_binarize
import tensorflow as tf
import numpy as np
import os
# %%
file = 'train_data.txt'
# change data to real predictions
def get_real_label(data, file = 'train_data.txt', trainable=True):
'''
Args:
data: predi
Returns:
list of images and labels
'''
image_list = []
label_list = []
current_dir = os.path.abspath('../../../data/badu_xijiao/train/')
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
infos = line.split(' ')
if trainable:
image_path = os.path.abspath(
os.path.join(current_dir,'train', infos[0] + '.jpg'))
else:
image_path = os.path.abspath(
os.path.join(current_dir,'test1', infos[0] + '.jpg'))
label = infos[1]
if tf.gfile.Exists(image_path):
image_list.append(image_path)
label_list.append(label)
label_dict = np.array(list(set(label_list)), dtype=np.int32)
label_dict.sort()
return np.array([label_dict[int(index)] for index in data])
# loading image paths and labels
def get_files(file = 'train_data.txt', trainable=True):
'''
Args:
file_dir: file directory
Returns:
list of images and labels
'''
image_list = []
label_list = []
# for file in os.listdir(file_dir):
# name = file.split(sep='.')
# if name[0] == 'cat':
# cats.append(file_dir + file)
# label_cats.append(0)
# else:
# dogs.append(file_dir + file)
# label_dogs.append(1)
# print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))
current_dir = os.path.abspath('../../../data/badu_xijiao/train/')
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
infos = line.split(' ')
if trainable:
image_path = os.path.abspath(
os.path.join(current_dir,'train', infos[0] + '.jpg'))
else:
image_path = os.path.abspath(
os.path.join(current_dir,'test1', infos[0] + '.jpg'))
label = infos[1]
if tf.gfile.Exists(image_path):
image_list.append(image_path)
label_list.append(label)
label = [np.argmax(label) for
label in label_binarize(label_list, classes=list(set(label_list)))]
temp = np.array([image_list, label])
temp = temp.transpose()
np.random.shuffle(temp)
image_list = list(temp[:, 0])
label_list = list(temp[:, 1])
label_list = [int(i) for i in label_list]
return image_list, label_list
# %%
def get_dev_batch(image, label, image_W, image_H, batch_size, capacity):
'''
Args:
image: list type
label: list type
image_W: image width
image_H: image height
batch_size: batch size
capacity: the maximum elements in queue
Returns:
image_batch: 4D tensor [batch_size, width, height, 3], dtype=tf.float32
label_batch: 1D tensor [batch_size], dtype=tf.int32
'''
image = tf.cast(image, tf.string)
label = tf.cast(label, tf.int32)
# make an input queue
input_queue = tf.train.slice_input_producer([image, label])
label = input_queue[1]
image_contents = tf.read_file(input_queue[0])
image = tf.image.decode_jpeg(image_contents, channels=3)
image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
# Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_standardization(image)
image.set_shape([image_W, image_H, 3])
image_batch, label_batch = tf.train.batch([image, label],
batch_size=batch_size,
num_threads=64,
capacity=capacity)
# you can also use shuffle_batch
# image_batch, label_batch = tf.train.shuffle_batch([image,label],
# batch_size=BATCH_SIZE,
# num_threads=64,
# capacity=CAPACITY,
# min_after_dequeue=CAPACITY-1)
label_batch = tf.reshape(label_batch, [batch_size])
image_batch = tf.cast(image_batch, tf.float32)
return image_batch, label_batch
# %%
def get_batch(image, label, image_W, image_H, batch_size, capacity):
'''
Args:
image: list type
label: list type
image_W: image width
image_H: image height
batch_size: batch size
capacity: the maximum elements in queue
Returns:
image_batch: 4D tensor [batch_size, width, height, 3], dtype=tf.float32
label_batch: 1D tensor [batch_size], dtype=tf.int32
'''
image = tf.cast(image, tf.string)
label = tf.cast(label, tf.int32)
# make an input queue
input_queue = tf.train.slice_input_producer([image, label])
label = input_queue[1]
image_contents = tf.read_file(input_queue[0])
image = tf.image.decode_jpeg(image_contents, channels=3)
######################################
# data argumentation should go to here
######################################
image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
# Image processing for training the network. Note the many random
# distortions applied to the image.
reshaped_image = tf.cast(image, tf.float32)
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(reshaped_image, [image_W, image_H, 3])
# Because these operations are not commutative, consider randomizing
# the order their operation.
# NOTE: since per_image_standardization zeros the mean and makes
# the stddev unit, this likely has no effect see tensorflow#1458.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
image = tf.image.per_image_standardization(image)
image.set_shape([image_W, image_H, 3])
image_batch, label_batch = tf.train.batch([image, label],
batch_size=batch_size,
num_threads=64,
capacity=capacity)
# you can also use shuffle_batch
# image_batch, label_batch = tf.train.shuffle_batch([image,label],
# batch_size=BATCH_SIZE,
# num_threads=64,
# capacity=CAPACITY,
# min_after_dequeue=CAPACITY-1)
label_batch = tf.reshape(label_batch, [batch_size])
image_batch = tf.cast(image_batch, tf.float32)
return image_batch, label_batch
# %% TEST
# To test the generated batches of images
# When training the model, DO comment the following codes
#import matplotlib.pyplot as plt
#
#BATCH_SIZE = 2
#CAPACITY = 256
#IMG_W = 300
#IMG_H = 300
#
#file = 'train_data.txt'
#
#image_list, label_list = get_files(file)
#image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
#
#with tf.Session() as sess:
# i = 0
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(coord=coord)
#
# try:
# while not coord.should_stop() and i<1:
#
# img, label = sess.run([image_batch, label_batch])
#
# # just test one batch
# for j in np.arange(BATCH_SIZE):
# print('label: %d' %label[j])
# plt.imshow(img[j,:,:,:])
# plt.show()
# i+=1
#
# except tf.errors.OutOfRangeError:
# print('done!')
# finally:
# coord.request_stop()
# coord.join(threads)
# %%
| gpl-3.0 | 9,115,338,627,568,042,000 | 33.54918 | 97 | 0.539739 | false |
MartinThoma/algorithms | ML/movielens-20m/ml-20m/movies_analysis.py | 1 | 2395 | from collections import Counter
from itertools import combinations
import clana.io
import clana.visualize_cm
import networkx as nx
import numpy as np
import pandas as pd
import progressbar
# Load the data
df = pd.read_csv("movies.csv")
df["genres"] = df["genres"].str.split("|")
# Analyze the data
list_values = [value for valueset in df["genres"].tolist() for value in valueset]
value_count = Counter(list_values)
print("* Movies: {}".format(len(df)))
print("* Unique genres: {}".format(len(value_count)))
print("* Most common:")
most_common = sorted(value_count.items(), key=lambda n: n[1], reverse=True)
for name, count in most_common[:10]:
print(f" {count:>4}x {name}")
unique_genres = sorted(list(value_count.keys()))
def get_biggest_clusters(edges, n=10):
G = nx.Graph()
for authorset in edges.tolist():
for author in authorset:
G.add_node(author)
for authorset in progressbar.progressbar(df["genres"].tolist()[:10_000]):
for author1, author2 in combinations(authorset, 2):
G.add_edge(author1, author2)
print("Edges were added")
components = [c for c in sorted(nx.connected_components(G), key=len, reverse=True)]
return components[:n]
def create_matrix(nodes, edges):
n2i = {node: i for i, node in enumerate(sorted(nodes))}
# node to index
mat = np.zeros((len(nodes), len(nodes)), dtype=np.int32)
for edge in edges:
for a, b in combinations(edge, 2):
if a not in n2i or b not in n2i:
continue
mat[n2i[a]][n2i[b]] += 1
if a != b:
mat[n2i[b]][n2i[a]] += 1
return mat, sorted(nodes)
components = get_biggest_clusters(df["genres"])
print("* Biggest clusters: {}".format([len(el) for el in components]))
component_w_publications = [(author, value_count[author]) for author in components[0]]
component_w_publications = sorted(
component_w_publications, key=lambda n: n[1], reverse=True
)
authors = [author for author, count in component_w_publications[:1_00]]
mat, labels = create_matrix(authors, df["genres"].tolist())
clana.io.write_cm("genre-combinations.json", mat)
clana.io.write_labels("labels.json", labels)
clana.visualize_cm.main(
"genre-combinations.json",
perm_file="",
steps=1_000_000,
labels_file="labels.json",
zero_diagonal=False,
output="cm-genre-combinations.pdf",
)
| mit | 2,289,728,226,151,431,200 | 29.705128 | 87 | 0.65929 | false |
cython-testbed/pandas | pandas/tseries/offsets.py | 1 | 81716 | # -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
import functools
import operator
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.core.dtypes.generic import ABCPeriod
from pandas.core.tools.datetimes import to_datetime
import pandas.core.common as com
# import after tools, dateutil check
from dateutil.easter import easter
from pandas._libs import tslibs, Timestamp, OutOfBoundsDatetime, Timedelta
from pandas.util._decorators import cache_readonly
from pandas._libs.tslibs import (
ccalendar, conversion,
frequencies as libfrequencies)
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import (
ApplyTypeError,
as_datetime, _is_normalized,
_get_calendar, _to_dt64,
apply_index_wraps,
roll_yearday,
shift_month,
BaseOffset)
# Public offset classes re-exported at the package level; the string names
# here must stay in sync with the class definitions below.
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
           'CBMonthEnd', 'CBMonthBegin',
           'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
           'SemiMonthEnd', 'SemiMonthBegin',
           'BusinessHour', 'CustomBusinessHour',
           'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
           'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
           'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
           'Week', 'WeekOfMonth', 'Easter',
           'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
           'DateOffset', 'CalendarDay']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to
# pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def apply_wraps(func):
    """Decorator for offset ``apply`` methods.

    Handles the scalar-path bookkeeping around the wrapped call: NaT
    absorption, timezone stripping/re-localization for DST-adjusting
    offsets, optional midnight normalization, nanosecond preservation,
    and an out-of-bounds fallback to plain ``datetime`` arithmetic.
    """
    @functools.wraps(func)
    def wrapper(self, other):
        if other is tslibs.NaT:
            # NaT is absorbing: any offset applied to NaT yields NaT
            return tslibs.NaT
        elif isinstance(other, (timedelta, Tick, DateOffset)):
            # timedelta path
            return func(self, other)
        elif isinstance(other, (np.datetime64, datetime, date)):
            other = as_timestamp(other)

        tz = getattr(other, 'tzinfo', None)
        nano = getattr(other, 'nanosecond', 0)

        try:
            if self._adjust_dst and isinstance(other, Timestamp):
                # do the arithmetic on wall-clock time; re-localize below
                other = other.tz_localize(None)

            result = func(self, other)

            if self._adjust_dst:
                result = conversion.localize_pydatetime(result, tz)

            result = Timestamp(result)
            if self.normalize:
                result = result.normalize()

            # nanosecond may be deleted depending on offset process
            if not self.normalize and nano != 0:
                if not isinstance(self, Nano) and result.nanosecond != nano:
                    if result.tz is not None:
                        # convert to UTC
                        value = conversion.tz_convert_single(
                            result.value, 'UTC', result.tz)
                    else:
                        value = result.value
                    result = Timestamp(value + nano)

            if tz is not None and result.tzinfo is None:
                result = conversion.localize_pydatetime(result, tz)

        except OutOfBoundsDatetime:
            # value outside the Timestamp-representable range: fall back
            # to plain datetime arithmetic
            result = func(self, as_datetime(other))

            if self.normalize:
                # normalize_date returns normal datetime
                result = tslibs.normalize_date(result)

            if tz is not None and result.tzinfo is None:
                result = conversion.localize_pydatetime(result, tz)

        return result
    return wrapper
# ---------------------------------------------------------------------
# DateOffset
class DateOffset(BaseOffset):
    """
    Standard kind of date increment used for a date range.

    Works exactly like relativedelta in terms of the keyword args you
    pass in, use of the keyword n is discouraged-- you would be better
    off specifying n in the keywords you use, but regardless it is
    there for you. n is needed for DateOffset subclasses.

    DateOffsets work as follows.  Each offset specifies a set of dates
    that conform to the DateOffset.  For example, Bday defines this
    set to be the set of dates that are weekdays (M-F).  To test if a
    date is in the set of a DateOffset dateOffset we can use the
    onOffset method: dateOffset.onOffset(date).

    If a date is not on a valid date, the rollback and rollforward
    methods can be used to roll the date to the nearest valid date
    before/after the date.

    DateOffsets can be created to move dates forward a given number of
    valid dates.  For example, Bday(2) can be added to a date to move
    it two business days forward.  If the date does not start on a
    valid date, first it is moved to a valid date.  Thus pseudo code
    is::

        def __add__(date):
            date = rollback(date)  # does nothing if date is valid
            return date + <n number of periods>

    When a date offset is created for a negative number of periods,
    the date is first rolled forward.  The pseudo code is::

        def __add__(date):
            date = rollforward(date)  # does nothing if date is valid
            return date + <n number of periods>

    Zero presents a problem.  Should it roll forward or back?  We
    arbitrarily have it rollforward:

    date + BDay(0) == BDay.rollforward(date)

    Since 0 is a bit weird, we suggest avoiding its use.

    Parameters
    ----------
    n : int, default 1
        The number of time periods the offset represents.
    normalize : bool, default False
        Whether to round the result of a DateOffset addition down to the
        previous midnight.
    **kwds
        Temporal parameter that add to or replace the offset value.

        Parameters that **add** to the offset (like Timedelta):

        - years
        - months
        - weeks
        - days
        - hours
        - minutes
        - seconds
        - microseconds
        - nanoseconds

        Parameters that **replace** the offset value:

        - year
        - month
        - day
        - weekday
        - hour
        - minute
        - second
        - microsecond
        - nanosecond

    See Also
    --------
    dateutil.relativedelta.relativedelta

    Examples
    --------
    >>> ts = pd.Timestamp('2017-01-01 09:10:11')
    >>> ts + DateOffset(months=3)
    Timestamp('2017-04-01 09:10:11')

    >>> ts = pd.Timestamp('2017-01-01 09:10:11')
    >>> ts + DateOffset(month=3)
    Timestamp('2017-03-01 09:10:11')
    """
    # memoized re-wrap of the base class's _params property
    _params = cache_readonly(BaseOffset._params.fget)
    _use_relativedelta = False
    _adjust_dst = False
    _attributes = frozenset(['n', 'normalize'] +
                            list(liboffsets.relativedelta_kwds))

    # default for prior pickles
    normalize = False

    def __init__(self, n=1, normalize=False, **kwds):
        BaseOffset.__init__(self, n, normalize)

        off, use_rd = liboffsets._determine_offset(kwds)
        # NOTE(review): object.__setattr__ is used throughout this module,
        # presumably because normal attribute assignment on offsets is
        # restricted (immutability) — confirm against BaseOffset.
        object.__setattr__(self, "_offset", off)
        object.__setattr__(self, "_use_relativedelta", use_rd)
        for key in kwds:
            val = kwds[key]
            object.__setattr__(self, key, val)

    @apply_wraps
    def apply(self, other):
        """Apply this offset to a scalar datetime-like ``other``."""
        if self._use_relativedelta:
            other = as_datetime(other)

        if len(self.kwds) > 0:
            tzinfo = getattr(other, 'tzinfo', None)
            if tzinfo is not None and self._use_relativedelta:
                # perform calculation in UTC
                other = other.replace(tzinfo=None)

            if self.n > 0:
                for i in range(self.n):
                    other = other + self._offset
            else:
                for i in range(-self.n):
                    other = other - self._offset

            if tzinfo is not None and self._use_relativedelta:
                # bring tz back from UTC calculation
                other = conversion.localize_pydatetime(other, tzinfo)

            return as_timestamp(other)
        else:
            # no keywords: plain shift by n calendar days
            return other + timedelta(self.n)

    @apply_index_wraps
    def apply_index(self, i):
        """
        Vectorized apply of DateOffset to DatetimeIndex,
        raises NotImplementedError for offsets without a
        vectorized implementation

        Parameters
        ----------
        i : DatetimeIndex

        Returns
        -------
        y : DatetimeIndex
        """

        if type(self) is not DateOffset:
            raise NotImplementedError("DateOffset subclass {name} "
                                      "does not have a vectorized "
                                      "implementation".format(
                                          name=self.__class__.__name__))
        kwds = self.kwds
        relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours',
                              'minutes', 'seconds', 'microseconds'}
        # relativedelta/_offset path only valid for base DateOffset
        if (self._use_relativedelta and
                set(kwds).issubset(relativedelta_fast)):

            # year/month components collapse to a single month shift
            months = ((kwds.get('years', 0) * 12 +
                       kwds.get('months', 0)) * self.n)
            if months:
                shifted = liboffsets.shift_months(i.asi8, months)
                i = i._shallow_copy(shifted)

            weeks = (kwds.get('weeks', 0)) * self.n
            if weeks:
                i = (i.to_period('W') + weeks).to_timestamp() + \
                    i.to_perioddelta('W')

            timedelta_kwds = {k: v for k, v in kwds.items()
                              if k in ['days', 'hours', 'minutes',
                                       'seconds', 'microseconds']}
            if timedelta_kwds:
                delta = Timedelta(**timedelta_kwds)
                i = i + (self.n * delta)
            return i
        elif not self._use_relativedelta and hasattr(self, '_offset'):
            # timedelta
            return i + (self._offset * self.n)
        else:
            # relativedelta with other keywords
            kwd = set(kwds) - relativedelta_fast
            raise NotImplementedError("DateOffset with relativedelta "
                                      "keyword(s) {kwd} not able to be "
                                      "applied vectorized".format(kwd=kwd))

    def isAnchored(self):
        # TODO: Does this make sense for the general case?  It would help
        # if there were a canonical docstring for what isAnchored means.
        return (self.n == 1)

    # TODO: Combine this with BusinessMixin version by defining a whitelisted
    # set of attributes on each object rather than the existing behavior of
    # iterating over internal ``__dict__``
    def _repr_attrs(self):
        """Return the ``: attr=value, ...`` suffix used by ``repr``."""
        exclude = {'n', 'inc', 'normalize'}
        attrs = []
        for attr in sorted(self.__dict__):
            if attr.startswith('_') or attr == 'kwds':
                continue
            elif attr not in exclude:
                value = getattr(self, attr)
                attrs.append('{attr}={value}'.format(attr=attr, value=value))

        out = ''
        if attrs:
            out += ': ' + ', '.join(attrs)
        return out

    @property
    def name(self):
        # alias for the frequency rule code, e.g. 'B' for BusinessDay
        return self.rule_code

    def rollback(self, dt):
        """Roll provided date backward to next offset only if not on offset"""
        dt = as_timestamp(dt)
        if not self.onOffset(dt):
            dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
        return dt

    def rollforward(self, dt):
        """Roll provided date forward to next offset only if not on offset"""
        dt = as_timestamp(dt)
        if not self.onOffset(dt):
            dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
        return dt

    def onOffset(self, dt):
        """Return True if ``dt`` lies in the set of dates this offset defines."""
        if self.normalize and not _is_normalized(dt):
            return False
        # XXX, see #1395
        if type(self) == DateOffset or isinstance(self, Tick):
            return True

        # Default (slow) method for determining if some date is a member of the
        # date range generated by this offset. Subclasses may have this
        # re-implemented in a nicer way.
        a = dt
        b = ((dt + self) - self)
        return a == b

    # way to get around weirdness with rule_code
    @property
    def _prefix(self):
        raise NotImplementedError('Prefix not defined')

    @property
    def rule_code(self):
        return self._prefix

    @cache_readonly
    def freqstr(self):
        """Frequency string, e.g. '2B' for two business days."""
        try:
            code = self.rule_code
        except NotImplementedError:
            # abstract offsets without a prefix fall back to repr
            return repr(self)

        if self.n != 1:
            fstr = '{n}{code}'.format(n=self.n, code=code)
        else:
            fstr = code

        try:
            if self._offset:
                fstr += self._offset_str()
        except AttributeError:
            # TODO: standardize `_offset` vs `offset` naming convention
            pass

        return fstr

    def _offset_str(self):
        # subclasses with an extra timedelta override this (see BusinessDay)
        return ''

    @property
    def nanos(self):
        raise ValueError("{name} is a non-fixed frequency".format(name=self))
class SingleConstructorOffset(DateOffset):

    @classmethod
    def _from_name(cls, suffix=None):
        """Construct the offset from a frequency-string suffix.

        The base implementation accepts no suffix and simply calls the
        class with no arguments.
        """
        if not suffix:
            return cls()
        raise ValueError("Bad freq suffix {suffix}".format(suffix=suffix))
class _CustomMixin(object):
    """
    Mixin for classes that define and validate calendar, holidays,
    and weekdays attributes
    """
    def __init__(self, weekmask, holidays, calendar):
        # normalize the three user-supplied inputs into a single
        # np.busdaycalendar plus a canonical holidays tuple
        calendar, holidays = _get_calendar(weekmask=weekmask,
                                           holidays=holidays,
                                           calendar=calendar)
        # Custom offset instances are identified by the
        # following two attributes. See DateOffset._params()
        # holidays, weekmask
        # (set via object.__setattr__, matching the pattern used by the
        # other offset constructors in this module)
        object.__setattr__(self, "weekmask", weekmask)
        object.__setattr__(self, "holidays", holidays)
        object.__setattr__(self, "calendar", calendar)
class BusinessMixin(object):
    """Mixin providing helpers shared by the business-day/hour offset types."""

    @property
    def offset(self):
        """Alias for the internal ``_offset`` attribute (backward compat)."""
        return self._offset

    def _repr_attrs(self):
        """Return the ``: offset=...`` suffix used by ``repr``, or ''."""
        if not self.offset:
            return ''
        return ': ' + 'offset={!r}'.format(self.offset)
class BusinessDay(BusinessMixin, SingleConstructorOffset):
    """
    DateOffset subclass representing possibly n business days
    """
    _prefix = 'B'
    _adjust_dst = True
    _attributes = frozenset(['n', 'normalize', 'offset'])

    def __init__(self, n=1, normalize=False, offset=timedelta(0)):
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "_offset", offset)

    def _offset_str(self):
        # Render the extra ``offset`` timedelta as a compact freqstr suffix,
        # e.g. '+2D4H' or '-30Min'.
        def get_str(td):
            off_str = ''
            if td.days > 0:
                off_str += str(td.days) + 'D'
            if td.seconds > 0:
                s = td.seconds
                hrs = int(s / 3600)
                if hrs != 0:
                    off_str += str(hrs) + 'H'
                    s -= hrs * 3600
                mts = int(s / 60)
                if mts != 0:
                    off_str += str(mts) + 'Min'
                    s -= mts * 60
                if s != 0:
                    off_str += str(s) + 's'
            if td.microseconds > 0:
                off_str += str(td.microseconds) + 'us'
            return off_str

        if isinstance(self.offset, timedelta):
            zero = timedelta(0, 0, 0)
            if self.offset >= zero:
                off_str = '+' + get_str(self.offset)
            else:
                off_str = '-' + get_str(-self.offset)
            return off_str
        else:
            # non-timedelta offsets fall back to their repr
            return '+' + repr(self.offset)

    @apply_wraps
    def apply(self, other):
        """Shift ``other`` by ``self.n`` business days (weekdays)."""
        if isinstance(other, datetime):
            n = self.n

            wday = other.weekday()

            # avoid slowness below by operating on weeks first
            weeks = n // 5
            if n <= 0 and wday > 4:
                # roll forward
                n += 1

            n -= 5 * weeks

            # n is always >= 0 at this point
            if n == 0 and wday > 4:
                # roll back
                days = 4 - wday
            elif wday > 4:
                # roll forward
                days = (7 - wday) + (n - 1)
            elif wday + n <= 4:
                # shift by n days without leaving the current week
                days = n
            else:
                # shift by n days plus 2 to get past the weekend
                days = n + 2

            result = other + timedelta(days=7 * weeks + days)
            if self.offset:
                result = result + self.offset
            return result

        elif isinstance(other, (timedelta, Tick)):
            # combining with a timedelta folds it into the extra offset
            return BDay(self.n, offset=self.offset + other,
                        normalize=self.normalize)
        else:
            raise ApplyTypeError('Only know how to combine business day with '
                                 'datetime or timedelta.')

    @apply_index_wraps
    def apply_index(self, i):
        """Vectorized shift of a DatetimeIndex by ``self.n`` business days."""
        time = i.to_perioddelta('D')
        # to_period rolls forward to next BDay; track and
        # reduce n where it does when rolling forward
        shifted = (i.to_perioddelta('B') - time).asi8 != 0
        if self.n > 0:
            roll = np.where(shifted, self.n - 1, self.n)
        else:
            roll = self.n

        return (i.to_period('B') + roll).to_timestamp() + time

    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        # Monday=0 .. Friday=4 are business days
        return dt.weekday() < 5
class BusinessHourMixin(BusinessMixin):
    """Shared implementation for ``BusinessHour`` and ``CustomBusinessHour``.

    Provides the opening/closing-time bookkeeping and the core ``apply``
    logic; subclasses supply ``n``/``normalize`` handling and, for the
    custom variant, the holiday-calendar attributes.
    """

    def __init__(self, start='09:00', end='17:00', offset=timedelta(0)):
        # must be validated here to equality check
        start = liboffsets._validate_business_time(start)
        object.__setattr__(self, "start", start)
        end = liboffsets._validate_business_time(end)
        object.__setattr__(self, "end", end)
        object.__setattr__(self, "_offset", offset)

    @cache_readonly
    def next_bday(self):
        """One-business-day offset used for moving to the next business day."""
        if self.n >= 0:
            nb_offset = 1
        else:
            nb_offset = -1
        if self._prefix.startswith('C'):
            # CustomBusinessHour
            return CustomBusinessDay(n=nb_offset,
                                     weekmask=self.weekmask,
                                     holidays=self.holidays,
                                     calendar=self.calendar)
        else:
            return BusinessDay(n=nb_offset)

    @cache_readonly
    def _get_daytime_flag(self):
        """True if business hours fall within one calendar day, False if
        they extend over midnight (e.g. 22:00-04:00)."""
        if self.start == self.end:
            raise ValueError('start and end must not be the same')
        elif self.start < self.end:
            return True
        else:
            return False

    def _next_opening_time(self, other):
        """
        If n is positive, return tomorrow's business day opening time.
        Otherwise yesterday's business day's opening time.

        Opening time always locates on BusinessDay.
        Otherwise, closing time may not if business hour extends over midnight.
        """
        if not self.next_bday.onOffset(other):
            other = other + self.next_bday
        else:
            if self.n >= 0 and self.start < other.time():
                other = other + self.next_bday
            elif self.n < 0 and other.time() < self.start:
                other = other + self.next_bday
        return datetime(other.year, other.month, other.day,
                        self.start.hour, self.start.minute)

    def _prev_opening_time(self, other):
        """
        If n is positive, return yesterday's business day opening time.
        Otherwise yesterday business day's opening time.
        """
        if not self.next_bday.onOffset(other):
            other = other - self.next_bday
        else:
            if self.n >= 0 and other.time() < self.start:
                other = other - self.next_bday
            elif self.n < 0 and other.time() > self.start:
                other = other - self.next_bday
        return datetime(other.year, other.month, other.day,
                        self.start.hour, self.start.minute)

    @cache_readonly
    def _get_business_hours_by_sec(self):
        """
        Return business hours in a day by seconds.
        """
        if self._get_daytime_flag:
            # create dummy datetime to calculate businesshours in a day
            dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
            until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
            return (until - dtstart).total_seconds()
        else:
            # span crosses midnight: measure into the following day
            dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
            until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
            return (until - dtstart).total_seconds()

    @apply_wraps
    def rollback(self, dt):
        """Roll provided date backward to next offset only if not on offset"""
        if not self.onOffset(dt):
            businesshours = self._get_business_hours_by_sec
            if self.n >= 0:
                # snap back to the previous closing time
                dt = self._prev_opening_time(
                    dt) + timedelta(seconds=businesshours)
            else:
                dt = self._next_opening_time(
                    dt) + timedelta(seconds=businesshours)
        return dt

    @apply_wraps
    def rollforward(self, dt):
        """Roll provided date forward to next offset only if not on offset"""
        if not self.onOffset(dt):
            if self.n >= 0:
                return self._next_opening_time(dt)
            else:
                return self._prev_opening_time(dt)
        return dt

    @apply_wraps
    def apply(self, other):
        """Shift the datetime-like ``other`` by ``self.n`` business hours."""
        daytime = self._get_daytime_flag
        businesshours = self._get_business_hours_by_sec
        bhdelta = timedelta(seconds=businesshours)

        if isinstance(other, datetime):
            # used for detecting edge condition
            nanosecond = getattr(other, 'nanosecond', 0)
            # reset timezone and nanosecond
            # other may be a Timestamp, thus not use replace
            other = datetime(other.year, other.month, other.day,
                             other.hour, other.minute,
                             other.second, other.microsecond)
            n = self.n
            if n >= 0:
                if (other.time() == self.end or
                        not self._onOffset(other, businesshours)):
                    other = self._next_opening_time(other)
            else:
                if other.time() == self.start:
                    # adjustment to move to previous business day
                    other = other - timedelta(seconds=1)
                if not self._onOffset(other, businesshours):
                    other = self._next_opening_time(other)
                    other = other + bhdelta

            # split the shift into whole business days (bd) and a
            # remainder r expressed in minutes
            bd, r = divmod(abs(n * 60), businesshours // 60)
            if n < 0:
                bd, r = -bd, -r

            if bd != 0:
                skip_bd = BusinessDay(n=bd)
                # midnight business hour may not on BusinessDay
                if not self.next_bday.onOffset(other):
                    remain = other - self._prev_opening_time(other)
                    other = self._next_opening_time(other + skip_bd) + remain
                else:
                    other = other + skip_bd

            hours, minutes = divmod(r, 60)
            result = other + timedelta(hours=hours, minutes=minutes)

            # because of previous adjustment, time will be larger than start
            if ((daytime and (result.time() < self.start or
                              self.end < result.time())) or
                    not daytime and (self.end < result.time() < self.start)):
                # the remainder spilled outside business hours; carry it
                # over into the adjacent business day
                if n >= 0:
                    bday_edge = self._prev_opening_time(other)
                    bday_edge = bday_edge + bhdelta
                    # calculate remainder
                    bday_remain = result - bday_edge
                    result = self._next_opening_time(other)
                    result += bday_remain
                else:
                    bday_edge = self._next_opening_time(other)
                    bday_remain = result - bday_edge
                    result = self._next_opening_time(result) + bhdelta
                    result += bday_remain

            # edge handling
            if n >= 0:
                if result.time() == self.end:
                    result = self._next_opening_time(result)
            else:
                if result.time() == self.start and nanosecond == 0:
                    # adjustment to move to previous business day
                    result = self._next_opening_time(
                        result - timedelta(seconds=1)) + bhdelta

            return result
        else:
            # unlike BusinessDay, combining with timedeltas/offsets is not
            # supported; only datetime-like values are accepted
            raise ApplyTypeError(
                'Only know how to combine business hour with datetime')

    def onOffset(self, dt):
        """Return True if ``dt`` falls within this offset's business hours."""
        if self.normalize and not _is_normalized(dt):
            return False

        if dt.tzinfo is not None:
            dt = datetime(dt.year, dt.month, dt.day, dt.hour,
                          dt.minute, dt.second, dt.microsecond)
        # Valid BH can be on the different BusinessDay during midnight
        # Distinguish by the time spent from previous opening time
        businesshours = self._get_business_hours_by_sec
        return self._onOffset(dt, businesshours)

    def _onOffset(self, dt, businesshours):
        """
        Slight speedups using calculated values
        """
        # if self.normalize and not _is_normalized(dt):
        #     return False
        # Valid BH can be on the different BusinessDay during midnight
        # Distinguish by the time spent from previous opening time
        if self.n >= 0:
            op = self._prev_opening_time(dt)
        else:
            op = self._next_opening_time(dt)
        span = (dt - op).total_seconds()
        if span <= businesshours:
            return True
        else:
            return False

    def _repr_attrs(self):
        """Append the ``BH=HH:MM-HH:MM`` window to the base repr suffix."""
        out = super(BusinessHourMixin, self)._repr_attrs()
        start = self.start.strftime('%H:%M')
        end = self.end.strftime('%H:%M')
        attrs = ['{prefix}={start}-{end}'.format(prefix=self._prefix,
                                                 start=start, end=end)]
        out += ': ' + ', '.join(attrs)
        return out
class BusinessHour(BusinessHourMixin, SingleConstructorOffset):
    """
    DateOffset subclass representing possibly n business hours

    .. versionadded:: 0.16.1
    """
    _prefix = 'BH'
    _anchor = 0
    _attributes = frozenset(['n', 'normalize', 'start', 'end', 'offset'])

    def __init__(self, n=1, normalize=False, start='09:00',
                 end='17:00', offset=timedelta(0)):
        BaseOffset.__init__(self, n, normalize)
        # start/end validation and attribute setup happen in the mixin
        super(BusinessHour, self).__init__(start=start, end=end, offset=offset)
class CustomBusinessDay(_CustomMixin, BusinessDay):
    """
    DateOffset subclass representing possibly n custom business days,
    excluding holidays

    Parameters
    ----------
    n : int, default 1
    offset : timedelta, default timedelta(0)
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    calendar : pd.HolidayCalendar or np.busdaycalendar
    """
    _cacheable = False
    _prefix = 'C'
    _attributes = frozenset(['n', 'normalize',
                             'weekmask', 'holidays', 'calendar', 'offset'])

    def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
                 holidays=None, calendar=None, offset=timedelta(0)):
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "_offset", offset)

        _CustomMixin.__init__(self, weekmask, holidays, calendar)

    @apply_wraps
    def apply(self, other):
        """Shift ``other`` by ``self.n`` valid days of the custom calendar."""
        # roll direction for landing an off-calendar start date on a valid
        # business day before stepping (numpy.busday_offset semantics)
        if self.n <= 0:
            roll = 'forward'
        else:
            roll = 'backward'

        if isinstance(other, datetime):
            date_in = other
            np_dt = np.datetime64(date_in.date())

            np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
                                          busdaycal=self.calendar)

            dt_date = np_incr_dt.astype(datetime)
            # busday_offset works on dates only; restore the time-of-day
            result = datetime.combine(dt_date, date_in.time())

            if self.offset:
                result = result + self.offset
            return result

        elif isinstance(other, (timedelta, Tick)):
            # combining with a timedelta folds it into the extra offset
            return BDay(self.n, offset=self.offset + other,
                        normalize=self.normalize)
        else:
            raise ApplyTypeError('Only know how to combine trading day with '
                                 'datetime, datetime64 or timedelta.')

    def apply_index(self, i):
        # no vectorized implementation for custom calendars
        raise NotImplementedError

    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        day64 = _to_dt64(dt, 'datetime64[D]')
        return np.is_busday(day64, busdaycal=self.calendar)
class CustomBusinessHour(_CustomMixin, BusinessHourMixin,
                         SingleConstructorOffset):
    """
    DateOffset subclass representing possibly n custom business hours
    .. versionadded:: 0.18.1
    """
    _prefix = 'CBH'
    _anchor = 0
    _attributes = frozenset(['n', 'normalize',
                             'weekmask', 'holidays', 'calendar',
                             'start', 'end', 'offset'])
    def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
                 holidays=None, calendar=None,
                 start='09:00', end='17:00', offset=timedelta(0)):
        # Combines a custom business-day calendar (_CustomMixin) with a
        # daily business-hour window (BusinessHourMixin).
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "_offset", offset)
        _CustomMixin.__init__(self, weekmask, holidays, calendar)
        BusinessHourMixin.__init__(self, start=start, end=end, offset=offset)
# ---------------------------------------------------------------------
# Month-Based Offset Classes
class MonthOffset(SingleConstructorOffset):
    """
    Base class for month-anchored offsets (MonthEnd, MonthBegin and their
    business variants).  Subclasses set ``_day_opt`` to select which day of
    the month the offset anchors to.
    """
    _adjust_dst = True
    _attributes = frozenset(['n', 'normalize'])
    __init__ = BaseOffset.__init__
    @property
    def name(self):
        # NOTE: the historical implementation tested ``self.isAnchored``
        # without calling it; ``isAnchored`` appears to be a method, so the
        # bound-method reference was always truthy and the else-branch
        # (which wrongly indexed MONTH_ALIASES with ``self.n``) was
        # unreachable dead code.  The observable behavior has therefore
        # always been ``rule_code``, which is returned directly here.
        return self.rule_code
    def onOffset(self, dt):
        """Return True if ``dt`` falls exactly on this offset's anchor day."""
        if self.normalize and not _is_normalized(dt):
            return False
        return dt.day == self._get_offset_day(dt)
    @apply_wraps
    def apply(self, other):
        # roll_convention adjusts n when ``other`` sits before/after the
        # anchor day within its own month.
        compare_day = self._get_offset_day(other)
        n = liboffsets.roll_convention(other.day, self.n, compare_day)
        return shift_month(other, n, self._day_opt)
    @apply_index_wraps
    def apply_index(self, i):
        # Vectorized version of ``apply`` for a DatetimeIndex.
        shifted = liboffsets.shift_months(i.asi8, self.n, self._day_opt)
        return i._shallow_copy(shifted)
class MonthEnd(MonthOffset):
    """DateOffset of one month end"""
    # Anchors on the last calendar day of the month.
    _prefix = 'M'
    _day_opt = 'end'
class MonthBegin(MonthOffset):
    """DateOffset of one month at beginning"""
    # Anchors on the first calendar day of the month.
    _prefix = 'MS'
    _day_opt = 'start'
class BusinessMonthEnd(MonthOffset):
    """DateOffset increments between business EOM dates"""
    # Anchors on the last business day of the month.
    _prefix = 'BM'
    _day_opt = 'business_end'
class BusinessMonthBegin(MonthOffset):
    """DateOffset of one business month at beginning"""
    # Anchors on the first business day of the month.
    _prefix = 'BMS'
    _day_opt = 'business_start'
class _CustomBusinessMonth(_CustomMixin, BusinessMixin, MonthOffset):
    """
    DateOffset subclass representing one custom business month, incrementing
    between [BEGIN/END] of month dates
    Parameters
    ----------
    n : int, default 1
    offset : timedelta, default timedelta(0)
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    calendar : pd.HolidayCalendar or np.busdaycalendar
    """
    _cacheable = False
    _attributes = frozenset(['n', 'normalize',
                             'weekmask', 'holidays', 'calendar', 'offset'])
    # Use the generic DateOffset implementations; the MonthOffset
    # vectorized versions do not apply to custom calendars.
    onOffset = DateOffset.onOffset        # override MonthOffset method
    apply_index = DateOffset.apply_index  # override MonthOffset method
    def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
                 holidays=None, calendar=None, offset=timedelta(0)):
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "_offset", offset)
        _CustomMixin.__init__(self, weekmask, holidays, calendar)
    @cache_readonly
    def cbday_roll(self):
        """Define default roll function to be called in apply method"""
        cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds)
        # The 'S' suffix distinguishes begin-of-month ('CBMS') from
        # end-of-month ('CBM') subclasses.
        if self._prefix.endswith('S'):
            # MonthBegin
            roll_func = cbday.rollforward
        else:
            # MonthEnd
            roll_func = cbday.rollback
        return roll_func
    @cache_readonly
    def m_offset(self):
        # Plain (non-custom) month anchor used as the coarse step.
        if self._prefix.endswith('S'):
            # MonthBegin
            moff = MonthBegin(n=1, normalize=False)
        else:
            # MonthEnd
            moff = MonthEnd(n=1, normalize=False)
        return moff
    @cache_readonly
    def month_roll(self):
        """Define default roll function to be called in apply method"""
        if self._prefix.endswith('S'):
            # MonthBegin
            roll_func = self.m_offset.rollback
        else:
            # MonthEnd
            roll_func = self.m_offset.rollforward
        return roll_func
    @apply_wraps
    def apply(self, other):
        # First move to month offset
        cur_month_offset_date = self.month_roll(other)
        # Find this custom month offset
        compare_date = self.cbday_roll(cur_month_offset_date)
        n = liboffsets.roll_convention(other.day, self.n, compare_date.day)
        new = cur_month_offset_date + n * self.m_offset
        result = self.cbday_roll(new)
        return result
class CustomBusinessMonthEnd(_CustomBusinessMonth):
    # TODO(py27): Replace condition with Substitution after dropping Py27
    if _CustomBusinessMonth.__doc__:
        __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end')
    _prefix = 'CBM'
class CustomBusinessMonthBegin(_CustomBusinessMonth):
    # TODO(py27): Replace condition with Substitution after dropping Py27
    if _CustomBusinessMonth.__doc__:
        __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]',
                                                       'beginning')
    _prefix = 'CBMS'
# ---------------------------------------------------------------------
# Semi-Month Based Offset Classes
class SemiMonthOffset(DateOffset):
    """
    Shared implementation for SemiMonthEnd/SemiMonthBegin: two offsets per
    month, one anchored on ``day_of_month`` and one on the month boundary.
    """
    _adjust_dst = True
    _default_day_of_month = 15
    _min_day_of_month = 2
    _attributes = frozenset(['n', 'normalize', 'day_of_month'])
    def __init__(self, n=1, normalize=False, day_of_month=None):
        BaseOffset.__init__(self, n, normalize)
        if day_of_month is None:
            object.__setattr__(self, "day_of_month",
                               self._default_day_of_month)
        else:
            object.__setattr__(self, "day_of_month", int(day_of_month))
        # Subclasses tighten the lower bound via _min_day_of_month.
        if not self._min_day_of_month <= self.day_of_month <= 27:
            msg = 'day_of_month must be {min}<=day_of_month<=27, got {day}'
            raise ValueError(msg.format(min=self._min_day_of_month,
                                        day=self.day_of_month))
    @classmethod
    def _from_name(cls, suffix=None):
        return cls(day_of_month=suffix)
    @property
    def rule_code(self):
        suffix = '-{day_of_month}'.format(day_of_month=self.day_of_month)
        return self._prefix + suffix
    @apply_wraps
    def apply(self, other):
        # shift `other` to self.day_of_month, incrementing `n` if necessary
        n = liboffsets.roll_convention(other.day, self.n, self.day_of_month)
        days_in_month = ccalendar.get_days_in_month(other.year, other.month)
        # For SemiMonthBegin on other.day == 1 and
        # SemiMonthEnd on other.day == days_in_month,
        # shifting `other` to `self.day_of_month` _always_ requires
        # incrementing/decrementing `n`, regardless of whether it is
        # initially positive.
        if type(self) is SemiMonthBegin and (self.n <= 0 and other.day == 1):
            n -= 1
        elif type(self) is SemiMonthEnd and (self.n > 0 and
                                             other.day == days_in_month):
            n += 1
        return self._apply(n, other)
    def _apply(self, n, other):
        """Handle specific apply logic for child classes"""
        raise com.AbstractMethodError(self)
    @apply_index_wraps
    def apply_index(self, i):
        # determine how many days away from the 1st of the month we are
        days_from_start = i.to_perioddelta('M').asi8
        delta = Timedelta(days=self.day_of_month - 1).value
        # get boolean array for each element before the day_of_month
        before_day_of_month = days_from_start < delta
        # get boolean array for each element after the day_of_month
        after_day_of_month = days_from_start > delta
        # determine the correct n for each date in i
        roll = self._get_roll(i, before_day_of_month, after_day_of_month)
        # isolate the time since it will be stripped away on the next line
        time = i.to_perioddelta('D')
        # apply the correct number of months
        # (each full "step" of the offset is half a month, hence roll // 2)
        i = (i.to_period('M') + (roll // 2)).to_timestamp()
        # apply the correct day
        i = self._apply_index_days(i, roll)
        return i + time
    def _get_roll(self, i, before_day_of_month, after_day_of_month):
        """Return an array with the correct n for each date in i.
        The roll array is based on the fact that i gets rolled back to
        the first day of the month.
        """
        raise com.AbstractMethodError(self)
    def _apply_index_days(self, i, roll):
        """Apply the correct day for each date in i"""
        raise com.AbstractMethodError(self)
class SemiMonthEnd(SemiMonthOffset):
    """
    Two DateOffset's per month repeating on the last
    day of the month and day_of_month.
    .. versionadded:: 0.19.0
    Parameters
    ----------
    n: int
    normalize : bool, default False
    day_of_month: int, {1, 2, ..., 27}, default 15
    """
    _prefix = 'SM'
    # Unlike the base class, day_of_month=1 is valid here.
    _min_day_of_month = 1
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        days_in_month = ccalendar.get_days_in_month(dt.year, dt.month)
        return dt.day in (self.day_of_month, days_in_month)
    def _apply(self, n, other):
        # Odd n lands on the month end (day=31 is clamped by shift_month),
        # even n lands on day_of_month.
        months = n // 2
        day = 31 if n % 2 else self.day_of_month
        return shift_month(other, months, day)
    def _get_roll(self, i, before_day_of_month, after_day_of_month):
        n = self.n
        is_month_end = i.is_month_end
        if n > 0:
            roll_end = np.where(is_month_end, 1, 0)
            roll_before = np.where(before_day_of_month, n, n + 1)
            roll = roll_end + roll_before
        elif n == 0:
            roll_after = np.where(after_day_of_month, 2, 0)
            roll_before = np.where(~after_day_of_month, 1, 0)
            roll = roll_before + roll_after
        else:
            roll = np.where(after_day_of_month, n + 2, n + 1)
        return roll
    def _apply_index_days(self, i, roll):
        """Add days portion of offset to DatetimeIndex i
        Parameters
        ----------
        i : DatetimeIndex
        roll : ndarray[int64_t]
        Returns
        -------
        result : DatetimeIndex
        """
        nanos = (roll % 2) * Timedelta(days=self.day_of_month).value
        i += nanos.astype('timedelta64[ns]')
        return i + Timedelta(days=-1)
class SemiMonthBegin(SemiMonthOffset):
    """
    Two DateOffset's per month repeating on the first
    day of the month and day_of_month.
    .. versionadded:: 0.19.0
    Parameters
    ----------
    n: int
    normalize : bool, default False
    day_of_month: int, {2, 3,...,27}, default 15
    """
    _prefix = 'SMS'
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        return dt.day in (1, self.day_of_month)
    def _apply(self, n, other):
        # Odd n lands on the 1st of the month, even n on day_of_month.
        months = n // 2 + n % 2
        day = 1 if n % 2 else self.day_of_month
        return shift_month(other, months, day)
    def _get_roll(self, i, before_day_of_month, after_day_of_month):
        n = self.n
        is_month_start = i.is_month_start
        if n > 0:
            roll = np.where(before_day_of_month, n, n + 1)
        elif n == 0:
            roll_start = np.where(is_month_start, 0, 1)
            roll_after = np.where(after_day_of_month, 1, 0)
            roll = roll_start + roll_after
        else:
            roll_after = np.where(after_day_of_month, n + 2, n + 1)
            roll_start = np.where(is_month_start, -1, 0)
            roll = roll_after + roll_start
        return roll
    def _apply_index_days(self, i, roll):
        """Add days portion of offset to DatetimeIndex i
        Parameters
        ----------
        i : DatetimeIndex
        roll : ndarray[int64_t]
        Returns
        -------
        result : DatetimeIndex
        """
        nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value
        return i + nanos.astype('timedelta64[ns]')
# ---------------------------------------------------------------------
# Week-Based Offset Classes
class Week(DateOffset):
    """
    Weekly offset
    Parameters
    ----------
    weekday : int, default None
        Always generate specific day of week. 0 for Monday
    """
    _adjust_dst = True
    _inc = timedelta(weeks=1)
    _prefix = 'W'
    _attributes = frozenset(['n', 'normalize', 'weekday'])
    def __init__(self, n=1, normalize=False, weekday=None):
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "weekday", weekday)
        if self.weekday is not None:
            if self.weekday < 0 or self.weekday > 6:
                raise ValueError('Day must be 0<=day<=6, got {day}'
                                 .format(day=self.weekday))
    def isAnchored(self):
        # Anchored only for a single week pinned to a specific weekday.
        return (self.n == 1 and self.weekday is not None)
    @apply_wraps
    def apply(self, other):
        if self.weekday is None:
            # Unanchored: a plain multiple of 7 days.
            return other + self.n * self._inc
        k = self.n
        otherDay = other.weekday()
        if otherDay != self.weekday:
            # Roll forward to the anchor weekday; that roll consumes one
            # positive increment.
            other = other + timedelta((self.weekday - otherDay) % 7)
            if k > 0:
                k -= 1
        return other + timedelta(weeks=k)
    @apply_index_wraps
    def apply_index(self, i):
        if self.weekday is None:
            # Unanchored: shift whole weeks, preserving intra-week offset.
            return ((i.to_period('W') + self.n).to_timestamp() +
                    i.to_perioddelta('W'))
        else:
            return self._end_apply_index(i)
    def _end_apply_index(self, dtindex):
        """Add self to the given DatetimeIndex, specialized for case where
        self.weekday is non-null.
        Parameters
        ----------
        dtindex : DatetimeIndex
        Returns
        -------
        result : DatetimeIndex
        """
        off = dtindex.to_perioddelta('D')
        base, mult = libfrequencies.get_freq_code(self.freqstr)
        base_period = dtindex.to_period(base)
        if self.n > 0:
            # when adding, dates on end roll to next
            normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns')
            roll = np.where(base_period.to_timestamp(how='end') == normed,
                            self.n, self.n - 1)
        else:
            roll = self.n
        base = (base_period + roll).to_timestamp(how='end')
        # Re-attach the time-of-day; the 1ns/1D adjustment reverses the
        # period-end convention applied above.
        return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D')
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        elif self.weekday is None:
            # Unanchored weekly offset matches any day.
            return True
        return dt.weekday() == self.weekday
    @property
    def rule_code(self):
        suffix = ''
        if self.weekday is not None:
            weekday = ccalendar.int_to_weekday[self.weekday]
            suffix = '-{weekday}'.format(weekday=weekday)
        return self._prefix + suffix
    @classmethod
    def _from_name(cls, suffix=None):
        if not suffix:
            weekday = None
        else:
            weekday = ccalendar.weekday_to_int[suffix]
        return cls(weekday=weekday)
class _WeekOfMonthMixin(object):
    """Mixin for methods common to WeekOfMonth and LastWeekOfMonth"""
    @apply_wraps
    def apply(self, other):
        """Shift `other` by n occurrences of this offset's day-of-month."""
        target_day = self._get_offset_day(other)
        n = self.n
        # Roll convention: moving forward past the anchor day (or backward
        # before it) consumes one increment.
        if n > 0 and target_day > other.day:
            n -= 1
        elif n <= 0 and target_day < other.day:
            n += 1
        start_of_month = shift_month(other, n, 'start')
        anchor = self._get_offset_day(start_of_month)
        return liboffsets.shift_day(start_of_month, anchor - start_of_month.day)
    def onOffset(self, dt):
        """Return True if `dt` is exactly on this offset's anchor day."""
        if self.normalize and not _is_normalized(dt):
            return False
        return self._get_offset_day(dt) == dt.day
class WeekOfMonth(_WeekOfMonthMixin, DateOffset):
    """
    Describes monthly dates like "the Tuesday of the 2nd week of each month"
    Parameters
    ----------
    n : int
    week : {0, 1, 2, 3, ...}, default 0
        0 is 1st week of month, 1 2nd week, etc.
    weekday : {0, 1, ..., 6}, default 0
        0: Mondays
        1: Tuesdays
        2: Wednesdays
        3: Thursdays
        4: Fridays
        5: Saturdays
        6: Sundays
    """
    _prefix = 'WOM'
    _adjust_dst = True
    _attributes = frozenset(['n', 'normalize', 'week', 'weekday'])
    def __init__(self, n=1, normalize=False, week=0, weekday=0):
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "weekday", weekday)
        object.__setattr__(self, "week", week)
        if self.weekday < 0 or self.weekday > 6:
            raise ValueError('Day must be 0<=day<=6, got {day}'
                             .format(day=self.weekday))
        if self.week < 0 or self.week > 3:
            raise ValueError('Week must be 0<=week<=3, got {week}'
                             .format(week=self.week))
    def _get_offset_day(self, other):
        """
        Find the day in the same month as other that has the same
        weekday as self.weekday and is the self.week'th such day in the month.
        Parameters
        ----------
        other: datetime
        Returns
        -------
        day: int
        """
        # Distance from the 1st of the month to the first self.weekday,
        # then jump self.week whole weeks.
        mstart = datetime(other.year, other.month, 1)
        wday = mstart.weekday()
        shift_days = (self.weekday - wday) % 7
        return 1 + shift_days + self.week * 7
    @property
    def rule_code(self):
        weekday = ccalendar.int_to_weekday.get(self.weekday, '')
        return '{prefix}-{week}{weekday}'.format(prefix=self._prefix,
                                                 week=self.week + 1,
                                                 weekday=weekday)
    @classmethod
    def _from_name(cls, suffix=None):
        if not suffix:
            raise ValueError("Prefix {prefix!r} requires a suffix."
                             .format(prefix=cls._prefix))
        # TODO: handle n here...
        # only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
        week = int(suffix[0]) - 1
        weekday = ccalendar.weekday_to_int[suffix[1:]]
        return cls(week=week, weekday=weekday)
class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset):
    """
    Describes monthly dates in last week of month like "the last Tuesday of
    each month"
    Parameters
    ----------
    n : int, default 1
    weekday : {0, 1, ..., 6}, default 0
        0: Mondays
        1: Tuesdays
        2: Wednesdays
        3: Thursdays
        4: Fridays
        5: Saturdays
        6: Sundays
    """
    _prefix = 'LWOM'
    _adjust_dst = True
    _attributes = frozenset(['n', 'normalize', 'weekday'])
    def __init__(self, n=1, normalize=False, weekday=0):
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "weekday", weekday)
        if self.n == 0:
            raise ValueError('N cannot be 0')
        if self.weekday < 0 or self.weekday > 6:
            raise ValueError('Day must be 0<=day<=6, got {day}'
                             .format(day=self.weekday))
    def _get_offset_day(self, other):
        """
        Find the day in the same month as other that has the same
        weekday as self.weekday and is the last such day in the month.
        Parameters
        ----------
        other: datetime
        Returns
        -------
        day: int
        """
        # Step back from the last day of the month to the nearest
        # preceding (or equal) self.weekday.
        dim = ccalendar.get_days_in_month(other.year, other.month)
        mend = datetime(other.year, other.month, dim)
        wday = mend.weekday()
        shift_days = (wday - self.weekday) % 7
        return dim - shift_days
    @property
    def rule_code(self):
        weekday = ccalendar.int_to_weekday.get(self.weekday, '')
        return '{prefix}-{weekday}'.format(prefix=self._prefix,
                                           weekday=weekday)
    @classmethod
    def _from_name(cls, suffix=None):
        if not suffix:
            raise ValueError("Prefix {prefix!r} requires a suffix."
                             .format(prefix=cls._prefix))
        # TODO: handle n here...
        weekday = ccalendar.weekday_to_int[suffix]
        return cls(weekday=weekday)
# ---------------------------------------------------------------------
# Quarter-Based Offset Classes
class QuarterOffset(DateOffset):
    """Quarter representation - doesn't call super"""
    # Subclasses override these to set their anchor month and the month
    # implied when _from_name receives no suffix.
    _default_startingMonth = None
    _from_name_startingMonth = None
    _adjust_dst = True
    _attributes = frozenset(['n', 'normalize', 'startingMonth'])
    # TODO: Consider combining QuarterOffset and YearOffset __init__ at some
    # point. Also apply_index, onOffset, rule_code if
    # startingMonth vs month attr names are resolved
    def __init__(self, n=1, normalize=False, startingMonth=None):
        BaseOffset.__init__(self, n, normalize)
        if startingMonth is None:
            startingMonth = self._default_startingMonth
        object.__setattr__(self, "startingMonth", startingMonth)
    def isAnchored(self):
        return (self.n == 1 and self.startingMonth is not None)
    @classmethod
    def _from_name(cls, suffix=None):
        kwargs = {}
        if suffix:
            kwargs['startingMonth'] = ccalendar.MONTH_TO_CAL_NUM[suffix]
        else:
            if cls._from_name_startingMonth is not None:
                kwargs['startingMonth'] = cls._from_name_startingMonth
        return cls(**kwargs)
    @property
    def rule_code(self):
        month = ccalendar.MONTH_ALIASES[self.startingMonth]
        return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
    @apply_wraps
    def apply(self, other):
        # months_since: find the calendar quarter containing other.month,
        # e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].
        # Then find the month in that quarter containing an onOffset date for
        # self. `months_since` is the number of months to shift other.month
        # to get to this on-offset month.
        months_since = other.month % 3 - self.startingMonth % 3
        qtrs = liboffsets.roll_qtrday(other, self.n, self.startingMonth,
                                      day_opt=self._day_opt, modby=3)
        months = qtrs * 3 - months_since
        return shift_month(other, months, self._day_opt)
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        # On-offset months are those a multiple of 3 from startingMonth.
        mod_month = (dt.month - self.startingMonth) % 3
        return mod_month == 0 and dt.day == self._get_offset_day(dt)
    @apply_index_wraps
    def apply_index(self, dtindex):
        shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
                                            self.startingMonth, self._day_opt)
        return dtindex._shallow_copy(shifted)
class BQuarterEnd(QuarterOffset):
    """DateOffset increments between business Quarter dates
    startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
    startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
    startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
    """
    _outputName = 'BusinessQuarterEnd'
    _default_startingMonth = 3
    _from_name_startingMonth = 12
    _prefix = 'BQ'
    # Anchors on the last business day of the quarter-ending month.
    _day_opt = 'business_end'
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
    """DateOffset increments between the first business days of quarters."""
    _outputName = "BusinessQuarterBegin"
    # I suspect this is wrong for *all* of them.
    _default_startingMonth = 3
    _from_name_startingMonth = 1
    _prefix = 'BQS'
    # Anchors on the first business day of the quarter-starting month.
    _day_opt = 'business_start'
class QuarterEnd(QuarterOffset):
    """DateOffset increments between business Quarter dates
    startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
    startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
    startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
    """
    _outputName = 'QuarterEnd'
    _default_startingMonth = 3
    _prefix = 'Q'
    # Anchors on the last calendar day of the quarter-ending month.
    _day_opt = 'end'
class QuarterBegin(QuarterOffset):
    """DateOffset increments between the first calendar days of quarters."""
    _outputName = 'QuarterBegin'
    _default_startingMonth = 3
    _from_name_startingMonth = 1
    _prefix = 'QS'
    # Anchors on the first calendar day of the quarter-starting month.
    _day_opt = 'start'
# ---------------------------------------------------------------------
# Year-Based Offset Classes
class YearOffset(DateOffset):
    """DateOffset that just needs a month"""
    _adjust_dst = True
    _attributes = frozenset(['n', 'normalize', 'month'])
    def _get_offset_day(self, other):
        # override BaseOffset method to use self.month instead of other.month
        # TODO: there may be a more performant way to do this
        return liboffsets.get_day_of_month(other.replace(month=self.month),
                                           self._day_opt)
    @apply_wraps
    def apply(self, other):
        # roll_yearday decides whether ``other`` has already passed this
        # year's anchor date, adjusting the year count accordingly.
        years = roll_yearday(other, self.n, self.month, self._day_opt)
        months = years * 12 + (self.month - other.month)
        return shift_month(other, months, self._day_opt)
    @apply_index_wraps
    def apply_index(self, dtindex):
        # Year shifts reuse the quarter machinery with modby=12.
        shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
                                            self.month, self._day_opt,
                                            modby=12)
        return dtindex._shallow_copy(shifted)
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        return dt.month == self.month and dt.day == self._get_offset_day(dt)
    def __init__(self, n=1, normalize=False, month=None):
        BaseOffset.__init__(self, n, normalize)
        month = month if month is not None else self._default_month
        object.__setattr__(self, "month", month)
        if self.month < 1 or self.month > 12:
            raise ValueError('Month must go from 1 to 12')
    @classmethod
    def _from_name(cls, suffix=None):
        kwargs = {}
        if suffix:
            kwargs['month'] = ccalendar.MONTH_TO_CAL_NUM[suffix]
        return cls(**kwargs)
    @property
    def rule_code(self):
        month = ccalendar.MONTH_ALIASES[self.month]
        return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
class BYearEnd(YearOffset):
    """DateOffset increments between business EOM dates"""
    _outputName = 'BusinessYearEnd'
    _default_month = 12
    _prefix = 'BA'
    # Anchors on the last business day of the anchor month.
    _day_opt = 'business_end'
class BYearBegin(YearOffset):
    """DateOffset increments between business year begin dates"""
    _outputName = 'BusinessYearBegin'
    _default_month = 1
    _prefix = 'BAS'
    # Anchors on the first business day of the anchor month.
    _day_opt = 'business_start'
class YearEnd(YearOffset):
    """DateOffset increments between calendar year ends"""
    _default_month = 12
    _prefix = 'A'
    _day_opt = 'end'
class YearBegin(YearOffset):
    """DateOffset increments between calendar year begin dates"""
    _default_month = 1
    _prefix = 'AS'
    _day_opt = 'start'
# ---------------------------------------------------------------------
# Special Offset Classes
class FY5253(DateOffset):
    """
    Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
    It is used by companies that desire that their
    fiscal year always end on the same day of the week.
    It is a method of managing accounting periods.
    It is a common calendar structure for some industries,
    such as retail, manufacturing and parking industry.
    For more information see:
    http://en.wikipedia.org/wiki/4-4-5_calendar
    The year may either:
    - end on the last X day of the Y month.
    - end on the last X day closest to the last day of the Y month.
    X is a specific day of the week.
    Y is a certain month of the year
    Parameters
    ----------
    n : int
    weekday : {0, 1, ..., 6}
        0: Mondays
        1: Tuesdays
        2: Wednesdays
        3: Thursdays
        4: Fridays
        5: Saturdays
        6: Sundays
    startingMonth : The month in which fiscal years end. {1, 2, ... 12}
    variation : str
        {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
    """
    _prefix = 'RE'
    _adjust_dst = True
    # NOTE(review): unlike every other offset class here, 'n' and
    # 'normalize' are absent from _attributes — equality/repr may ignore
    # them for FY5253; confirm whether that is intentional.
    _attributes = frozenset(['weekday', 'startingMonth', 'variation'])
    def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
                 variation="nearest"):
        BaseOffset.__init__(self, n, normalize)
        object.__setattr__(self, "startingMonth", startingMonth)
        object.__setattr__(self, "weekday", weekday)
        object.__setattr__(self, "variation", variation)
        if self.n == 0:
            raise ValueError('N cannot be 0')
        if self.variation not in ["nearest", "last"]:
            raise ValueError('{variation} is not a valid variation'
                             .format(variation=self.variation))
    def isAnchored(self):
        return (self.n == 1 and
                self.startingMonth is not None and
                self.weekday is not None)
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        dt = datetime(dt.year, dt.month, dt.day)
        year_end = self.get_year_end(dt)
        if self.variation == "nearest":
            # We have to check the year end of "this" cal year AND the previous
            return (year_end == dt or
                    self.get_year_end(shift_month(dt, -1, None)) == dt)
        else:
            return year_end == dt
    @apply_wraps
    def apply(self, other):
        # Compute the fiscal year ends surrounding `other`, then adjust n
        # depending on where `other` falls relative to them.
        norm = Timestamp(other).normalize()
        n = self.n
        prev_year = self.get_year_end(
            datetime(other.year - 1, self.startingMonth, 1))
        cur_year = self.get_year_end(
            datetime(other.year, self.startingMonth, 1))
        next_year = self.get_year_end(
            datetime(other.year + 1, self.startingMonth, 1))
        prev_year = conversion.localize_pydatetime(prev_year, other.tzinfo)
        cur_year = conversion.localize_pydatetime(cur_year, other.tzinfo)
        next_year = conversion.localize_pydatetime(next_year, other.tzinfo)
        # Note: next_year.year == other.year + 1, so we will always
        # have other < next_year
        if norm == prev_year:
            n -= 1
        elif norm == cur_year:
            pass
        elif n > 0:
            if norm < prev_year:
                n -= 2
            elif prev_year < norm < cur_year:
                n -= 1
            elif cur_year < norm < next_year:
                pass
        else:
            if cur_year < norm < next_year:
                n += 1
            elif prev_year < norm < cur_year:
                pass
            elif (norm.year == prev_year.year and norm < prev_year and
                  prev_year - norm <= timedelta(6)):
                # GH#14774, error when next_year.year == cur_year.year
                # e.g. prev_year == datetime(2004, 1, 3),
                # other == datetime(2004, 1, 1)
                n -= 1
            else:
                assert False
        shifted = datetime(other.year + n, self.startingMonth, 1)
        result = self.get_year_end(shifted)
        # Preserve the original time-of-day on the shifted date.
        result = datetime(result.year, result.month, result.day,
                          other.hour, other.minute, other.second,
                          other.microsecond)
        return result
    def get_year_end(self, dt):
        """Return the fiscal year-end date for the year containing `dt`."""
        assert dt.tzinfo is None
        dim = ccalendar.get_days_in_month(dt.year, self.startingMonth)
        target_date = datetime(dt.year, self.startingMonth, dim)
        wkday_diff = self.weekday - target_date.weekday()
        if wkday_diff == 0:
            # year_end is the same for "last" and "nearest" cases
            return target_date
        if self.variation == "last":
            days_forward = (wkday_diff % 7) - 7
            # days_forward is always negative, so we always end up
            # in the same year as dt
            return target_date + timedelta(days=days_forward)
        else:
            # variation == "nearest":
            days_forward = wkday_diff % 7
            if days_forward <= 3:
                # The upcoming self.weekday is closer than the previous one
                return target_date + timedelta(days_forward)
            else:
                # The previous self.weekday is closer than the upcoming one
                return target_date + timedelta(days_forward - 7)
    @property
    def rule_code(self):
        prefix = self._prefix
        suffix = self.get_rule_code_suffix()
        return "{prefix}-{suffix}".format(prefix=prefix, suffix=suffix)
    def _get_suffix_prefix(self):
        # 'N' for "nearest" variation, 'L' for "last".
        if self.variation == "nearest":
            return 'N'
        else:
            return 'L'
    def get_rule_code_suffix(self):
        prefix = self._get_suffix_prefix()
        month = ccalendar.MONTH_ALIASES[self.startingMonth]
        weekday = ccalendar.int_to_weekday[self.weekday]
        return '{prefix}-{month}-{weekday}'.format(prefix=prefix, month=month,
                                                   weekday=weekday)
    @classmethod
    def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
        # NOTE: 'varion_code' looks like a typo for 'variation_code'; it is
        # only called positionally, so the name is kept for compatibility.
        if varion_code == "N":
            variation = "nearest"
        elif varion_code == "L":
            variation = "last"
        else:
            raise ValueError("Unable to parse varion_code: "
                             "{code}".format(code=varion_code))
        startingMonth = ccalendar.MONTH_TO_CAL_NUM[startingMonth_code]
        weekday = ccalendar.weekday_to_int[weekday_code]
        return {"weekday": weekday,
                "startingMonth": startingMonth,
                "variation": variation}
    @classmethod
    def _from_name(cls, *args):
        return cls(**cls._parse_suffix(*args))
class FY5253Quarter(DateOffset):
"""
DateOffset increments between business quarter dates
for 52-53 week fiscal year (also known as a 4-4-5 calendar).
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and parking industry.
For more information see:
http://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
qtr_with_extra_week : The quarter number that has the leap
or 14 week when needed. {1,2,3,4}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
_attributes = frozenset(['weekday', 'startingMonth', 'qtr_with_extra_week',
'variation'])
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
qtr_with_extra_week=1, variation="nearest"):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "startingMonth", startingMonth)
object.__setattr__(self, "weekday", weekday)
object.__setattr__(self, "qtr_with_extra_week", qtr_with_extra_week)
object.__setattr__(self, "variation", variation)
if self.n == 0:
raise ValueError('N cannot be 0')
@cache_readonly
def _offset(self):
return FY5253(startingMonth=self.startingMonth,
weekday=self.weekday,
variation=self.variation)
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
def _rollback_to_year(self, other):
"""roll `other` back to the most recent date that was on a fiscal year
end. Return the date of that year-end, the number of full quarters
elapsed between that year-end and other, and the remaining Timedelta
since the most recent quarter-end.
Parameters
----------
other : datetime or Timestamp
Returns
-------
tuple of
prev_year_end : Timestamp giving most recent fiscal year end
num_qtrs : int
tdelta : Timedelta
"""
num_qtrs = 0
norm = Timestamp(other).tz_localize(None)
start = self._offset.rollback(norm)
# Note: start <= norm and self._offset.onOffset(start)
if start < norm:
# roll adjustment
qtr_lens = self.get_weeks(norm)
# check thet qtr_lens is consistent with self._offset addition
end = liboffsets.shift_day(start, days=7 * sum(qtr_lens))
assert self._offset.onOffset(end), (start, end, qtr_lens)
tdelta = norm - start
for qlen in qtr_lens:
if qlen * 7 <= tdelta.days:
num_qtrs += 1
tdelta -= Timedelta(days=qlen * 7)
else:
break
else:
tdelta = Timedelta(0)
# Note: we always have tdelta.value >= 0
return start, num_qtrs, tdelta
@apply_wraps
def apply(self, other):
# Note: self.n == 0 is not allowed.
n = self.n
prev_year_end, num_qtrs, tdelta = self._rollback_to_year(other)
res = prev_year_end
n += num_qtrs
if self.n <= 0 and tdelta.value > 0:
n += 1
# Possible speedup by handling years first.
years = n // 4
if years:
res += self._offset * years
n -= years * 4
# Add an extra day to make *sure* we are getting the quarter lengths
# for the upcoming year, not the previous year
qtr_lens = self.get_weeks(res + Timedelta(days=1))
# Note: we always have 0 <= n < 4
weeks = sum(qtr_lens[:n])
if weeks:
res = liboffsets.shift_day(res, days=weeks * 7)
return res
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
    def year_has_extra_week(self, dt):
        """Return True if dt's fiscal year has 53 weeks (False -> 52)."""
        # Avoid round-down errors --> normalize to get
        # e.g. '370D' instead of '360D23H'
        norm = Timestamp(dt).normalize().tz_localize(None)
        next_year_end = self._offset.rollforward(norm)
        prev_year_end = norm - self._offset
        weeks_in_year = (next_year_end - prev_year_end).days / 7
        # By construction a 52-53 week fiscal year is a whole number of weeks.
        assert weeks_in_year in [52, 53], weeks_in_year
        return weeks_in_year == 53
    def onOffset(self, dt):
        # A date is on-offset if it is a fiscal year end or one of the
        # quarter ends stepped forward from the preceding year end.
        if self.normalize and not _is_normalized(dt):
            return False
        if self._offset.onOffset(dt):
            return True
        # NOTE(review): despite the name, this is the fiscal year end
        # *before* dt (dt minus one year offset) -- confirm naming.
        next_year_end = dt - self._offset
        qtr_lens = self.get_weeks(dt)
        current = next_year_end
        for qtr_len in qtr_lens:
            # Walk forward one quarter at a time, looking for an exact match.
            current = liboffsets.shift_day(current, days=qtr_len * 7)
            if dt == current:
                return True
        return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
qtr = self.qtr_with_extra_week
return "{prefix}-{suffix}-{qtr}".format(prefix=self._prefix,
suffix=suffix, qtr=qtr)
    @classmethod
    def _from_name(cls, *args):
        # Rebuild from a parsed rule code: the leading tokens describe the
        # underlying FY5253 offset, the final token is the quarter number.
        return cls(**dict(FY5253._parse_suffix(*args[:-1]),
                          qtr_with_extra_week=int(args[-1])))
class Easter(DateOffset):
    """
    DateOffset for the Easter holiday using
    logic defined in dateutil. Right now uses
    the revised method which is valid in years
    1583-4099.
    """
    _adjust_dst = True
    _attributes = frozenset(['n', 'normalize'])
    __init__ = BaseOffset.__init__
    @apply_wraps
    def apply(self, other):
        # Easter of `other`'s year, localized to the same timezone so the
        # comparison below is between like-aware datetimes.
        current_easter = easter(other.year)
        current_easter = datetime(current_easter.year,
                                  current_easter.month, current_easter.day)
        current_easter = conversion.localize_pydatetime(current_easter,
                                                        other.tzinfo)
        n = self.n
        # Adjust n so that "this year's Easter" counts correctly depending
        # on whether `other` falls before or after it.
        if n >= 0 and other < current_easter:
            n -= 1
        elif n < 0 and other > current_easter:
            n += 1
        # TODO: Why does this handle the 0 case the opposite of others?
        # NOTE: easter returns a datetime.date so we have to convert to type of
        # other
        new = easter(other.year + n)
        new = datetime(new.year, new.month, new.day, other.hour,
                       other.minute, other.second, other.microsecond)
        return new
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        # On-offset exactly when dt's calendar date is that year's Easter.
        return date(dt.year, dt.month, dt.day) == easter(dt.year)
class CalendarDay(SingleConstructorOffset):
    """
    Calendar day offset. Respects calendar arithmetic as opposed to Day which
    respects absolute time.
    """
    _adjust_dst = True
    _inc = Timedelta(days=1)
    _prefix = 'CD'
    _attributes = frozenset(['n', 'normalize'])
    def __init__(self, n=1, normalize=False):
        BaseOffset.__init__(self, n, normalize)
    @apply_wraps
    def apply(self, other):
        """
        Apply scalar arithmetic with CalendarDay offset. Incoming datetime
        objects can be tz-aware or naive.
        """
        if type(other) == type(self):
            # Add other CalendarDays
            return type(self)(self.n + other.n, normalize=self.normalize)
        tzinfo = getattr(other, 'tzinfo', None)
        if tzinfo is not None:
            # Do the arithmetic on the naive local time, then re-localize,
            # so that the *calendar* day (not 24h of wall time) is added.
            other = other.replace(tzinfo=None)
        other = other + self.n * self._inc
        if tzinfo is not None:
            # This can raise an AmbiguousTimeError or NonExistentTimeError
            other = conversion.localize_pydatetime(other, tzinfo)
        try:
            return as_timestamp(other)
        except TypeError:
            raise TypeError("Cannot perform arithmetic between {other} and "
                            "CalendarDay".format(other=type(other)))
    @apply_index_wraps
    def apply_index(self, i):
        """
        Apply the CalendarDay offset to a DatetimeIndex. Incoming DatetimeIndex
        objects are assumed to be tz_naive
        """
        return i + self.n * self._inc
# ---------------------------------------------------------------------
# Ticks
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(liboffsets._Tick, SingleConstructorOffset):
    """
    Base class for offsets representing a fixed, absolute span of time
    (Day down to Nano).  ``delta`` gives the total Timedelta increment.
    """
    _inc = Timedelta(microseconds=1000)
    _prefix = 'undefined'
    _attributes = frozenset(['n', 'normalize'])

    def __init__(self, n=1, normalize=False):
        BaseOffset.__init__(self, n, normalize)
        if normalize:
            raise ValueError("Tick offset with `normalize=True` are not "
                             "allowed.")  # GH#21427

    # Ordering comparisons delegate to the total duration (``delta``).
    # __eq__ and __ne__ are NOT generated here because they also have to
    # accept frequency strings; they are defined explicitly below.
    # (Previously-generated __eq__/__ne__ assignments were dead code,
    # immediately shadowed by the explicit definitions, and were removed.)
    __gt__ = _tick_comp(operator.gt)
    __ge__ = _tick_comp(operator.ge)
    __lt__ = _tick_comp(operator.lt)
    __le__ = _tick_comp(operator.le)

    def __add__(self, other):
        if isinstance(other, Tick):
            if type(self) == type(other):
                # Same subclass: simply add the counts.
                return type(self)(self.n + other.n)
            else:
                # Different subclasses: combine via the summed Timedelta.
                return _delta_to_tick(self.delta + other.delta)
        elif isinstance(other, ABCPeriod):
            return other + self
        try:
            return self.apply(other)
        except ApplyTypeError:
            return NotImplemented
        except OverflowError:
            raise OverflowError("the add operation between {self} and {other} "
                                "will overflow".format(self=self, other=other))

    def __eq__(self, other):
        # Frequency strings compare equal to the Tick they parse to.
        if isinstance(other, compat.string_types):
            from pandas.tseries.frequencies import to_offset
            other = to_offset(other)
        if isinstance(other, Tick):
            return self.delta == other.delta
        else:
            return False

    # This is identical to DateOffset.__hash__, but has to be redefined here
    # for Python 3, because we've redefined __eq__.
    def __hash__(self):
        return hash(self._params)

    def __ne__(self, other):
        if isinstance(other, compat.string_types):
            from pandas.tseries.frequencies import to_offset
            other = to_offset(other)
        if isinstance(other, Tick):
            return self.delta != other.delta
        else:
            return True

    @property
    def delta(self):
        # Total span represented by this offset, as a Timedelta.
        return self.n * self._inc

    @property
    def nanos(self):
        # Total span in integer nanoseconds.
        return delta_to_nanoseconds(self.delta)

    # TODO: Should Tick have its own apply_index?
    def apply(self, other):
        # Timestamp can handle tz and nano sec, thus no need to use apply_wraps
        if isinstance(other, Timestamp):
            # GH 15126
            # in order to avoid a recursive
            # call of __add__ and __radd__ if there is
            # an exception, when we call using the + operator,
            # we directly call the known method
            result = other.__add__(self)
            # Identity check: `== NotImplemented` would dispatch to
            # Timestamp.__eq__ instead of testing the sentinel itself.
            if result is NotImplemented:
                raise OverflowError
            return result
        elif isinstance(other, (datetime, np.datetime64, date)):
            return as_timestamp(other) + self
        if isinstance(other, timedelta):
            return other + self.delta
        elif isinstance(other, type(self)):
            return type(self)(self.n + other.n)
        raise ApplyTypeError('Unhandled type: {type_str}'
                             .format(type_str=type(other).__name__))

    def isAnchored(self):
        # Ticks are pure increments; they never anchor to a calendar point.
        return False
def _delta_to_tick(delta):
    """
    Return an instance of the coarsest Tick subclass exactly equal to
    ``delta`` (Day > Hour > Minute > Second > Milli > Micro > Nano).
    """
    if delta.microseconds == 0:
        if delta.seconds == 0:
            return Day(delta.days)
        else:
            seconds = delta.days * 86400 + delta.seconds
            # Divisibility is checked first, so floor division is exact.
            # (True division `/` would pass a float count to the
            # constructor under Python 3.)
            if seconds % 3600 == 0:
                return Hour(seconds // 3600)
            elif seconds % 60 == 0:
                return Minute(seconds // 60)
            else:
                return Second(seconds)
    else:
        nanos = delta_to_nanoseconds(delta)
        if nanos % 1000000 == 0:
            return Milli(nanos // 1000000)
        elif nanos % 1000 == 0:
            return Micro(nanos // 1000)
        else:  # pragma: no cover
            return Nano(nanos)
# Concrete Tick subclasses: each fixes the fundamental unit increment and
# the frequency-string prefix used by the offset/frequency parser.
class Day(Tick):
    _inc = Timedelta(days=1)
    _prefix = 'D'
class Hour(Tick):
    _inc = Timedelta(hours=1)
    _prefix = 'H'
class Minute(Tick):
    _inc = Timedelta(minutes=1)
    _prefix = 'T'
class Second(Tick):
    _inc = Timedelta(seconds=1)
    _prefix = 'S'
class Milli(Tick):
    _inc = Timedelta(milliseconds=1)
    _prefix = 'L'
class Micro(Tick):
    _inc = Timedelta(microseconds=1)
    _prefix = 'U'
class Nano(Tick):
    _inc = Timedelta(nanoseconds=1)
    _prefix = 'N'
# Short aliases kept for backwards compatibility with the public API.
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
# ---------------------------------------------------------------------
def generate_range(start=None, end=None, periods=None,
                   offset=BDay(), time_rule=None):
    """
    Generates a sequence of dates corresponding to the specified time
    offset. Similar to dateutil.rrule except uses pandas DateOffset
    objects to represent time increments.

    Parameters
    ----------
    start : datetime (default None)
    end : datetime (default None)
    periods : int, (default None)
    offset : DateOffset, (default BDay())
    time_rule : (legacy) name of DateOffset object to be used, optional
        Corresponds with names expected by tseries.frequencies.get_offset

    Notes
    -----
    * This method is faster for generating weekdays than dateutil.rrule
    * At least two of (start, end, periods) must be specified.
    * If both start and end are specified, the returned dates will
      satisfy start <= date <= end.
    * If both time_rule and offset are specified, time_rule supersedes offset.

    Returns
    -------
    dates : generator object
    """
    if time_rule is not None:
        from pandas.tseries.frequencies import get_offset
        offset = get_offset(time_rule)
    start = to_datetime(start)
    end = to_datetime(end)
    # Snap the endpoints onto the offset.
    # NOTE(review): due to the elif, when both endpoints are given and
    # start is off-offset, end is never rolled back -- confirm this
    # asymmetry is intended.
    if start and not offset.onOffset(start):
        start = offset.rollforward(start)
    elif end and not offset.onOffset(end):
        end = offset.rollback(end)
    # Rolling collapsed the range past the other endpoint: emit nothing.
    if periods is None and end < start:
        end = None
        periods = 0
    # Derive whichever endpoint is missing from the period count.
    if end is None:
        end = start + (periods - 1) * offset
    if start is None:
        start = end - (periods - 1) * offset
    cur = start
    if offset.n >= 0:
        # Walk forwards, guarding against a non-advancing offset.
        while cur <= end:
            yield cur
            # faster than cur + offset
            next_date = offset.apply(cur)
            if next_date <= cur:
                raise ValueError('Offset {offset} did not increment date'
                                 .format(offset=offset))
            cur = next_date
    else:
        # Negative offsets walk backwards from start down to end.
        while cur >= end:
            yield cur
            # faster than cur + offset
            next_date = offset.apply(cur)
            if next_date >= cur:
                raise ValueError('Offset {offset} did not decrement date'
                                 .format(offset=offset))
            cur = next_date
# Mapping from frequency-string prefix (e.g. 'W', 'BQ') to the DateOffset
# subclass handling it; consumed by the frequency parser via _from_name.
prefix_mapping = {offset._prefix: offset for offset in [
    YearBegin,  # 'AS'
    YearEnd,  # 'A'
    BYearBegin,  # 'BAS'
    BYearEnd,  # 'BA'
    BusinessDay,  # 'B'
    BusinessMonthBegin,  # 'BMS'
    BusinessMonthEnd,  # 'BM'
    BQuarterEnd,  # 'BQ'
    BQuarterBegin,  # 'BQS'
    BusinessHour,  # 'BH'
    CustomBusinessDay,  # 'C'
    CustomBusinessMonthEnd,  # 'CBM'
    CustomBusinessMonthBegin,  # 'CBMS'
    CustomBusinessHour,  # 'CBH'
    MonthEnd,  # 'M'
    MonthBegin,  # 'MS'
    Nano,  # 'N'
    SemiMonthEnd,  # 'SM'
    SemiMonthBegin,  # 'SMS'
    Week,  # 'W'
    Second,  # 'S'
    Minute,  # 'T'
    Micro,  # 'U'
    QuarterEnd,  # 'Q'
    QuarterBegin,  # 'QS'
    Milli,  # 'L'
    Hour,  # 'H'
    Day,  # 'D'
    WeekOfMonth,  # 'WOM'
    FY5253,
    FY5253Quarter,
    CalendarDay  # 'CD'
]}
| bsd-3-clause | 8,722,054,524,984,704,000 | 32.231395 | 79 | 0.557431 | false |
# --- pandasdmx/tests/test_insee.py (repo: dr-leo/pandaSDMX) ---
# TODO tidy these tests to use fixtures/methods from pandasdmx.tests
from collections import OrderedDict
import pytest
import pandasdmx
from pandasdmx import Request
from .data import BASE_PATH as test_data_path
# Directory containing the INSEE fixture files.
test_data_path = test_data_path / "INSEE"
DATAFLOW_FP = test_data_path / "dataflow.xml"
# Per-dataset fixtures: raw data file, its datastructure file, and the
# expected number of series in the data file.
DATASETS = {
    "IPI-2010-A21": {
        "data-fp": test_data_path / "IPI-2010-A21.xml",
        "datastructure-fp": test_data_path / "IPI-2010-A21-structure.xml",
        "series_count": 20,
    },
    "CNA-2010-CONSO-SI-A17": {
        "data-fp": test_data_path / "CNA-2010-CONSO-SI-A17.xml",
        "datastructure-fp": (test_data_path / "CNA-2010-CONSO-SI-A17-structure.xml"),
        "series_count": 1,
    },
}
# Single-series fixtures used by regression tests.
SERIES = {
    "UNEMPLOYMENT_CAT_A_B_C": {"data-fp": test_data_path / "bug-series-freq-data.xml"}
}
class TestINSEE:
    """Offline tests parsing INSEE SDMX fixture files (no network access)."""
    @pytest.fixture(scope="class")
    def req(self):
        # Class-scoped Request for the INSEE agency (no remote calls made).
        return Request("INSEE")
    def test_load_dataset(self, req):
        dataset_code = "IPI-2010-A21"
        # Load all dataflows
        dataflows_response = pandasdmx.read_sdmx(DATAFLOW_FP)
        dataflows = dataflows_response.dataflow
        assert len(dataflows) == 663
        assert dataset_code in dataflows
        # Load datastructure for current dataset_code
        fp_datastructure = DATASETS[dataset_code]["datastructure-fp"]
        datastructure_response = pandasdmx.read_sdmx(fp_datastructure)
        assert dataset_code in datastructure_response.dataflow
        dsd = datastructure_response.dataflow[dataset_code].structure
        # Verify dimensions list (time dimensions excluded)
        dimensions = OrderedDict(
            [dim.id, dim]
            for dim in dsd.dimensions
            if dim.id not in ["TIME", "TIME_PERIOD"]
        )
        dim_keys = list(dimensions.keys())
        assert dim_keys == ["FREQ", "PRODUIT", "NATURE"]
        # Load datas for the current dataset
        fp_data = DATASETS[dataset_code]["data-fp"]
        data = pandasdmx.read_sdmx(fp_data)
        # Verify series count and values
        series = data.data[0].series
        series_count = len(series)
        assert series_count == DATASETS[dataset_code]["series_count"]
        first_series = series[0]
        observations = first_series
        # Observations in this fixture run from newest to oldest.
        first_obs = observations[0]
        last_obs = observations[-1]
        assert first_obs.dim == "2015-10"
        assert first_obs.value == "105.61"
        assert last_obs.dim == "1990-01"
        assert last_obs.value == "139.22"
    def test_fixe_key_names(self, req):
        """Verify key or attribute contains '-' in name."""
        dataset_code = "CNA-2010-CONSO-SI-A17"
        fp_datastructure = DATASETS[dataset_code]["datastructure-fp"]
        datastructure_response = pandasdmx.read_sdmx(fp_datastructure)
        assert dataset_code in datastructure_response.dataflow
        dsd = datastructure_response.dataflow[dataset_code].structure
        dimensions = OrderedDict(
            [dim.id, dim]
            for dim in dsd.dimensions
            if dim.id not in ["TIME", "TIME_PERIOD"]
        )
        dim_keys = list(dimensions.keys())
        # Dimension ids containing '-' must round-trip unchanged.
        assert dim_keys == ["SECT-INST", "OPERATION", "PRODUIT", "PRIX"]
        fp_data = DATASETS[dataset_code]["data-fp"]
        data = pandasdmx.read_sdmx(fp_data)
        series = data.data[0].series
        series_key = list(series.keys())[0]
        assert list(series_key.values.keys()) == [
            "SECT-INST",
            "OPERATION",
            "PRODUIT",
            "PRIX",
        ]
        assert list(series_key.attrib.keys()) == [
            "FREQ",
            "IDBANK",
            "TITLE",
            "LAST_UPDATE",
            "UNIT_MEASURE",
            "UNIT_MULT",
            "REF_AREA",
            "DECIMALS",
            "BASE_PER",
            "TIME_PER_COLLECT",
        ]
    def test_freq_in_series_attribute(self, req):
        # Test that we don't have regression on Issues #39 and #41
        # INSEE time series provide the FREQ value as attribute on the series
        # instead of a dimension. This caused a runtime error when writing as
        # pandas dataframe.
        data_response = pandasdmx.read_sdmx(SERIES["UNEMPLOYMENT_CAT_A_B_C"]["data-fp"])
| apache-2.0 | -5,387,207,897,127,990,000 | 31.325758 | 88 | 0.597141 | false |
# --- version3/jsonwebretrieve.py (repo: jenniyanjie/sg-stock-related) ---
import os, re, sys, time, datetime, copy, calendar
import pandas, pdb
import simplejson as json
from pattern.web import URL, extension, cache, plaintext, Newsfeed
class WebJsonRetrieval(object):
    """
    General object to retrieve a JSON document from the web.

    Typical flow: set_url() -> download_json() -> process_json_data() ->
    convert_json_to_df().  Only the first-level ``target_tag`` key of the
    downloaded JSON is extracted into the result DataFrame.
    """
    def __init__(self):
        """Initialise the scratch-file path, target tag and empty result."""
        ## Local scratch file the downloaded JSON is saved to.
        self.saved_json_file = r'./temptryyql.json'
        ## Key used to identify the json data needed.
        self.target_tag = ''
        ## Result dataframe built from the JSON payload.
        self.result_json_df = pandas.DataFrame()
    def set_url(self, url_str):
        """ Set the url for the json retrieval.
            url_str (str): json url str
        """
        self.com_data_full_url = url_str
    def set_target_tag(self, target_tag):
        """ Set the target_tag for the json retrieval.
            target_tag (str): target_tag for json file
        """
        self.target_tag = target_tag
    def download_json(self):
        """ Download the json file from the self.com_data_full_url.
            The save file is default to the self.saved_json_file.
        """
        cache.clear()
        url = URL(self.com_data_full_url)
        try:
            url_data = url.download(timeout=50)
        except Exception:
            # Best-effort download: an unreachable URL yields an empty file.
            # (Narrowed from a bare except so Ctrl-C still propagates.)
            url_data = ''
        # Context manager guarantees the handle is closed even on error.
        with open(self.saved_json_file, 'wb') as f:
            f.write(url_data)
    def process_json_data(self):
        """ Load the saved json file into self.json_raw_data. """
        try:
            with open(self.saved_json_file, 'r') as f:
                self.json_raw_data = json.load(f)
        except Exception:
            print("Problem loading the json file.")
            self.json_raw_data = [{}]  # return list of empty dict
    def convert_json_to_df(self):
        """ Convert json data (list of dict) to dataframe.
            Required the correct input of self.target_tag.
        """
        self.result_json_df = pandas.DataFrame(self.json_raw_data[self.target_tag])
# --- cernatschool/helpers.py (repo: twhyntie/coding-challenges) ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#...for the logging.
import logging as lg
#...for the MATH.
import numpy as np
#...for the data analysis.
import pandas as pd
#...for the least squares stuff.
from scipy.optimize import leastsq
#...for the plotting.
import pylab as plt
#...for the colours.
from matplotlib import colorbar, colors
#...for the data values.
from datavals import *
def getConsistentValue(thelist, error, emptyval=None):
    """
    Extract the single, consistent value contained in a list.

    Raises IOError with the caller-supplied `error` message if the list
    holds differing values, and ValueError if the list is empty and no
    `emptyval` fallback is supplied.
    """
    if not thelist:
        # Empty input: fall back to the supplied default, if any.
        if emptyval is None:
            raise ValueError("Empty list supplied but no empty value given!")
        return emptyval
    first = thelist[0]
    # Any value that differs from the first breaks consistency.
    if any(item != first for item in thelist):
        raise IOError(error)
    return first
def getFormat(fn):
    """
    Identify the data-file format of `fn` by inspecting its first line.

    Returns an integer file-type code (a key of datavals.DATA_FILE_TYPES):
    -1 for DSC files, otherwise inferred from the first line's layout;
    0 if the file matches no known format.
    """
    ## Open the file and look at the first line.
    with open(fn, "r") as f:
        l = f.readline().strip()
    lg.debug("")
    lg.debug(" *--> First line is:")
    lg.debug("\n\n%s\n" % (l))
    lg.debug("")
    ## The file type value.
    filetypeval = 0
    # Is it a DSC file?
    # TODO: check all possible DSC file starts...
    if l == "A000000001":
        filetypeval = -1
        lg.debug(" *--> This is a %s file." % (DATA_FILE_TYPES[filetypeval]))
        return filetypeval
    # Is the file empty?
    if l == "":
        filetypeval = 4114
        lg.debug(" *--> This is a %s file." % (DATA_FILE_TYPES[filetypeval]))
        return filetypeval
    # Try to break up the first line into tab-separated integers.
    try:
        ## Values separated by tab
        tabvals = [int(x) for x in l.split('\t')]
        lg.debug(" %d tab separated values found in the first line." % (len(tabvals)))
        # NOTE(review): the meaning of codes 8210/4114/18 comes from
        # datavals.DATA_FILE_TYPES -- confirm the mapping there.
        if len(tabvals) == 2:
            filetypeval = 8210
        elif len(tabvals) == 3:
            filetypeval = 4114
        lg.debug(" *--> This is a %s file." % (DATA_FILE_TYPES[filetypeval]))
        return filetypeval
    except ValueError:
        lg.debug(" Tab separation into integers failed!")
        pass
    try:
        ## Values separated by spaces.
        spcvals = [int(x) for x in l.split(' ')]
        lg.debug(" %d space separated values found in the first line." % (len(spcvals)))
        # 256 space-separated integers: one full row of a 256-column matrix.
        if len(spcvals) == 256:
            filetypeval = 18
            lg.debug(" *--> This is a %s file." % (DATA_FILE_TYPES[filetypeval]))
            return filetypeval
    except ValueError:
        lg.debug(" Space separation into integers failed!")
        pass
    lg.debug(" This is not a valid data file.")
    return filetypeval
def get_pixel_dictionary_from_dataframe(df, rows=256, cols=256):
    """ Get a {X:C} pixel dictionary from a pandas DataFrame. """
    # Each DataFrame row is (x, y, C); X is the row-major index y*cols + x.
    # (`rows` is unused but kept for interface compatibility.)
    return {(cols * row[1]) + row[0]: row[2] for row in df.values}
def residuals(p, y, x):
    """ The residual function required by leastsq: y - (m*x + c). """
    # p packs the straight-line parameters: gradient then intercept.
    gradient, intercept = p
    return y - (gradient * x + intercept)
def getLinearity(pixel_dict):
    """
    Find the linearity of a cluster of pixels.

    A straight line y = m*x + c is fitted to the pixel (x, y) positions;
    the residuals are the perpendicular distances of each pixel from the
    line of best fit.

    @param [in] pixel_dict A dictionary of pixel {X:C} values, where
           X = 256*y + x is the row-major pixel index.

    @returns m    The gradient of the line of best fit (999999.9 sentinel
                  for a vertical line).
    @returns c    The intercept of the line of best fit (999999.9 sentinel
                  for a vertical line).
    @returns sumR The sum of the residuals.
    @returns lin  The linearity, sumR/N_pixels.

    All four values are None if the pixel dictionary is empty.
    """

    lg.debug("*")
    lg.debug("*--> getLinearity called:")
    lg.debug("* %d pixels found." % (len(pixel_dict)))

    # No pixels: nothing to fit.
    if len(pixel_dict) == 0:
        lg.debug("*--> No pixels provided; exiting returning None!")
        return None, None, None, None

    # Decode the row-major pixel indices into x and y coordinates.
    # Integer division (//) is required: true division would produce
    # fractional row numbers under Python 3.
    xs = [float(X % 256) for X in pixel_dict]
    ys = [float(X // 256) for X in pixel_dict]

    x_array = np.array(xs)
    y_array = np.array(ys)

    # A single pixel is trivially "linear": slope 0 through its own x.
    if len(x_array) == 1:
        lg.debug("*--> I am only one pixel! Exiting!")
        return 0.0, x_array[0], 0.0, 0.0

    # Vertical line: every x identical -> infinite gradient, return the
    # sentinel values with zero residuals.
    if xs.count(xs[0]) == len(xs):
        return 999999.9, 999999.9, 0.0, 0.0

    # Horizontal line: every y identical -> m = 0, c = y_i.
    if ys.count(ys[0]) == len(ys):
        return 0.0, ys[0], 0.0, 0.0

    # General case: least-squares fit of y = m*x + c, starting from a
    # flat line through the first pixel.
    lg.debug("* --> Initial guesses from (%3d, %3d) - (%3d, %3d):" %
             (x_array[0], y_array[0], x_array[-1], y_array[-1]))
    m_init = 0.0
    c_init = y_array[0] - (m_init * (x_array[0]))
    p0 = [m_init, c_init]
    lg.debug("*--> Initial [m, c] = [% f, % f]" % (p0[0], p0[1]))

    # Least-squares fit using the module-level `residuals` model function.
    plsq = leastsq(residuals, p0, args=(np.array(y_array), np.array(x_array)))
    m = plsq[0][0]
    c = plsq[0][1]
    lg.debug("*--> Found [m, c] = [% f, % f]" % (m, c))

    # Perpendicular distance of each pixel from the fitted line:
    # |m*x - y + c| / sqrt(1 + m^2).  The denominator is constant, so
    # compute it once.
    denom = np.sqrt(1 + m * m)
    ds = [np.fabs(m * (X % 256) - (X // 256) + c) / denom for X in pixel_dict]

    return m, c, sum(ds), sum(ds) / (float(len(pixel_dict)))
def countEdgePixels(pixels_dict, rows, cols):
    """
    Count the number of edge pixels in the cluster.

    A pixel is an edge pixel if at least one of its eight neighbours is
    not part of the cluster.

    @param [in] pixels_dict A dictionary of pixel {X:C} values, with
           X = 256*y + x the row-major pixel index.
    @param [in] rows Number of rows in the grid (currently unused, kept
           for interface compatibility).
    @param [in] cols Number of columns in the grid (used to build the
           neighbour index).
    """
    # The eight (dx, dy) neighbour offsets, matching the original
    # dir_x/dir_y direction tables.
    neighbour_offsets = ((-1, 0), (-1, 1), (0, 1), (1, 1),
                         (1, 0), (1, -1), (0, -1), (-1, -1))

    num_edge_pixels = 0

    for X in pixels_dict:
        # Integer decode of the row-major index.  Floor division is
        # required: true division gives a fractional row under Python 3,
        # corrupting every neighbour index.
        y, x = divmod(X, 256)
        for dx, dy in neighbour_offsets:
            # The neighbouring pixel's row-major index.
            nX = (y + dy) * cols + (x + dx)
            # NOTE(review): no grid-boundary check is performed (it was
            # commented out in the original), so clusters touching the
            # left/right edge can "wrap" -- confirm this is intended.
            if nX not in pixels_dict:
                # At least one neighbour missing -> edge pixel.
                num_edge_pixels += 1
                break

    return num_edge_pixels
def get_dataframe_of_klusters(klusters):
    """Build a DataFrame of selected properties for a list of klusters."""
    # One property dictionary per kluster.
    property_dicts = [k.getKlusterPropertiesJson() for k in klusters]
    # Frame over every property, restricted to the columns of interest.
    frame = pd.DataFrame(property_dicts)
    return frame[['size', 'x_uw', 'y_uw', 'radius_uw', 'density_uw',
                  'isedgekluster', 'innerfrac']]
| apache-2.0 | -2,826,983,471,519,716,400 | 28.598802 | 139 | 0.553308 | false |