repo_name | path | copies | size | content | license
---|---|---|---|---|---|
WillCh/cs286A | dataMover/kafka/system_test/utils/metrics.py | 89 | 13937 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
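# Illustrative example (not taken from an actual run): an MBean name such as
# "kafka.server:type=BrokerTopicMetrics,name=AllTopicsMessagesInPerSec"
# maps to the CSV file name
# "kafka.server.BrokerTopicMetrics.AllTopicsMessagesInPerSec.csv".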
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
if not inputCsvFiles: return
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data for all files, organize by label in a dict
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
# sanity check: the first data row must begin with a numeric value;
# a non-numeric value raises ValueError, which is caught by the handler below
int(csv_reader[0][0])
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
# build all dashboards for the input entities based on their role. The role can be one of kafka, zookeeper, producer,
# consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
| bsd-2-clause |
iarroyof/sentence_embedding | deprecated/entropy_weights.py | 1 | 4091 | from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
import cPickle as pickle
import argparse
import logging
from time import time
import numpy as np
class streamer(object):
def __init__(self, file_name):
self.file_name=file_name
def __iter__(self):
for s in open(self.file_name):
yield s.strip()
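# Minimal usage sketch (with a hypothetical file name): the streamer yields
# one stripped line at a time, so it can be fed straight to a vectorizer
# without loading the whole corpus into memory, e.g.
# TfidfVectorizer().fit(streamer("sentences.txt"))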
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Computes Cross-Entropy (TFIDF) weights of a raw text dataset and stores the model.')
parser.add_argument("--dataset", help="The path to the raw text dataset file",
required=True)
parser.add_argument("--cout", help="The path to the cross-entropy output model file",
default="output_tfidf.pk")
parser.add_argument("--minc", help="The minimum word frequency considered to compute CE weight.",
default=2, type=int)
parser.add_argument("--tf", help="TF normalization: none, binary, sublinear (default=none).", default="none")
parser.add_argument("--stop", help="Toggles stop words stripping.", action="store_true")
parser.add_argument("--lsa", help="Toggles LSA computation.", default=0, type=int)
parser.add_argument("--news", help="Toggles making analysis of predefined dataset.", action="store_true")
args = parser.parse_args()
t0 = time()
if not args.news:
corpus=streamer(args.dataset)
vectorizer = TfidfVectorizer(min_df=1,
encoding="latin-1",
decode_error="replace",
lowercase=False,
binary= True if args.tf.startswith("bin") else False,
sublinear_tf= True if args.tf.startswith("subl") else False,
stop_words= "english" if args.stop else None)
X = vectorizer.fit(corpus) if args.lsa<0 else vectorizer.fit_transform(corpus)
else:
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("%d documents" % len(dataset.data))
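# NOTE: `opts` is not defined anywhere in this script; the two settings below
# appear to be carried over from the scikit-learn document-clustering example,
# so this --news branch will raise a NameError unless those options are
# supplied some other way.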
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if args.lsa==0:
with open(args.cout, 'wb') as fin:
pickle.dump(X, fin)
print("TF-IDF weights saved...")
exit()
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.pipeline import make_pipeline
svd = TruncatedSVD(args.lsa)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print ("Saving vectors to: %s" % args.cout)
np.savetxt(args.cout,X)
| apache-2.0 |
berkeley-stat222/mousestyles | mousestyles/ultradian/__init__.py | 3 | 26924 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from gatspy.periodic import LombScargleFast
from gatspy.periodic import LombScargle
import matplotlib.pyplot as plt
import mousestyles.data as data
from mousestyles.visualization.plot_lomb_scargle import lombscargle_visualize
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from scipy.stats import chi2
plt.style.use('ggplot')
INTERVAL_FEATURES = ["AS", "F", "M_AS", "M_IS", "W"]
ALL_FEATURES = ["AS", "F", "M_AS", "M_IS", "W", "Distance"]
METHOD = ["LombScargleFast", "LombScargle"]
def aggregate_interval(strain, mouse, feature, bin_width):
"""
Aggregate the interval data based on n-minute time
intervals, return a time series.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
feature: {"AS", "F", "M_AS", "M_IS", "W"}
"AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
bin_width: number of minutes of time interval for data aggregation
Returns
-------
ts: pandas.tseries
a pandas time series of length 12(day)*24(hour)*60(minute)/n
"""
# Input Check
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not isinstance(mouse, int)) or (mouse < 0):
raise ValueError(
'Mouse value must be a non-negative integer')
if feature not in INTERVAL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
# load data
intervals = data.load_intervals(feature)
mouse_data = intervals.loc[
(intervals['strain'] == strain) & (intervals['mouse'] == mouse)]
# build data frame
days = sorted(np.unique(mouse_data['day']))
bin_count = int(24 * 60 / bin_width)
time_behaviour = np.repeat(0.0, bin_count * len(days))
bin_length = bin_width * 60
for j in days:
df = mouse_data.loc[mouse_data['day'] == j]
start_end = data.load_start_time_end_time(strain, mouse, j)
start = np.asarray(df['start']) - start_end[0]
end = np.asarray(df['stop']) - start_end[0]
for i in range(len(start)):
start_time = start[i]
end_time = end[i]
start_index = int(start_time / (bin_width * 60))
end_index = int(end_time / (bin_width * 60))
if start_index == end_index:
time_behaviour[start_index + j *
bin_count] += end_time - start_time
elif end_index - start_index == 1:
time_behaviour[
start_index + j *
bin_count] += bin_length * end_index - start_time
time_behaviour[end_index + j *
bin_count] += end_time % bin_length
else:
time_behaviour[
start_index + j *
bin_count] += bin_length * (start_index + 1) - start_time
time_behaviour[end_index + j *
bin_count] += end_time % bin_length
time_behaviour[start_index + j * bin_count +
1:end_index + j * bin_count] += bin_length
if feature == 'F' or feature == 'W':
all_feature = data.load_all_features()
group = all_feature[
["strain", "mouse", "day", "hour", "Food", "Water"]].groupby(
["strain", "mouse", "day"]).sum()
group = group.reset_index()
mouse_data = group.loc[(group['strain'] == strain) &
(group['mouse'] == mouse)].copy()
mouse_data.loc[:, 'day'] = np.arange(len(mouse_data))
for i in mouse_data['day'].astype('int'):
if feature == 'F':
food_amount = float(mouse_data['Food'][mouse_data['day'] == i])
time_behaviour[
(bin_count * i):(bin_count * (i + 1))] /= sum(
time_behaviour[(bin_count * i):(bin_count * (i + 1))])
time_behaviour[(bin_count * i):(bin_count *
(i + 1))] *= food_amount
else:
food_amount = float(mouse_data['Water'][
mouse_data['day'] == i])
time_behaviour[
(bin_count * i):(bin_count * (i + 1))] /= sum(
time_behaviour[(bin_count * i):(bin_count * (i + 1))])
time_behaviour[(bin_count * i):(bin_count *
(i + 1))] *= food_amount
if feature == 'AS':
time_behaviour /= (bin_width * 60)
ts = pd.Series(time_behaviour, index=pd.date_range(
'01/01/2014', periods=len(time_behaviour),
freq=str(bin_width) + 'min'))
return ts
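# Usage sketch (assuming the mousestyles intervals data is available):
# ts = aggregate_interval(strain=0, mouse=0, feature="AS", bin_width=30)
# returns one value per 30-minute bin, i.e. 48 bins per recorded day.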
def aggregate_movement(strain, mouse, bin_width):
"""
Aggregate the movement data based on n-minute
time intervals, return a time series.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
bin_width: number of minutes of time interval for data aggregation
Returns
-------
ts: pandas.tseries
a pandas time series of length (#day)*24(hour)*60(minute)/n
"""
# Input Check
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not isinstance(mouse, int)) or (mouse < 0):
raise ValueError(
'Mouse value must be a non-negative integer')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
# determine number of days
intervals = data.load_intervals('IS')
mouse_data = intervals.loc[
(intervals['strain'] == strain) & (intervals['mouse'] == mouse)]
days = sorted(np.unique(mouse_data['day']))
# build data frame
bin_count = int(24 * 60 / bin_width)
time_movements = np.repeat(0.0, bin_count * len(days))
bin_length = bin_width * 60
for j in days:
M = data.load_movement(strain, mouse, day=int(j))
distance_df = pd.DataFrame({"start": M["t"].values[0:-1],
"end": M["t"].values[1:],
"distance":
np.linalg.norm(M[["x", "y"]].values[1:] -
M[["x", "y"]].values[0:-1],
axis=1)})
start_end = data.load_start_time_end_time(strain, mouse, j)
start = np.asarray(distance_df['start']) - start_end[0]
end = np.asarray(distance_df['end']) - start_end[0]
dist = distance_df['distance']
for i in range(len(start)):
start_time = start[i]
end_time = end[i]
start_index = int(start_time / (bin_width * 60))
end_index = int(end_time / (bin_width * 60))
if start_index == end_index:
time_movements[start_index + j *
bin_count] += dist[i]
else:
time_movements[
end_index + j * bin_count] += end_time % \
bin_length / (end_time - start_time) * dist[i]
time_movements[
start_index + j * bin_count] += dist[i] - \
end_time % bin_length / (end_time - start_time) * dist[i]
ts = pd.Series(time_movements, index=pd.date_range(
'01/01/2014', periods=len(time_movements),
freq=str(bin_width) + 'min'))
return ts
def aggregate_data(feature, bin_width, nmouse=4, nstrain=3):
r"""
Aggregate all the strains and mice with any feature together
in one dataframe. It combines the results you get from
aggregate_movement and aggregate_interval. It will return
a dataframe with four variables: mouse, strain, hour and feature.
Parameters
----------
feature :
{"AS", "F", "IS", "M_AS", "M_IS", "W", "Distance"}
bin_width : int
Number of minutes, the time interval for data aggregation.
Returns
-------
pandas.dataframe
describe :
Column 0: the mouse number (number depends on strain)(0-3)
Column 1: the strain of the mouse (0-2)
Column 2: hour (numeric values below 24 according to bin_width)
Column 3: feature values
Examples
--------
>>> test = aggregate_data("Distance",20)
>>> print(np.mean(test["Distance"]))
531.4500177747973
"""
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
init = pd.DataFrame(columns=["mouse", "strain", "hour", feature])
for i in range(nstrain):
for j in range(nmouse):
if feature == "Distance":
tmp = aggregate_movement(strain=i, mouse=j,
bin_width=bin_width)
else:
tmp = aggregate_interval(strain=i, mouse=j,
feature=feature,
bin_width=bin_width)
tmp = pd.DataFrame(list(tmp.values), index=tmp.index)
tmp.columns = [feature]
tmp["strain"] = i
tmp["mouse"] = j
tmp["hour"] = tmp.index.hour + tmp.index.minute / 60
init = init.append(tmp)
return init
def seasonal_decomposition(strain, mouse, feature, bin_width, period_length):
"""
Apply seasonal decomposition model on the time series
of specified strain, mouse, feature and bin_width.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: int
nonnegative integer indicating the mouse number
feature: {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
"AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
bin_width: int
number of minutes, the time interval for data aggregation
period_length: int or float
number of hours, usually the significant period
length indicated by Lomb-scargle model
Returns
-------
res: statsmodel seasonal decomposition object
seasonal decomposition result for the mouse.
Check the seasonal decomposition plot by res.plot(),
seasonal term and trend term by res.seasonal and
res.trend separately.
Examples
--------
>>> res = seasonal_decomposition(strain=0, mouse=0, feature="W",
bin_width=30, period_length = 24)
"""
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not isinstance(mouse, int)) or (mouse < 0):
raise ValueError(
'Mouse value must be a non-negative integer')
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
if period_length < 0:
raise ValueError(
'Period length must be a non-negative integer or float')
freq = int(period_length * 60 / bin_width)
if feature == "Distance":
ts = aggregate_movement(strain=strain, mouse=mouse,
bin_width=bin_width)
else:
ts = aggregate_interval(strain=strain, mouse=mouse,
feature=feature, bin_width=bin_width)
res = sm.tsa.seasonal_decompose(ts.values, freq=freq, model="additive")
return res
def strain_seasonal(strain, mouse, feature, bin_width, period_length):
"""
Use the seasonal decomposition model on the time series
of the specified strain, mouse, feature and bin_width.
Return the seasonal term and the plot of the seasonal term
by mouse for a set of mice in a strain.
Parameters
----------
strain: int
nonnegative integer indicating the strain number
mouse: list, set or tuple
nonnegative integer indicating the mouse number
feature: {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
"AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
bin_width: int
number of minutes, the time interval for data aggregation
period_length: int or float
number of hours, usually the significant period
length indicated by Lomb-scargle model
Returns
-------
seasonal_all: numpy array containing the seasonal term for every
mouse indicated by the input parameter
Examples
--------
>>> res = strain_seasonal(strain=0, mouse={0, 1, 2, 3}, feature="W",
bin_width=30, period_length = 24)
"""
if (not isinstance(strain, int)) or (strain < 0):
raise ValueError(
'Strain must be a non-negative integer')
if (not all([isinstance(m, int)
for m in mouse])) or (any([m < 0 for m in mouse])):
raise ValueError(
'Mouse value must be a non-negative integer')
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if (not isinstance(bin_width, int)) or bin_width < 0 or bin_width > 1440:
raise ValueError(
'Bin width (minutes) must be a non-negative integer below 1440')
if period_length < 0:
raise ValueError(
'Period length must be a non-negative integer or float')
# seasonal decomposition
seasonal_all = np.array([])
freq = int(period_length * 60 / bin_width)
for m in mouse:
res = seasonal_decomposition(
strain, m, feature, bin_width, period_length)
seasonal_all = np.append(seasonal_all, res.seasonal[0:freq])
seasonal_all = seasonal_all.reshape([len(mouse), -1])
return seasonal_all
def find_cycle(feature, strain, mouse=None, bin_width=15,
methods='LombScargleFast', disturb_t=False, gen_doc=False,
plot=True, search_range_fit=None, nyquist_factor=3,
n_cycle=10, search_range_find=(2, 26), sig=np.array([0.05])):
"""
Use the Lomb-Scargle method on a strain's (and optionally a single mouse's)
data to find the best candidate periods, i.e. those with the highest
Lomb-Scargle power and the most significant p-values. The function can be
used on a specific strain and mouse, or on a whole strain without
specifying the mouse number. We use the O(N log N) fast implementation of
Lomb-Scargle from the gatspy package, and also provide a way to
visualize the result.
Note that plotting or calculating the L-S power does not use the same
period grid as finding the best cycle: the former can use a user-specified
search_range_fit, while the latter searches within search_range_find using
the optimizer's default grid.
Parameters
----------
feature: string in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}
"AS": Active state probability
"F": Food consumed (g)
"M_AS": Movement outside homebase
"M_IS": Movement inside homebase
"W": Water consumed (g)
"Distance": Distance traveled
strain: int
nonnegative integer indicating the strain number
mouse: int, default is None
nonnegative integer indicating the mouse number
bin_width: int, minute unit, default is 15 minutes
number of minutes, the time interval for data aggregation
methods: string in {"LombScargleFast", "LombScargle"}
indicating the method used in determining periods and best cycle.
If 'LombScargle' is chosen, 'disturb_t' must be True.
disturb_t: boolean, default is False
If True, add uniformly distributed noise to the time sequence which
is used to fit the Lomb Scargle model. This is to avoid the singular
matrix error that can sometimes occur.
plot: boolean, default is True
If True, call the visualization function to plot the Lomb Scargle
power versus periods plot. First use the data (either strain specific
or strain-mouse specific) to fit the LS model, then use the
search_range_fit as time sequence to predict the corresponding LS
power, at last draw the plot out. There will also be stars and
horizontal lines indicating the p-value of significance. Three stars
will be p-value in [0,0.001], two stars will be p-value in
[0.001,0.01], one star will be p-value in [0.01,0.05]. The horizontal
line is the LS power that has p-value of 0.05.
search_range_fit: list, numpy array or numpy arange, hours unit,
default is None
list of numbers as the time sequence to predict the corresponding
Lomb Scargle power. If plot is 'True', these will be drawn as the
x-axis. Note that the number of search_range_fit points can not be
too small, or the prediction smooth line will not be accurate.
However the plot will always give the right periods and their LS
power with 1,2 or 3 stars. This could be a sign to check whether
search_range_fit is not enough to draw the correct plot.
We recommend the default None, which is easy to use.
nyquist_factor: int
If search_range_fit is None, the algorithm will automatically
choose the periods sequence.
5 * nyquist_factor * length(time sequence) / 2 gives the number of
power and periods used to make LS prediction and plot the graph.
n_cycle: int, default is 10
numbers of periods to be returned by function, which have the highest
Lomb Scargle power and p-value.
search_range_find: list, tuple or numpy array with length of 2, default is
(2,26), hours unit
Range of periods to be searched for best cycle. Note that the minimum
should be strictly larger than 0 to avoid 1/0 issues.
sig: list or numpy array, default is [0.05].
significance level to be used for plot horizontal line.
gen_doc: boolean, default is False
If True, also return the parameters needed to visualize the LS power versus
periods
Returns
-------
cycle: numpy array of length 'n_cycle'
The best periods with highest LS power and p-values.
cycle_power: numpy array of length 'n_cycle'
The corresponding LS power of 'cycle'.
cycle_pvalue: numpy array of length 'n_cycle'
The corresponding p-value of 'cycle'.
periods: numpy array of the same length as 'power'
used as the time sequence in the LS model to make predictions. Only
returned when gen_doc is True.
power: numpy array of the same length as 'periods'
the corresponding predicted power of the periods. Only returned when
gen_doc is True.
sig: list, tuple or numpy array, default is [0.05].
significance level to be used for the plot's horizontal line.
Only returned when gen_doc is True.
N: int
the length of the time sequence in the fitted model. Only returned when
gen_doc is True.
Examples
-------
>>> a,b,c = find_cycle(feature='F', strain = 0,mouse = 0, plot=False,)
>>> print(a,b,c)
>>> [ 23.98055016 4.81080233 12.00693952 6.01216335 8.0356203
3.4316698 2.56303353 4.9294791 21.37925713 3.5697756 ]
[ 0.11543449 0.05138839 0.03853218 0.02982237 0.02275952
0.0147941 0.01151601 0.00998443 0.00845883 0.0082382 ]
[ 0.00000000e+00 3.29976046e-10 5.39367189e-07 8.10528027e-05
4.71001953e-03 3.70178834e-01 9.52707020e-01 9.99372657e-01
9.99999981e-01 9.99999998e-01]
"""
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
if methods not in METHOD:
raise ValueError(
'Input value must in {"LombScargleFast","LombScargle"}')
# get data
if mouse is None:
data_all = aggregate_data(feature=feature, bin_width=bin_width)
n_mouse_in_strain = len(
set(data_all.loc[data_all['strain'] == strain]['mouse']))
data = [[] for i in range(n_mouse_in_strain)]
t = [[] for i in range(n_mouse_in_strain)]
for i in range(n_mouse_in_strain):
data[i] = data_all.loc[(data_all['strain'] == strain) & (
data_all['mouse'] == i)][feature]
t[i] = np.array(np.arange(0, len(data[i]) *
bin_width / 60, bin_width / 60))
data = [val for sublist in data for val in sublist]
N = len(data)
t = [val for sublist in t for val in sublist]
else:
if feature == 'Distance':
data = aggregate_movement(
strain=strain, mouse=mouse, bin_width=bin_width)
N = len(data)
t = np.arange(0, N * bin_width / 60, bin_width / 60)
else:
data = aggregate_interval(
strain=strain, mouse=mouse,
feature=feature, bin_width=bin_width)
N = len(data)
t = np.arange(0, N * bin_width / 60, bin_width / 60)
y = data
# fit model
if disturb_t is True:
t = t + np.random.uniform(-bin_width / 600, bin_width / 600, N)
if methods == 'LombScargleFast':
model = LombScargleFast(fit_period=False).fit(t=t, y=y)
elif methods == 'LombScargle':
model = LombScargle(fit_period=False).fit(t=t, y=y)
# calculate periods' LS power
if search_range_fit is None:
periods, power = model.periodogram_auto(nyquist_factor=nyquist_factor)
else:
periods = search_range_fit
power = model.periodogram(periods=search_range_fit)
# find best cycle
model.optimizer.period_range = search_range_find
cycle, cycle_power = model.find_best_periods(
return_scores=True, n_periods=n_cycle)
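# Convert the normalized L-S power P of each candidate period into an
# approximate false-alarm probability, 1 - (1 - exp(-(N - 1) * P / 2)) ** (2 * N),
# in the spirit of the classical Lomb-Scargle significance estimate
# (cf. Horne & Baliunas 1986); smaller values indicate stronger periodicity.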
cycle_pvalue = 1 - (1 - np.exp(cycle_power / (-2) * (N - 1))) ** (2 * N)
# visualization
if plot is True:
lombscargle_visualize(periods=periods, power=power, sig=sig, N=N,
cycle_power=cycle_power,
cycle_pvalue=cycle_pvalue, cycle=cycle)
if gen_doc is True:
return periods, power, sig, N, cycle, cycle_power, cycle_pvalue
return cycle, cycle_power, cycle_pvalue
def mix_strain(data, feature, print_opt=True,
nstrain=3, search_range=(3, 12), degree=1):
"""
Fit the linear mixed model to our aggregated data. The fixed effects
are hour, strain, and the interactions between hour and strain; the
random effect is mouse, so that differences between individual mice do
not confound the strain comparison. We added two dummy variables,
strain0 and strain1, as our fixed effects.
Parameters
----------
data: data frame output from aggregate_data function
feature: {"AS", "F", "IS", "M_AS", "M_IS", "W", "Distance"}
print_opt: True or False
nstrain: positive integer
search_range: tuple or array containing two elements
degree: positive integer
Returns
-------
Two mixed-model regression results, which include all the coefficients,
t statistics and p-values for the corresponding coefficients; the first
model includes interaction terms while the second model does not.
The likelihood ratio test p-value: if it is below our significance level,
we can conclude that the different strains have significantly different
time patterns.
Examples
--------
>>> result = mix_strain(data = aggregate_data("F",30), feature = "F",
>>> print_opt = False, degree = 2)
>>> print(result)
2.5025846540930469e-09
"""
if not isinstance(data, pd.DataFrame):
raise ValueError(
'Data must be a pandas data frame')
if feature not in ALL_FEATURES:
raise ValueError(
'Input value must in {"AS", "F", "M_AS", "M_IS", "W", "Distance"}')
data["cycle"] = 0
for i in range(nstrain):
result = find_cycle(feature="W", strain=i, plot=False,
search_range_find=search_range)
cycle = result[0][0]
data.loc[data["strain"] == i, "cycle"] = cycle
b = pd.get_dummies(data["strain"])
data["strain0"] = b.ix[:, 0]
data["strain1"] = b.ix[:, 1]
data["strain2"] = b.ix[:, 2]
data["hour2"] = np.array(data["hour"].values)**degree
data = data.drop('strain', 1)
names = data.columns.tolist()
names[names.index(feature)] = 'feature'
data.columns = names
if degree == 1:
md1 = smf.mixedlm("feature ~ hour + strain0 + strain1 + cycle \
+ strain0*hour + strain1*hour", data,
groups=data["mouse"])
else:
md1 = smf.mixedlm("feature ~ hour + hour2 + strain0 + strain1 + \
strain0*hour+ strain1*hour + strain0*hour2+ \
strain1*hour2", data, groups=data["mouse"])
mdf1 = md1.fit()
like1 = mdf1.llf
if print_opt:
print(mdf1.summary())
if degree == 1:
md2 = smf.mixedlm("feature ~ hour + cycle + strain0 \
+ strain1", data, groups=data["mouse"])
else:
md2 = smf.mixedlm("feature ~ hour + hour2 + cycle + strain0 + \
strain1", data, groups=data["mouse"])
mdf2 = md2.fit()
like2 = mdf2.llf
if print_opt:
print(mdf2.summary())
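# Likelihood-ratio comparison of the two fitted models: the statistic is
# twice the absolute difference in log-likelihoods, which is then referred
# to a chi-square distribution with df=2 below.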
fstat = 2 * abs(like1 - like2)
p_v = chi2.pdf(fstat, df=2)
return p_v
| bsd-2-clause |
bavardage/statsmodels | statsmodels/sandbox/rls.py | 4 | 5141 | """Restricted least squares
from pandas
License: Simplified BSD
"""
import numpy as np
from statsmodels.regression.linear_model import WLS, GLS, RegressionResults
class RLS(GLS):
"""
Restricted general least squares model that handles linear constraints
Parameters
----------
endog: array-like
n length array containing the dependent variable
exog: array-like
n-by-p array of independent variables
constr: array-like
k-by-p array of linear constraints
param (0.): array-like or scalar
p-by-1 array (or scalar) of constraint parameters
sigma (None): scalar or array-like
The weighting matrix of the covariance. No scaling by default (OLS).
If sigma is a scalar, then it is converted into an n-by-n diagonal
matrix with sigma as each diagonal element.
If sigma is an n-length array, then it is assumed to be a diagonal
matrix with the given sigma on the diagonal (WLS).
Notes
-----
endog = exog * beta + epsilon
weights' * constr * beta = param
See Greene and Seaks, "The Restricted Least Squares Estimator:
A Pedagogical Note", The Review of Economics and Statistics, 1991.
"""
def __init__(self, endog, exog, constr, param=0., sigma=None):
N, Q = exog.shape
constr = np.asarray(constr)
if constr.ndim == 1:
K, P = 1, constr.shape[0]
else:
K, P = constr.shape
if Q != P:
raise Exception('Constraints and design do not align')
self.ncoeffs = Q
self.nconstraint = K
self.constraint = constr
if np.isscalar(param) and K > 1:
param = np.ones((K,)) * param
self.param = param
if sigma is None:
sigma = 1.
if np.isscalar(sigma):
sigma = np.ones(N) * sigma
sigma = np.squeeze(sigma)
if sigma.ndim == 1:
self.sigma = np.diag(sigma)
self.cholsigmainv = np.diag(np.sqrt(sigma))
else:
self.sigma = sigma
self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(self.sigma)).T
super(GLS, self).__init__(endog, exog)
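# For reference (a sketch of the algebra, not how the code computes it):
# Greene and Seaks give the restricted estimator in closed form as
#   b_r = b_ols - (X'X)^{-1} R' [R (X'X)^{-1} R']^{-1} (R b_ols - q)
# with R = `constr` and q = `param`. The properties below instead solve the
# equivalent augmented ("bordered") normal equations assembled in rwexog
# and rwendog.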
_rwexog = None
@property
def rwexog(self):
"""Whitened exogenous variables augmented with restrictions"""
if self._rwexog is None:
P = self.ncoeffs
K = self.nconstraint
design = np.zeros((P + K, P + K))
design[:P, :P] = np.dot(self.wexog.T, self.wexog) #top left
constr = np.reshape(self.constraint, (K, P))
design[:P, P:] = constr.T #top right partition
design[P:, :P] = constr #bottom left partition
design[P:, P:] = np.zeros((K, K)) #bottom right partition
self._rwexog = design
return self._rwexog
_inv_rwexog = None
@property
def inv_rwexog(self):
"""Inverse of self.rwexog"""
if self._inv_rwexog is None:
self._inv_rwexog = np.linalg.inv(self.rwexog)
return self._inv_rwexog
_rwendog = None
@property
def rwendog(self):
"""Whitened endogenous variable augmented with restriction parameters"""
if self._rwendog is None:
P = self.ncoeffs
K = self.nconstraint
response = np.zeros((P + K,))
response[:P] = np.dot(self.wexog.T, self.wendog)
response[P:] = self.param
self._rwendog = response
return self._rwendog
_ncp = None
@property
def rnorm_cov_params(self):
"""Parameter covariance under restrictions"""
if self._ncp is None:
P = self.ncoeffs
self._ncp = self.inv_rwexog[:P, :P]
return self._ncp
_wncp = None
@property
def wrnorm_cov_params(self):
"""
Heteroskedasticity-consistent parameter covariance
Used to calculate White standard errors.
"""
if self._wncp is None:
df = self.df_resid
pred = np.dot(self.wexog, self.coeffs)
eps = np.diag((self.wendog - pred) ** 2)
sigmaSq = np.sum(eps)
pinvX = np.dot(self.rnorm_cov_params, self.wexog.T)
self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq
return self._wncp
_coeffs = None
@property
def coeffs(self):
"""Estimated parameters"""
if self._coeffs is None:
betaLambda = np.dot(self.inv_rwexog, self.rwendog)
self._coeffs = betaLambda[:self.ncoeffs]
return self._coeffs
def fit(self):
rncp = self.wrnorm_cov_params
lfit = RegressionResults(self, self.coeffs, normalized_cov_params=rncp)
return lfit
if __name__=="__main__":
import statsmodels.api as sm
dta = np.genfromtxt('./rlsdata.txt', names=True)
design = np.column_stack((dta['Y'],dta['Y']**2,dta[['NE','NC','W','S']].view(float).reshape(dta.shape[0],-1)))
design = sm.add_constant(design, prepend=True)
rls_mod = RLS(dta['G'],design, constr=[0,0,0,1,1,1,1])
rls_fit = rls_mod.fit()
print(rls_fit.params)
| bsd-3-clause |
zorojean/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). LSHForest indexes
have a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
alexsavio/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 81 | 5461 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
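# For orientation (a brief sketch, not part of the original example): with
# projection operator A, measurements y and image x, the two reconstructions
# below solve, respectively,
#   Ridge: min_x ||A x - y||^2 + alpha * ||x||_2^2
#   Lasso: min_x (1 / (2 * n_samples)) * ||A x - y||^2 + alpha * ||x||_1
# i.e. an L2-penalized versus an L1-penalized least-squares problem.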
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
abhirevan/Yelp-Rate-my-Review | src/spectral.py | 1 | 3869 | # -*- coding: utf-8 -*-
from numpy import *
import csv
import argparse
import os
from sklearn import cluster
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
import shutil
def write_data(ip_csv, op_csv, labels, type):
with open(ip_csv, "rb") as source, open(op_csv, "wb") as result:
rdr = csv.reader(source)
wtr = csv.writer(result)
wtr.writerow(next(rdr) + [type])
i = 0
for r in rdr:
wtr.writerow((r) + [labels[i]])
i += 1
def print_analysis(op_csv_list):
y_true = []
y_pred = []
for file in op_csv_list:
file_csv = pd.read_csv(file)
for i, row in enumerate(file_csv.values):
y_true.append(row[2])
y_pred.append(row[5])
print confusion_matrix(y_true, y_pred)
print precision_recall_fscore_support(y_true, y_pred, average='micro')
def gaussian_distance(data, sigma=1.0):
m = shape(data)[0]
adjacency = zeros((m, m))
for i in range(0, m):
for j in range(0, m):
if i >= j: # since it's symmetric, just assign the upper half the same time we assign the lower half
continue
adjacency[j, i] = adjacency[i, j] = sum((data[i] - data[j]) ** 2)
adjacency = exp(-adjacency / (2 * sigma ** 2)) - identity(m)
return adjacency
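# In matrix form (a restatement of the loop above, for reference): the
# affinity between samples i and j is the Gaussian (RBF) kernel
#   W_ij = exp(-||x_i - x_j||**2 / (2 * sigma**2)),
# and subtracting the identity matrix zeroes the diagonal.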
'''
def update_labels(truth, clusters):
print "Updating labels"
labels = {}
for idx, c in enumerate(clusters):
lst = labels.get(c,[])
lst.append(truth[idx])
labels[c]=lst
replace_labels = {}
for k, v in labels.iteritems():
print v
mst_common = most_common(v)
print mst_common
replace_labels[k] = mst_common
print replace_labels
'''
def convert_to_stars(labels, sorted_centroids):
stars = []
for l in labels:
stars.append(sorted_centroids.index(l) + 1)
return stars
def update_labels(polarity, clusters, k):
print "Updating labels"
df = pd.DataFrame({'clusters': clusters, 'polarity': polarity})
df_grouped = df.groupby('clusters')
centroid = [0] * k
for name, group in df_grouped:
centroid[name] = mean(group['polarity'])
sorted_centroids = [centroid.index(x) for x in sorted(centroid)]
labels = convert_to_stars(clusters, sorted_centroids)
return labels
def spectral_clustering(ip_csv, k):
op_csv = ip_csv + "_"
data = []
truth = []
with open(ip_csv, "rb") as source:
rdr = csv.reader(source)
next(rdr)
for r in rdr:
data.append(float(r[3]))
truth.append(int(r[2]))
# Spectral clustering with gaussian distance affintty matrix
print "Running Spectral clustering with gaussian distance affinity matrix"
clustering = cluster.SpectralClustering(k, affinity='precomputed', eigen_solver='arpack')
#clustering = cluster.SpectralClustering(k, affinity='nearest_neighbors', eigen_solver='arpack')
affinity = gaussian_distance(data)
print "Calculated Gaussian distance"
clustering.fit(affinity)
print "Fit model"
clusters = clustering.fit_predict(affinity)
print "Found clusters"
labels = update_labels(data, clusters, k)
write_data(ip_csv, op_csv, labels, "Spectral_Gauss")
os.remove(ip_csv)
os.rename(op_csv, ip_csv)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Find clustering for csv',
)
parser.add_argument(
'ip_csv',
type=str,
)
args = parser.parse_args()
ip_csv = args.ip_csv
op_csv = '{0}_spec.csv'.format(ip_csv.split('.csv')[0])
shutil.copyfile(ip_csv, op_csv)
print "Staring with spectral_clustering++"
spectral_clustering(op_csv, 5)
print "-" * 100
print "Predicting data"
print_analysis([op_csv])
print "-" * 100
| mit |
selective-inference/selective-inference | sandbox/randomized_tests/test_estimation.py | 3 | 3969 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from selection.tests.instance import gaussian_instance
def test_MSE(signal=1, n=100, p=10, s=1):
ninstance = 1
total_mse = 0
nvalid_instance = 0
data_instance = gaussian_instance(n, p, s, signal)
tau = 1.
for i in range(ninstance):
X, y, true_beta, nonzero, sigma = gaussian_instance(n=n, p=p, s=s, signal=signal)
random_Z = np.random.standard_normal(p)
lam, epsilon, active, betaE, cube, initial_soln = selection(X, y, random_Z) # selection not defined -- is in a file that was deleted
print("active set", np.where(active)[0])
if lam < 0:
print("no active covariates")
else:
est = estimation(X, y, active, betaE, cube, epsilon, lam, sigma, tau)
est.compute_mle_all()
mse_mle = est.mse_mle(true_beta[active])
print("MLE", est.mle)
total_mse += mse_mle
nvalid_instance += np.sum(active)
return np.true_divide(total_mse, nvalid_instance)
def MSE_three(signal=5, n=100, p=10, s=0):
ninstance = 5
total_mse_mle, total_mse_unbiased, total_mse_umvu = 0, 0, 0
nvalid_instance = 0
data_instance = instance(n, p, s, signal)
tau = 1.
for i in range(ninstance):
X, y, true_beta, nonzero, sigma = data_instance.generate_response()
random_Z = np.random.standard_normal(p)
lam, epsilon, active, betaE, cube, initial_soln = selection(X, y, random_Z) # selection not defined -- is in a file that was deleted
if lam < 0:
print("no active covariates")
else:
est = umvu(X, y, active, betaE, cube, epsilon, lam, sigma, tau)
est.compute_unbiased_all()
true_vec = true_beta[active]
print("true vector", true_vec)
print("MLE", est.mle, "Unbiased", est.unbiased, "UMVU", est.umvu)
total_mse_mle += est.mse_mle(true_vec)
mse = est.mse_unbiased(true_vec)
total_mse_unbiased += mse[0]
total_mse_umvu += mse[1]
nvalid_instance +=np.sum(active)
if nvalid_instance > 0:
return total_mse_mle/float(nvalid_instance), total_mse_unbiased/float(nvalid_instance), total_mse_umvu/float(nvalid_instance)
def plot_estimation_three():
signal_seq = np.linspace(-10, 10, num=50)
filter = np.zeros(signal_seq.shape[0], dtype=bool)
mse_mle_seq, mse_unbiased_seq, mse_umvu_seq = [], [], []
for i in range(signal_seq.shape[0]):
print("parameter value", signal_seq[i])
mse = MSE_three(signal_seq[i])
if mse is not None:
mse_mle, mse_unbiased, mse_umvu = mse
mse_mle_seq.append(mse_mle)
mse_unbiased_seq.append(mse_unbiased)
mse_umvu_seq.append(mse_umvu)
filter[i] = True
plt.clf()
plt.title("MSE")
fig, ax = plt.subplots()
ax.plot(signal_seq[filter], mse_mle_seq, label = "MLE", linestyle=':', marker='o')
ax.plot(signal_seq[filter], mse_unbiased_seq, label = "Unbiased")
ax.plot(signal_seq[filter], mse_umvu_seq, label ="UMVU")
legend = ax.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
for label in legend.get_texts():
label.set_fontsize('large')
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
plt.pause(0.01)
plt.savefig("MSE")
def make_a_plot(plot=False):
signal_seq = np.linspace(-10, 10, num=20)
mse_seq = []
for i in range(signal_seq.shape[0]):
print("parameter value", signal_seq[i])
        mse = test_MSE(signal_seq[i])
print("MSE", mse)
mse_seq.append(mse)
if plot:
import matplotlib.pyplot as plt
plt.clf()
plt.title("MSE")
plt.plot(signal_seq, mse_seq)
plt.pause(0.01)
plt.savefig("MSE")
| bsd-3-clause |
at15/ts-parallel | bin/agraph.py | 1 | 2982 | #!/usr/bin/env python3
import glob
import re
import csv
import matplotlib.pyplot as plt
def main():
data = {}
operations = ["sort", "reduce"]
types = ["int", "float", "double"]
for op in operations:
for tp in types:
# i.e. sort int
data[op + "_" + tp] = {}
results = glob.glob("*_" + op + "_*_" + tp + ".csv")
for result in results:
backend, num = re.match(
"(.*)_" + op + "_(.*)_" + tp + ".csv", result).groups()
# data[op + "_" + tp]
if backend not in data[op + "_" + tp]:
data[op + "_" + tp][backend] = {}
num = int(num)
# print(backend, num)
data[op + "_" + tp][backend][num] = {}
with open(result) as f:
# NOTE: it will detect the header of CSV and change it to
# key
reader = csv.DictReader(f)
for row in reader:
data[op + "_" + tp][backend][num][row["stage"]
] = row["duration"]
# print(row)
# print(results)
# print(data)
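    # For illustration (hypothetical file name), a result file such as
    # "thrust_sort_1000_int.csv" with rows of the form "stage,duration" ends up as
    #   data["sort_int"]["thrust"][1000] = {"init": "...", "copy": "...", "run": "...", ...}
    # i.e. operation+type -> backend -> vector length -> per-stage durations.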
# now let's draw the graph
plot_data = {}
for op, backends in data.items():
# print(op)
plot_data[op] = []
for backend, results in backends.items():
pdata = {"name": backend, "x": [], "y": []}
# print(backend)
# [(10, {'init': '2771', 'generate': '7667', 'copy': '112781784', 'run': '825079', 'delete': '67504'}), (50, {'init': '1045', 'generate': '8579', 'copy': '110102907', 'run': '1389482', 'delete': '68685'})]
sorted_results = sorted(results.items())
for result in sorted_results:
num, stages = result
# print(num)
if "run" not in stages:
print("didn't find run!", op, backend, num)
continue
pdata["x"].append(num)
pdata["y"].append(stages["run"])
plot_data[op].append(pdata)
# print(plot_data)
i = 1
color_map = {"serial": "C1", "boost": "C2", "thrust": "C3"}
exclude = {"serial": True}
for op, pdatas in plot_data.items():
plt.figure(i)
i += 1
for pdata in pdatas:
if pdata["name"] in exclude:
continue
plt.plot(pdata["x"], pdata["y"],
color_map[pdata["name"]], label=pdata["name"])
plt.title(op)
plt.xlabel("Vector length")
# TODO: ylabel is not shown, and the color changes in different figure
# NOTE: we are using microseconds, because nano seconds got negative
# value
plt.ylabel("Time (us)")
plt.legend(loc='upper right', shadow=True, fontsize='x-small')
plt.show()
if __name__ == "__main__":
main()
| mit |
tanayz/Kaggle | Search_result/untitled0.py | 1 | 1302 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 16:05:47 2015
@author: uszllmd
"""
import pandas as pd
import numpy as np
#from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
from sklearn import pipeline, metrics, grid_search
#from sklearn import decomposition
from nltk.stem.porter import PorterStemmer
import re
from bs4 import BeautifulSoup
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction import text
from sklearn.metrics import accuracy_score,classification_report#,confusion_matrix
from numpy import genfromtxt
tr=genfromtxt("tr.csv",delimiter=',')
tr=tr.reshape(10158,180)
ts=genfromtxt("ts.csv",delimiter=',')
ts=ts.reshape(22513,180)
X=genfromtxt("X.csv",delimiter=',')
X=X.reshape(10158,305)
X_test=genfromtxt("X_test.csv",delimiter=',')
X_test=X_test.reshape(22513,305)
train = pd.read_csv("input/train.csv").fillna("")
test = pd.read_csv("input/test.csv").fillna("")
s_labels=[]
for i in range(len(train.id)):
s_labels.append(str(train["median_relevance"][i])) | apache-2.0 |
Brett777/Predict-Churn | Deploy Persisted Scores.py | 1 | 2784 | import os
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import h2o
import numpy as np
import pandas as pd
from tabulate import tabulate
from sqlalchemy import create_engine
# initialize the model scoring server
h2o.init(nthreads=1,max_mem_size=1, start_h2o=True, strict_version_check = False)
def predict_churn(State,AccountLength,AreaCode,Phone,IntlPlan,VMailPlan,VMailMessage,DayMins,DayCalls,DayCharge,EveMins,EveCalls,EveCharge,NightMins,NightCalls,NightCharge,IntlMins,IntlCalls,IntlCharge,CustServCalls):
# connect to the model scoring service
h2o.init(nthreads=1,max_mem_size=1, start_h2o=True, strict_version_check = False)
# open the downloaded model
ChurnPredictor = h2o.load_model(path='AutoML-leader')
# define a feature vector to evaluate with the model
newData = pd.DataFrame({'State' : State,
'Account Length' : AccountLength,
'Area Code' : AreaCode,
'Phone' : Phone,
'Int\'l Plan' : IntlPlan,
'VMail Plan' : VMailPlan,
'VMail Message' : VMailMessage,
'Day Mins' : DayMins,
'Day Calls' : DayCalls,
'Day Charge' : DayCharge,
'Eve Mins' : EveMins,
'Eve Calls' : EveCalls,
'Eve Charge' : EveCharge,
'Night Mins' : NightMins,
'Night Calls' : NightCalls,
'Night Charge' : NightCharge,
'Intl Mins' :IntlMins,
'Intl Calls' : IntlCalls,
'Intl Charge' : IntlCharge,
'CustServ Calls' : CustServCalls}, index=[0])
# evaluate the feature vector using the model
predictions = ChurnPredictor.predict(h2o.H2OFrame(newData))
predictionsOut = h2o.as_list(predictions, use_pandas=False)
prediction = predictionsOut[1][0]
probabilityChurn = predictionsOut[1][1]
probabilityRetain = predictionsOut[1][2]
mySQL_Username = os.environ['BRETT_MYSQL_USERNAME']
mySQL_Password = os.environ['BRETT_MYSQL_PASSWORD']
mySQL_IP = os.environ['BRETT_MYSQL_IP']
engine = create_engine("mysql+mysqldb://"+mySQL_Username+":"+mySQL_Password+"@"+mySQL_IP+"/customers")
predictionsToDB = h2o.as_list(predictions, use_pandas=True)
predictionsToDB.to_sql(con=engine, name='predictions', if_exists='append')
return "Prediction: " + str(prediction) + " |Probability to Churn: " + str(probabilityChurn) + " |Probability to Retain: " + str(probabilityRetain) | mit |
rs2/bokeh | bokeh/document/events.py | 3 | 26421 | ''' Provide events that represent various changes to Bokeh Documents.
These events are used internally to signal changes to Documents. For
information about user-facing (e.g. UI or tool) events, see the reference
for :ref:`bokeh.events`.
'''
from __future__ import absolute_import
from ..util.dependencies import import_optional
pd = import_optional('pandas')
class DocumentChangedEvent(object):
''' Base class for all internal events representing a change to a
Bokeh Document.
'''
def __init__(self, document, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
self.document = document
self.setter = setter
self.callback_invoker = callback_invoker
def combine(self, event):
'''
'''
return False
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_changed`` if it exists.
'''
if hasattr(receiver, '_document_changed'):
receiver._document_changed(self)
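# A small, hypothetical illustration of the dispatch protocol used throughout this
# module: dispatch() only looks for a specially named hook on the receiver, so any
# object that defines the hook can observe events without subclassing anything here.
#
#   class PrintingReceiver(object):
#       def _document_changed(self, event):
#           print("document changed: %r" % event)
#
#   # event.dispatch(PrintingReceiver()) would call _document_changed(event)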
class DocumentPatchedEvent(DocumentChangedEvent):
''' A Base class for events that represent updating Bokeh Models and
their properties.
'''
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_patched`` if it exists.
'''
super(DocumentPatchedEvent, self).dispatch(receiver)
if hasattr(receiver, '_document_patched'):
receiver._document_patched(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
*Sub-classes must implement this method.*
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
raise NotImplementedError()
class ModelChangedEvent(DocumentPatchedEvent):
''' A concrete event representing updating an attribute and value of a
specific Bokeh Model.
This is the "standard" way of updating most Bokeh model attributes. For
special casing situations that can optimized (e.g. streaming, etc.), a
``hint`` may be supplied that overrides normal mechanisms.
'''
def __init__(self, document, model, attr, old, new, serializable_new, hint=None, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
A Model to update
attr (str) :
The name of the attribute to update on the model.
old (object) :
The old value of the attribute
new (object) :
The new value of the attribute
serializable_new (object) :
A serialized (JSON) version of the new value. It may be
``None`` if a hint is supplied.
hint (DocumentPatchedEvent, optional) :
When appropriate, a secondary event may be supplied that
modifies the normal update process. For example, in order
to stream or patch data more efficiently than the standard
update mechanism.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
if setter is None and isinstance(hint, (ColumnsStreamedEvent, ColumnsPatchedEvent)):
setter = hint.setter
super(ModelChangedEvent, self).__init__(document, setter, callback_invoker)
self.model = model
self.attr = attr
self.old = old
self.new = new
self.serializable_new = serializable_new
self.hint = hint
def combine(self, event):
'''
'''
if not isinstance(event, ModelChangedEvent): return False
# If these are not true something weird is going on, maybe updates from
# Python bokeh.client, don't try to combine
if self.setter != event.setter: return False
if self.document != event.document: return False
if self.hint:
return self.hint.combine(event.hint)
if (self.model == event.model) and (self.attr == event.attr):
self.new = event.new
self.serializable_new = event.serializable_new
self.callback_invoker = event.callback_invoker
return True
return False
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
        This method will invoke ``receiver._document_model_changed`` if it
exists.
'''
super(ModelChangedEvent, self).dispatch(receiver)
if hasattr(receiver, '_document_model_changed'):
receiver._document_model_changed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
from ..model import collect_models
if self.hint is not None:
return self.hint.generate(references, buffers)
value = self.serializable_new
# the new value is an object that may have
# not-yet-in-the-remote-doc references, and may also
# itself not be in the remote doc yet. the remote may
# already have some of the references, but
# unfortunately we don't have an easy way to know
# unless we were to check BEFORE the attr gets changed
# (we need the old _all_models before setting the
# property). So we have to send all the references the
# remote could need, even though it could be inefficient.
# If it turns out we need to fix this we could probably
# do it by adding some complexity.
value_refs = set(collect_models(value))
# we know we don't want a whole new copy of the obj we're patching
# unless it's also the new value
if self.model != value:
value_refs.discard(self.model)
references.update(value_refs)
return { 'kind' : 'ModelChanged',
'model' : self.model.ref,
'attr' : self.attr,
'new' : value }
class ColumnDataChangedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently replacing *all*
existing data for a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
cols (list[str]) :
optional explicit list of column names to update. If None, all
columns will be updated (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(ColumnDataChangedEvent, self).__init__(document, setter, callback_invoker)
self.column_source = column_source
self.cols = cols
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._column_data_changed`` if it exists.
'''
super(ColumnDataChangedEvent, self).dispatch(receiver)
if hasattr(receiver, '_column_data_changed'):
receiver._column_data_changed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnDataChanged'
'column_source' : <reference to a CDS>
                'new' : <new data to stream to column_source>
'cols' : <specific columns to update>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
from ..util.serialization import transform_column_source_data
data_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)
return { 'kind' : 'ColumnDataChanged',
'column_source' : self.column_source.ref,
'new' : data_dict,
'cols' : self.cols}
class ColumnsStreamedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently streaming new data
to a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, data, rollover, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
The data source to stream new data to.
data (dict or DataFrame) :
New data to stream.
If a DataFrame, will be stored as ``{c: df[c] for c in df.columns}``
rollover (int) :
A rollover limit. If the data source columns exceed this
limit, earlier values will be discarded to maintain the
column length under the limit.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(ColumnsStreamedEvent, self).__init__(document, setter, callback_invoker)
self.column_source = column_source
if pd and isinstance(data, pd.DataFrame):
data = {c: data[c] for c in data.columns}
self.data = data
self.rollover = rollover
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._columns_streamed`` if it exists.
'''
super(ColumnsStreamedEvent, self).dispatch(receiver)
if hasattr(receiver, '_columns_streamed'):
receiver._columns_streamed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnsStreamed'
'column_source' : <reference to a CDS>
                'data' : <new data to stream to column_source>
'rollover' : <rollover limit>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'ColumnsStreamed',
'column_source' : self.column_source.ref,
'data' : self.data,
'rollover' : self.rollover }
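# For illustration, streaming {'x': [10], 'y': [20]} with rollover=100 would make
# generate() return a message like the following (the 'column_source' value shown is
# a made-up model reference):
#
#   {'kind': 'ColumnsStreamed',
#    'column_source': {'id': '1001', 'type': 'ColumnDataSource'},
#    'data': {'x': [10], 'y': [20]},
#    'rollover': 100}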
class ColumnsPatchedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently applying data patches
to a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, patches, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
The data source to apply patches to.
            patches (list) :
                The patches to apply to the data source's columns.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(ColumnsPatchedEvent, self).__init__(document, setter, callback_invoker)
self.column_source = column_source
self.patches = patches
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._columns_patched`` if it exists.
'''
super(ColumnsPatchedEvent, self).dispatch(receiver)
if hasattr(receiver, '_columns_patched'):
receiver._columns_patched(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnsPatched'
'column_source' : <reference to a CDS>
'patches' : <patches to apply to column_source>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'ColumnsPatched',
'column_source' : self.column_source.ref,
'patches' : self.patches }
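# For illustration, the 'patches' payload mirrors whatever was handed to
# ColumnDataSource.patch(); typically that is a mapping from column name to a list
# of (index, new_value) pairs (format assumed here), e.g.
#
#   {'kind': 'ColumnsPatched',
#    'column_source': {'id': '1001', 'type': 'ColumnDataSource'},
#    'patches': {'x': [(0, 10), (5, 11)]}}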
class TitleChangedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to the title of a Bokeh
Document.
'''
def __init__(self, document, title, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
title (str) :
The new title to set on the Document
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(TitleChangedEvent, self).__init__(document, setter, callback_invoker)
self.title = title
def combine(self, event):
'''
'''
if not isinstance(event, TitleChangedEvent): return False
# If these are not true something weird is going on, maybe updates from
# Python bokeh.client, don't try to combine
if self.setter != event.setter: return False
if self.document != event.document: return False
self.title = event.title
self.callback_invoker = event.callback_invoker
return True
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'TitleChanged'
'title' : <new title to set>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'TitleChanged',
'title' : self.title }
class RootAddedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to add a new Model to a
Document's collection of "root" models.
'''
def __init__(self, document, model, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
The Bokeh Model to add as a Document root.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(RootAddedEvent, self).__init__(document, setter, callback_invoker)
self.model = model
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'RootAdded'
                'model' : <reference to a Model>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
references.update(self.model.references())
return { 'kind' : 'RootAdded',
'model' : self.model.ref }
class RootRemovedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to remove an existing Model
from a Document's collection of "root" models.
'''
def __init__(self, document, model, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
The Bokeh Model to remove as a Document root.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(RootRemovedEvent, self).__init__(document, setter, callback_invoker)
self.model = model
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'RootRemoved'
                'model' : <reference to a Model>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'RootRemoved',
'model' : self.model.ref }
class SessionCallbackAdded(DocumentChangedEvent):
''' A concrete event representing a change to add a new callback (e.g.
periodic, timeout, or "next tick") to a Document.
'''
def __init__(self, document, callback):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
callback (SessionCallback) :
The callback to add
'''
super(SessionCallbackAdded, self).__init__(document)
self.callback = callback
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists.
'''
super(SessionCallbackAdded, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_added'):
receiver._session_callback_added(self)
class SessionCallbackRemoved(DocumentChangedEvent):
''' A concrete event representing a change to remove an existing callback
(e.g. periodic, timeout, or "next tick") from a Document.
'''
def __init__(self, document, callback):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
callback (SessionCallback) :
The callback to remove
'''
super(SessionCallbackRemoved, self).__init__(document)
self.callback = callback
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_removed`` if
it exists.
'''
super(SessionCallbackRemoved, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_removed'):
receiver._session_callback_removed(self)
| bsd-3-clause |
doublsky/MLProfile | gen_layout.py | 1 | 2916 | from gurobipy import *
import pandas as pd
num_acc = 42
startx = 0
endx = 4
starty = 0
endy = 4
acc_per_tile = 7
# first 48 accelerators
data = pd.read_csv('all.csv', index_col=False)
accelerators = data.drop('Workload', axis=1).sum(axis=0).sort_values(ascending=False).index[:num_acc]
assert len(accelerators) == num_acc
# create comm cost matrix
cost_mat = {}
for acc1 in accelerators:
for acc2 in accelerators:
cost_mat[acc1, acc2] = 0
for acc1 in accelerators:
for acc2 in accelerators:
for _, row in data.iterrows():
if pd.notnull(row[acc1]) and pd.notnull(row[acc2]):
cost_mat[acc1, acc2] += 1
# simulate 160 accelerators
# accelerators = ['A'+str(x) for x in range(160)]
# on a 4x4 chip
locationX = range(startx, endx)
locationY = range(starty, endy)
m = Model('layout')
locations = m.addVars(accelerators, locationX, locationY, vtype=GRB.BINARY, name='locations')
coeffX = {(acc, i, j): i for acc in accelerators for i in locationX for j in locationY}
coeffY = {(acc, i, j): j for acc in accelerators for i in locationX for j in locationY}
locX = m.addVars(accelerators, vtype=GRB.INTEGER, name='locX')
locY = m.addVars(accelerators, vtype=GRB.INTEGER, name='locY')
m.addConstrs((locX[acc] == locations.prod(coeffX, acc, '*', '*') for acc in accelerators), name='bin2decx')
m.addConstrs((locY[acc] == locations.prod(coeffY, acc, '*', '*') for acc in accelerators), name='bin2decy')
distX = m.addVars(accelerators, accelerators, name='distX')
distY = m.addVars(accelerators, accelerators, name='distY')
distance = m.addVars(accelerators, accelerators, name='distance')
for acc1 in accelerators:
for acc2 in accelerators:
distance[acc1, acc2] = (distX[acc1, acc2] + distY[acc1, acc2]) * cost_mat[acc1, acc2]
# abs
m.addConstrs((locX[acc1] - locX[acc2] <= distX[acc1, acc2] for acc1 in accelerators for acc2 in accelerators), name='absx1')
m.addConstrs((locX[acc2] - locX[acc1] <= distX[acc1, acc2] for acc1 in accelerators for acc2 in accelerators), name='absx2')
m.addConstrs((locY[acc1] - locY[acc2] <= distY[acc1, acc2] for acc1 in accelerators for acc2 in accelerators), name='absy1')
m.addConstrs((locY[acc2] - locY[acc1] <= distY[acc1, acc2] for acc1 in accelerators for acc2 in accelerators), name='absy2')
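# The four constraint sets above are the standard LP linearization of an absolute value:
# distX >= locX[acc1] - locX[acc2] and distX >= locX[acc2] - locX[acc1] together force
# distX >= |locX[acc1] - locX[acc2]|, and because distX only enters the minimized
# objective with nonnegative weight, it is tight at the optimum. For example, with locX
# values 3 and 1 the constraints read distX >= 2 and distX >= -2, so the optimizer sets
# distX = 2 = |3 - 1|.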
# each accelerator only appear once
m.addConstrs((locations.sum(i, '*', '*') == 1 for i in accelerators), name='acc')
# each tile has acc_per_tile accelrators
none_or_all = m.addVars(locationX, locationY, vtype=GRB.BINARY, name='none_or_all')
m.addConstrs((locations.sum('*', i, j) == acc_per_tile * none_or_all[i, j] for i in locationX for j in locationY), name='tile')
# minimize distance
m.setObjective(distance.sum('*', '*'), GRB.MINIMIZE)
m.optimize()
res = pd.DataFrame()
for acc in accelerators:
res.loc[acc, 'locX'] = locX[acc].x
res.loc[acc, 'locY'] = locY[acc].x
print res
res.to_csv('layout.csv')
| mit |
qifeigit/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
decvalts/landlab | landlab/components/potentiality_flowrouting/examples/test_script_fr2.py | 1 | 8071 | # -*- coding: utf-8 -*-
"""test_script_fr2
A script of VV's potentiality flow routing method.
This version attempts to weight the potentials by slopes, equivalent to varying
the depth of the box into which the "injection molding" occurs.
Created on Fri Feb 20 13:45:52 2015
@author: danhobley
"""
from __future__ import print_function
#from landlab import RasterModelGrid
#from landlab.plot.imshow import imshow_node_grid
import numpy as np
from pylab import imshow, show, contour, figure, clabel, quiver
from matplotlib.ticker import MaxNLocator
sqrt = np.sqrt
n = 50
#mg = RasterModelGrid(n, n, 1.)
nt = 10000
width = 1.
p_thresh = 0.000001
diffusivity_offsetter = 100000000.
core = (slice(1,-1),slice(1,-1))
timp=np.zeros((nt, 2), dtype=int)
dtwidth = 0.2
hR = np.zeros((n+2,n+2), dtype=float)
pR=np.zeros_like(hR)
p=pR.view().ravel()
qsourceR=np.zeros_like(hR)
qsource=qsourceR[core].view().ravel()
qspR=np.zeros_like(hR)
qsp = qspR[core].view().ravel()
qsourceR[core][0,-1]=.9*sqrt(2.)*.33
qsourceR[core][0,0]=.9*sqrt(2.)
qsourceR[core][n-1,n//2-1]=1
qspR[core][0,-1]=sqrt(2)
qspR[core][0,0]=sqrt(2)
qspR[core][n-1,n//2-1]=1
slope=0.1
fixdis = 0.4 #this is the p contour at which flow is "pinned"
variable_K = False
hgradEx = np.zeros_like(hR)
hgradWx = np.zeros_like(hR)
hgradNx = np.zeros_like(hR)
hgradSx = np.zeros_like(hR)
pgradEx = np.zeros_like(hR)
pgradWx = np.zeros_like(hR)
pgradNx = np.zeros_like(hR)
pgradSx = np.zeros_like(hR)
hgradEy = np.zeros_like(hR)
hgradWy = np.zeros_like(hR)
hgradNy = np.zeros_like(hR)
hgradSy = np.zeros_like(hR)
pgradEy = np.zeros_like(hR)
pgradWy = np.zeros_like(hR)
pgradNy = np.zeros_like(hR)
pgradSy = np.zeros_like(hR)
CslopeE = np.zeros_like(hR)
CslopeW = np.zeros_like(hR)
CslopeN = np.zeros_like(hR)
CslopeS = np.zeros_like(hR)
thetaE = np.zeros_like(hR)
thetaW = np.zeros_like(hR)
thetaN = np.zeros_like(hR)
thetaS = np.zeros_like(hR)
theta_vE = np.zeros_like(hR)
theta_vW = np.zeros_like(hR)
theta_vN = np.zeros_like(hR)
theta_vS = np.zeros_like(hR)
vmagE = np.zeros_like(hR)
vmagW = np.zeros_like(hR)
vmagN = np.zeros_like(hR)
vmagS = np.zeros_like(hR)
uE = np.zeros_like(hR)
uW = np.zeros_like(hR)
uN = np.zeros_like(hR)
uS = np.zeros_like(hR)
Wchanged = np.zeros_like(hR, dtype=bool)
Echanged = np.zeros_like(Wchanged)
Nchanged = np.zeros_like(Wchanged)
Schanged = np.zeros_like(Wchanged)
#uval = np.zeros_like(hR)
#vval = np.zeros_like(hR)
#set up slice offsets:
Es = (slice(1,-1),slice(2,n+2))
NEs = (slice(2,n+2),slice(2,n+2))
Ns = (slice(2,n+2),slice(1,-1))
NWs = (slice(2,n+2),slice(0,-2))
Ws = (slice(1,-1),slice(0,-2))
SWs = (slice(0,-2),slice(0,-2))
Ss = (slice(0,-2),slice(1,-1))
SEs = (slice(0,-2),slice(2,n+2))
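# The slice tuples above are shifted views into the padded (n+2)x(n+2) arrays: for
# every interior ("core") node they select one of its eight neighbours (e.g. hR[Es]
# is the height of each node's eastern neighbour), which lets the finite-difference
# stencils below be written as whole-array expressions instead of explicit loops.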
for i in xrange(nt):
if i%100==0:
print(i)
qE = np.zeros_like(hR)
qW = np.zeros_like(hR)
qN = np.zeros_like(hR)
qS = np.zeros_like(hR)
#update the dummy edges of our variables:
hR[0,1:-1] = hR[1,1:-1]
hR[-1,1:-1] = hR[-2,1:-1]
hR[1:-1,0] = hR[1:-1,1]
hR[1:-1,-1] = hR[1:-1,-2]
pR[0,1:-1] = pR[1,1:-1]
pR[-1,1:-1] = pR[-2,1:-1]
pR[1:-1,0] = pR[1:-1,1]
pR[1:-1,-1] = pR[1:-1,-2]
hgradEx[core] = (hR[core]-hR[Es])#/width
hgradEy[core] = hR[SEs]-hR[NEs]+hR[Ss]-hR[Ns]
hgradEy[core] *= 0.25
CslopeE[core] = sqrt(np.square(hgradEx[core])+np.square(hgradEy[core]))
thetaE[core] = np.arctan(np.fabs(hgradEy[core])/(np.fabs(hgradEx[core])+1.e-10))
pgradEx[core] = uE[core] #pgrad is VV's vv, a velocity
pgradEy[core] = uN[core]+uS[core]+uN[Es]+uS[Es]
pgradEy[core] *= 0.25
vmagE[core] = sqrt(np.square(pgradEx[core])+np.square(pgradEy[core]))
#now resolve the effective flow magnitudes to downhill
theta_vE[core] = np.arctan(np.fabs(pgradEy[core])/(np.fabs(pgradEx[core])+1.e-10))
vmagE[core] *= np.cos(np.fabs(thetaE[core]-theta_vE[core]))
qE[core] = np.sign(hgradEx[core])*vmagE[core]*(CslopeE[core]-slope).clip(0.)*np.cos(thetaE[core])
#the clip should deal with the eastern edge, but return here to check if probs
hgradWx[core] = (hR[Ws]-hR[core])#/width
hgradWy[core] = hR[SWs]-hR[NWs]+hR[Ss]-hR[Ns]
hgradWy[core] *= 0.25
CslopeW[core] = sqrt(np.square(hgradWx[core])+np.square(hgradWy[core]))
thetaW[core] = np.arctan(np.fabs(hgradWy[core])/(np.fabs(hgradWx[core])+1.e-10))
pgradWx[core] = uW[core]#/width
pgradWy[core] = uN[core]+uS[core]+uN[Ws]+uS[Ws]
pgradWy[core] *= 0.25
vmagW[core] = sqrt(np.square(pgradWx[core])+np.square(pgradWy[core]))
theta_vW[core] = np.arctan(np.fabs(pgradWy[core])/(np.fabs(pgradWx[core])+1.e-10))
vmagW[core] *= np.cos(np.fabs(thetaW[core]-theta_vW[core]))
qW[core] = np.sign(hgradWx[core])*vmagW[core]*(CslopeW[core]-slope).clip(0.)*np.cos(thetaW[core])
hgradNx[core] = hR[NWs]-hR[NEs]+hR[Ws]-hR[Es]
hgradNx[core] *= 0.25
hgradNy[core] = (hR[core]-hR[Ns])#/width
CslopeN[core] = sqrt(np.square(hgradNx[core])+np.square(hgradNy[core]))
thetaN[core] = np.arctan(np.fabs(hgradNy[core])/(np.fabs(hgradNx[core])+1.e-10))
pgradNx[core] = uE[core]+uW[core]+uE[Ns]+uW[Ns]
pgradNx[core] *= 0.25
pgradNy[core] = uN[core]#/width
vmagN[core] = sqrt(np.square(pgradNx[core])+np.square(pgradNy[core]))
theta_vN[core] = np.arctan(np.fabs(pgradNy[core])/(np.fabs(pgradNx[core])+1.e-10))
vmagN[core] *= np.cos(np.fabs(thetaN[core]-theta_vN[core]))
qN[core] = np.sign(hgradNy[core])*vmagN[core]*(CslopeN[core]-slope).clip(0.)*np.sin(thetaN[core])
hgradSx[core] = hR[SWs]-hR[SEs]+hR[Ws]-hR[Es]
hgradSx[core] *= 0.25
hgradSy[core] = (hR[Ss]-hR[core])#/width
CslopeS[core] = sqrt(np.square(hgradSx[core])+np.square(hgradSy[core]))
thetaS[core] = np.arctan(np.fabs(hgradSy[core])/(np.fabs(hgradSx[core])+1.e-10))
pgradSx[core] = uE[core]+uW[core]+uE[Ss]+uW[Ss]
pgradSx[core] *= 0.25
pgradSy[core] = uS[core]#/width
vmagS[core] = sqrt(np.square(pgradSx[core])+np.square(pgradSy[core]))
theta_vS[core] = np.arctan(np.fabs(pgradSy[core])/(np.fabs(pgradSx[core])+1.e-10))
vmagS[core] *= np.cos(np.fabs(thetaS[core]-theta_vS[core]))
qS[core] = np.sign(hgradSy[core])*vmagS[core]*(CslopeS[core]-slope).clip(0.)*np.sin(thetaS[core])
hR[core] += dtwidth*(qS[core]+qW[core]-qN[core]-qE[core]+qsourceR[core])
###P SOLVER
#mask for which core nodes get updated:
mask = (hR[core]<p_thresh)
#not_mask = np.logical_not(mask)
if variable_K:
kE = 1.+np.fabs(hR[Es]-hR[core])#+diffusivity_offsetter)#/diffusivity_offsetter
kW = 1.+np.fabs(hR[Ws]-hR[core])#+diffusivity_offsetter)#/diffusivity_offsetter
kS = 1.+np.fabs(hR[Ss]-hR[core])#+diffusivity_offsetter)#/diffusivity_offsetter
kN = 1.+np.fabs(hR[Ns]-hR[core])#+diffusivity_offsetter)#/diffusivity_offsetter
else:
kE = 1.
kW = 1.
kN = 1.
kS = 1.
Wchanged[core] = np.less(hR[Ws]+hR[core],fixdis)
Echanged[core] = np.less(hR[Es]+hR[core],fixdis)
Nchanged[core] = np.less(hR[Ns]+hR[core],fixdis)
Schanged[core] = np.less(hR[Ss]+hR[core],fixdis)
for j in xrange(10):
uW[Wchanged] = kW*(pR[Ws]-pR[core])[Wchanged[core]]
uE[Echanged] = kE*(pR[Es]-pR[core])[Echanged[core]]
uN[Nchanged] = kN*(pR[Ns]-pR[core])[Nchanged[core]]
uS[Schanged] = kS*(pR[Ss]-pR[core])[Schanged[core]]
pR[core] += uW[core]
pR[core] += uS[core]
pR[core] -= uE[core]
pR[core] -= uN[core]
pR[core] += qspR[core]
pR[core] /= kN+kS+kE+kW
pR[core][mask] = 0.
X,Y = np.meshgrid(np.arange(n),np.arange(n))
uval = uW[core]+uE[core]
vval = uN[core]+uS[core]
#imshow_node_grid(mg, h)
figure(1)
f1 = imshow(hR[core])
figure(2)
f2 = contour(X,Y,hR[core], locator=MaxNLocator(nbins=100))
clabel(f2)
quiver(X,Y,uval,vval)
figure(3)
f3 = contour(X,Y,pR[core], locator=MaxNLocator(nbins=100))
clabel(f3)
quiver(X,Y,uval,vval)
figure(4)
contour(X,Y,hR[core], locator=MaxNLocator(nbins=100))
contour(X,Y,pR[core], locator=MaxNLocator(nbins=100))
| mit |
richardwolny/sms-tools | lectures/03-Fourier-properties/plots-code/convolution-1.py | 24 | 1341 | import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import dftModel as DF
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
(fs, x2) = UF.wavread('../../../sounds/impulse-response.wav')
x1 = x[40000:44096]
N = 4096
plt.figure(1, figsize=(9.5, 7))
plt.subplot(3,2,1)
plt.title('x1 (ocean.wav)')
plt.plot(x1, 'b')
plt.axis([0,N,min(x1),max(x1)])
plt.subplot(3,2,2)
plt.title('x2 (impulse-response.wav)')
plt.plot(x2, 'b')
plt.axis([0,N,min(x2),max(x2)])
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX1 = mX1 - max(mX1)
plt.subplot(3,2,3)
plt.title('X1')
plt.plot(mX1, 'r')
plt.axis([0,N/2,-70,0])
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
mX2 = mX2 - max(mX2)
plt.subplot(3,2,4)
plt.title('X2')
plt.plot(mX2, 'r')
plt.axis([0,N/2,-70,0])
y = np.convolve(x1, x2)
mY, pY = DF.dftAnal(y[0:N], np.ones(N), N)
mY = mY - max(mY)
plt.subplot(3,2,5)
plt.title('DFT(x1 * x2)')
plt.plot(mY, 'r')
plt.axis([0,N/2,-70,0])
plt.subplot(3,2,6)
plt.title('X1 x X2')
mY1 = 20*np.log10(np.abs(fft(x1) * fft(x2)))
mY1 = mY1 - max(mY1)
plt.plot(mY1[0:N/2], 'r')
plt.axis([0,N/2,-84,0])
plt.tight_layout()
plt.savefig('convolution-1.png')
plt.show()
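# A quick numeric sanity check of the convolution theorem shown in the last two
# panels (a sketch only; it reuses the numpy/scipy imports above). For *circular*
# convolution the identity DFT(x1 (*) x2) = X1 * X2 is exact:
#
#   a = np.random.randn(16)
#   b = np.random.randn(16)
#   circ = np.real(ifft(fft(a) * fft(b)))
#   ref = np.array([np.dot(a, np.roll(b[::-1], k + 1)) for k in range(16)])
#   np.allclose(circ, ref)   # -> True
#
# The figure instead compares a *linear* convolution (np.convolve) truncated to N
# samples against X1*X2, which is why those two spectra agree only approximately.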
| agpl-3.0 |
tdhopper/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
DGrady/pandas | scripts/find_commits_touching_func.py | 5 | 6602 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# copryright 2013, y-p @ github
from __future__ import print_function
from pandas.compat import range, lrange, map, string_types, text_type
"""Search the git history for all commits touching a named method
You need the sh module to run this
WARNING: this script uses git clean -f, running it on a repo with untracked files
will probably erase them.
"""
import logging
import re
import os
from collections import namedtuple
from pandas.compat import parse_date
try:
import sh
except ImportError:
raise ImportError("The 'sh' package is required in order to run this script. ")
import argparse
desc = """
Find all commits touching a specified function across the codebase.
""".strip()
argparser = argparse.ArgumentParser(description=desc)
argparser.add_argument('funcname', metavar='FUNCNAME',
help='Name of function/method to search for changes on.')
argparser.add_argument('-f', '--file-masks', metavar='f_re(,f_re)*',
default=["\.py.?$"],
                       help='comma separated list of regexes to match filenames against\n'+
                       'defaults to all .py? files')
argparser.add_argument('-d', '--dir-masks', metavar='d_re(,d_re)*',
default=[],
                       help='comma separated list of regexes to match base path against')
argparser.add_argument('-p', '--path-masks', metavar='p_re(,p_re)*',
default=[],
                       help='comma separated list of regexes to match full file path against')
argparser.add_argument('-y', '--saw-the-warning',
action='store_true',default=False,
help='must specify this to run, acknowledge you realize this will erase untracked files')
argparser.add_argument('--debug-level',
default="CRITICAL",
help='debug level of messages (DEBUG,INFO,etc...)')
args = argparser.parse_args()
lfmt = logging.Formatter(fmt='%(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S'
)
shh = logging.StreamHandler()
shh.setFormatter(lfmt)
logger=logging.getLogger("findit")
logger.addHandler(shh)
Hit=namedtuple("Hit","commit path")
HASH_LEN=8
def clean_checkout(comm):
h,s,d = get_commit_vitals(comm)
if len(s) > 60:
s = s[:60] + "..."
s=s.split("\n")[0]
logger.info("CO: %s %s" % (comm,s ))
sh.git('checkout', comm ,_tty_out=False)
sh.git('clean', '-f')
def get_hits(defname,files=()):
cs=set()
for f in files:
try:
r=sh.git('blame', '-L', '/def\s*{start}/,/def/'.format(start=defname),f,_tty_out=False)
except sh.ErrorReturnCode_128:
logger.debug("no matches in %s" % f)
continue
lines = r.strip().splitlines()[:-1]
# remove comment lines
lines = [x for x in lines if not re.search("^\w+\s*\(.+\)\s*#",x)]
hits = set(map(lambda x: x.split(" ")[0],lines))
cs.update(set([Hit(commit=c,path=f) for c in hits]))
return cs
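# Note on the git invocation above: "git blame -L /def\s*<name>/,/def/ <file>" blames
# only the lines from the matched "def <name>" up to the next line matching "def", so
# the first whitespace-separated token of each output line is the (abbreviated) hash
# of the commit that last touched that part of the method -- which is exactly what
# get_hits collects into Hit tuples.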
def get_commit_info(c,fmt,sep='\t'):
r=sh.git('log', "--format={}".format(fmt), '{}^..{}'.format(c,c),"-n","1",_tty_out=False)
return text_type(r).split(sep)
def get_commit_vitals(c,hlen=HASH_LEN):
h,s,d= get_commit_info(c,'%H\t%s\t%ci',"\t")
return h[:hlen],s,parse_date(d)
def file_filter(state,dirname,fnames):
if args.dir_masks and not any([re.search(x,dirname) for x in args.dir_masks]):
return
for f in fnames:
p = os.path.abspath(os.path.join(os.path.realpath(dirname),f))
if any([re.search(x,f) for x in args.file_masks])\
or any([re.search(x,p) for x in args.path_masks]):
if os.path.isfile(p):
state['files'].append(p)
def search(defname,head_commit="HEAD"):
HEAD,s = get_commit_vitals("HEAD")[:2]
logger.info("HEAD at %s: %s" % (HEAD,s))
done_commits = set()
# allhits = set()
files = []
state = dict(files=files)
os.path.walk('.',file_filter,state)
# files now holds a list of paths to files
# seed with hits from q
allhits= set(get_hits(defname, files = files))
q = set([HEAD])
try:
while q:
h=q.pop()
clean_checkout(h)
hits = get_hits(defname, files = files)
for x in hits:
prevc = get_commit_vitals(x.commit+"^")[0]
if prevc not in done_commits:
q.add(prevc)
allhits.update(hits)
done_commits.add(h)
logger.debug("Remaining: %s" % q)
finally:
logger.info("Restoring HEAD to %s" % HEAD)
clean_checkout(HEAD)
return allhits
def pprint_hits(hits):
SUBJ_LEN=50
PATH_LEN = 20
hits=list(hits)
max_p = 0
for hit in hits:
p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1]
max_p=max(max_p,len(p))
if max_p < PATH_LEN:
SUBJ_LEN += PATH_LEN - max_p
PATH_LEN = max_p
def sorter(i):
h,s,d=get_commit_vitals(hits[i].commit)
return hits[i].path,d
print("\nThese commits touched the %s method in these files on these dates:\n" \
% args.funcname)
for i in sorted(lrange(len(hits)),key=sorter):
hit = hits[i]
h,s,d=get_commit_vitals(hit.commit)
p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1]
fmt = "{:%d} {:10} {:<%d} {:<%d}" % (HASH_LEN, SUBJ_LEN, PATH_LEN)
if len(s) > SUBJ_LEN:
s = s[:SUBJ_LEN-5] + " ..."
print(fmt.format(h[:HASH_LEN],d.isoformat()[:10],s,p[-20:]) )
print("\n")
def main():
if not args.saw_the_warning:
argparser.print_help()
print("""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
WARNING: this script uses git clean -f; running it on a repo with untracked files will probably erase them.
It's recommended that you make a fresh clone and run from its root directory.
You must specify the -y argument to ignore this warning.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
""")
return
if isinstance(args.file_masks, string_types):
args.file_masks = args.file_masks.split(',')
if isinstance(args.path_masks, string_types):
args.path_masks = args.path_masks.split(',')
if isinstance(args.dir_masks, string_types):
args.dir_masks = args.dir_masks.split(',')
logger.setLevel(getattr(logging,args.debug_level))
hits=search(args.funcname)
pprint_hits(hits)
pass
if __name__ == "__main__":
import sys
sys.exit(main())
| bsd-3-clause |
pkruskal/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
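# Implementation note: np.searchsorted(xgrid, longitude) (and likewise for ygrid)
# converts each observation's coordinates into grid-cell indices, so
# coverages[:, -iy, ix] pulls all 14 environmental layers at every training/testing
# point in a single vectorised lookup rather than a Python loop.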
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
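    # With voting='soft' and weights=[2, 1, 1], the ensemble probability is the
    # weighted average of the per-classifier probabilities, hence the division
    # by the weight sum (2 + 1 + 1 = 4) in the expected values above.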
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
liyu1990/sklearn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
kdebrab/pandas | pandas/tests/io/msgpack/test_format.py | 25 | 2882 | # coding: utf-8
from pandas.io.msgpack import unpackb
def check(src, should, use_list=0):
assert unpackb(src, use_list=use_list) == should
def testSimpleValue():
check(b"\x93\xc0\xc2\xc3", (None, False, True, ))
def testFixnum():
check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff", ((0,
64,
127, ),
(-32,
-16,
-1, ), ))
def testFixArray():
check(b"\x92\x90\x91\x91\xc0", ((), ((None, ), ), ), )
def testFixRaw():
check(b"\x94\xa0\xa1a\xa2bc\xa3def", (b"", b"a", b"bc", b"def", ), )
def testFixMap():
check(b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80",
{False: {None: None},
True: {None: {}}}, )
def testUnsignedInt():
check(b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
b"\xce\xff\xff\xff\xff",
(0,
128,
255,
0,
32768,
65535,
0,
2147483648,
4294967295, ), )
def testSignedInt():
check(b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
b"\xd2\xff\xff\xff\xff", (0,
-128,
-1,
0,
-32768,
-1,
0,
-2147483648,
-1, ))
def testRaw():
check(b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
(b"", b"a", b"ab", b"", b"a", b"ab"))
def testArray():
check(b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
b"\xc2\xc3", ((), (None, ), (False, True), (), (None, ),
(False, True)))
def testMap():
check(b"\x96"
b"\xde\x00\x00"
b"\xde\x00\x01\xc0\xc2"
b"\xde\x00\x02\xc0\xc2\xc3\xc2"
b"\xdf\x00\x00\x00\x00"
b"\xdf\x00\x00\x00\x01\xc0\xc2"
b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2", ({}, {None: False},
{True: False,
None: False}, {},
{None: False},
{True: False,
None: False}))
| bsd-3-clause |
josesho/bootstrapContrast | bootstrap_contrast/old__/_old.py | 2 | 36463 | '''The bootstrapContrast module.'''
from __future__ import division
from scipy.stats import ttest_ind, ttest_1samp, ttest_rel, mannwhitneyu, norm
from collections import OrderedDict
from numpy.random import randint
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator, MaxNLocator, LinearLocator, FixedLocator
from decimal import Decimal
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams, rcdefaults
import sys
import seaborn.apionly as sns
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# This imports the custom functions used.
# These have been placed in separate .py files for reduced code clutter.
from .mpl_tools import rotateTicks, normalizeSwarmY, normalizeContrastY, offsetSwarmX, resetSwarmX, getSwarmSpan
from .mpl_tools import align_yaxis, halfviolin, drawback_y, drawback_x
from .bootstrap_tools import bootstrap, jackknife_indexes, bca
from .plot_bootstrap_tools import plotbootstrap, plotbootstrap_hubspoke, swarmsummary
## This is for sandboxing. Features and functions under testing go here.
# from .sandbox import contrastplot_test
from .prototype import cp_proto
from .plot_tools_ import halfviolin,align_yaxis,rotate_ticks
# Taken without modification from scikits.bootstrap package
# Keep python 2/3 compatibility, without using six. At some point,
# we may need to add six as a requirement, but right now we can avoid it.
try:
xrange
except NameError:
xrange = range
class InstabilityWarning(UserWarning):
"""Issued when results may be unstable."""
pass
def contrastplot(
data, x=None, y=None, idx=None, idcol=None,
alpha=0.75,
axis_title_size=None,
ci=95,
contrastShareY=True,
contrastEffectSizeLineStyle='solid',
contrastEffectSizeLineColor='black',
contrastYlim=None,
contrastZeroLineStyle='solid',
contrastZeroLineColor='black',
connectPairs=True,
effectSizeYLabel="Effect Size",
figsize=None,
floatContrast=True,
floatSwarmSpacer=0.2,
heightRatio=(1, 1),
lineWidth=2,
legend=True,
legendFontSize=14,
legendFontProps={},
paired=False,
pairedDeltaLineAlpha=0.3,
pairedDeltaLineWidth=1.2,
pal=None,
rawMarkerSize=8,
rawMarkerType='o',
reps=3000,
showGroupCount=True,
showCI=False,
showAllYAxes=False,
showRawData=True,
smoothboot=False,
statfunction=None,
summaryBar=False,
summaryBarColor='grey',
summaryBarAlpha=0.25,
summaryColour='black',
summaryLine=True,
summaryLineStyle='solid',
summaryLineWidth=0.25,
summaryMarkerSize=10,
summaryMarkerType='o',
swarmShareY=True,
swarmYlim=None,
tickAngle=45,
tickAlignment='right',
violinOffset=0.375,
violinWidth=0.2,
violinColor='k',
xticksize=None,
yticksize=None,
**kwargs):
'''Takes a pandas DataFrame and produces a contrast plot:
either a Cummings hub-and-spoke plot or a Gardner-Altman contrast plot.
Paired and unpaired options available.
Keyword arguments:
data: pandas DataFrame
x: string
column name containing categories to be plotted on the x-axis.
y: string
column name containing values to be plotted on the y-axis.
idx: tuple
        flexible declaration of groupwise comparisons.
idcol: string
for paired plots.
alpha: float
alpha (transparency) of raw swarmed data points.
axis_title_size=None
ci=95
contrastShareY=True
contrastEffectSizeLineStyle='solid'
contrastEffectSizeLineColor='black'
contrastYlim=None
contrastZeroLineStyle='solid'
contrastZeroLineColor='black'
effectSizeYLabel="Effect Size"
figsize=None
floatContrast=True
floatSwarmSpacer=0.2
heightRatio=(1,1)
lineWidth=2
legend=True
legendFontSize=14
legendFontProps={}
paired=False
pairedDeltaLineAlpha=0.3
pairedDeltaLineWidth=1.2
pal=None
rawMarkerSize=8
rawMarkerType='o'
reps=3000
showGroupCount=True
showCI=False
showAllYAxes=False
showRawData=True
smoothboot=False
statfunction=None
summaryBar=False
summaryBarColor='grey'
summaryBarAlpha=0.25
summaryColour='black'
summaryLine=True
summaryLineStyle='solid'
summaryLineWidth=0.25
summaryMarkerSize=10
summaryMarkerType='o'
swarmShareY=True
swarmYlim=None
tickAngle=45
tickAlignment='right'
violinOffset=0.375
violinWidth=0.2
violinColor='k'
xticksize=None
yticksize=None
Returns:
        A matplotlib Figure.
Organization of figure Axes.
'''
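    # A minimal usage sketch (the DataFrame `df` and its column names below are
    # hypothetical, not part of this module):
    #
    #   fig, contrasts = contrastplot(df, x='genotype', y='response',
    #                                 idx=('wt', 'mutant'), paired=False)
    #   fig.savefig('contrast.svg')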
# Check that `data` is a pandas dataframe
if 'DataFrame' not in str(type(data)):
raise TypeError("The object passed to the command is not not a pandas DataFrame.\
Please convert it to a pandas DataFrame.")
# make sure that at least x, y, and idx are specified.
if x is None and y is None and idx is None:
        raise ValueError('You need to specify `x` and `y`, or `idx`. Neither has been specified.')
if x is None:
# if x is not specified, assume this is a 'wide' dataset, with each idx being the name of a column.
datatype='wide'
# Check that the idx are legit columns.
all_idx=np.unique([element for tupl in idx for element in tupl])
# # melt the data.
# data=pd.melt(data,value_vars=all_idx)
# x='variable'
# y='value'
else:
# if x is specified, assume this is a 'long' dataset with each row corresponding to one datapoint.
datatype='long'
# make sure y is not none.
if y is None:
raise ValueError("`paired` is false, but no y-column given.")
# Calculate Ns.
counts=data.groupby(x)[y].count()
# Get and set levels of data[x]
if paired is True:
violinWidth=0.1
# # Calculate Ns--which should be simply the number of rows in data.
# counts=len(data)
# is idcol supplied?
if idcol is None and datatype=='long':
raise ValueError('`idcol` has not been supplied but a paired plot is desired; please specify the `idcol`.')
if idx is not None:
# check if multi-plot or not
if all(isinstance(element, str) for element in idx):
# check that every idx is a column name.
idx_not_in_cols=[n
for n in idx
if n not in data[x].unique()]
if len(idx_not_in_cols)!=0:
raise ValueError(str(idx_not_in_cols)+" cannot be found in the columns of `data`.")
# data_wide_cols=[n for n in idx if n in data.columns]
# if idx is supplied but not a multiplot (ie single list or tuple)
if len(idx) != 2:
                    raise ValueError(str(idx)+" does not have length 2.")
else:
tuple_in=(tuple(idx, ),)
widthratio=[1]
elif all(isinstance(element, tuple) for element in idx):
# if idx is supplied, and it is a list/tuple of tuples or lists, we have a multiplot!
idx_not_in_cols=[n
for tup in idx
for n in tup
if n not in data[x].unique()]
if len(idx_not_in_cols)!=0:
raise ValueError(str(idx_not_in_cols)+" cannot be found in the column "+x)
# data_wide_cols=[n for tup in idx for n in tup if n in data.columns]
if ( any(len(element) != 2 for element in idx) ):
# If any of the tuples does not contain exactly 2 elements.
                    raise ValueError("Each tuple in idx must contain exactly 2 elements.")
                # Make sure the widthratio of the separate multiplot corresponds to how
# many groups there are in each one.
tuple_in=idx
widthratio=[]
for i in tuple_in:
widthratio.append(len(i))
elif idx is None:
raise ValueError('Please specify idx.')
showRawData=False # Just show lines, do not show data.
showCI=False # wait till I figure out how to plot this for sns.barplot.
if datatype=='long':
if idx is None:
## If `idx` is not specified, just take the FIRST TWO levels alphabetically.
tuple_in=tuple(np.sort(np.unique(data[x]))[0:2],)
# pivot the dataframe if it is long!
data_pivot=data.pivot_table(index = idcol, columns = x, values = y)
elif paired is False:
if idx is None:
widthratio=[1]
tuple_in=( tuple(data[x].unique()) ,)
if len(tuple_in[0])>2:
floatContrast=False
else:
if all(isinstance(element, str) for element in idx):
# if idx is supplied but not a multiplot (ie single list or tuple)
# check all every idx specified can be found in data[x]
idx_not_in_x=[n for n in idx
if n not in data[x].unique()]
if len(idx_not_in_x)!=0:
raise ValueError(str(idx_not_in_x)+" cannot be found in the column "+x)
tuple_in=(idx, )
widthratio=[1]
if len(idx)>2:
floatContrast=False
elif all(isinstance(element, tuple) for element in idx):
# if idx is supplied, and it is a list/tuple of tuples or lists, we have a multiplot!
idx_not_in_x=[n
for tup in idx
for n in tup
if n not in data[x].unique()]
if len(idx_not_in_x)!=0:
raise ValueError(str(idx_not_in_x)+" cannot be found in the column "+x)
tuple_in=idx
if ( any(len(element)>2 for element in tuple_in) ):
# if any of the tuples in idx has more than 2 groups, we turn set floatContrast as False.
floatContrast=False
                # Make sure the widthratio of the separate multiplot corresponds to how
# many groups there are in each one.
widthratio=[]
for i in tuple_in:
widthratio.append(len(i))
else:
raise TypeError("The object passed to `idx` consists of a mixture of single strings and tuples. \
Please make sure that `idx` is either a tuple of column names, or a tuple of tuples, for plotting.")
# Ensure summaryLine and summaryBar are not displayed together.
if summaryLine is True and summaryBar is True:
summaryBar=True
summaryLine=False
# Turn off summary line if floatContrast is true
if floatContrast:
summaryLine=False
# initialise statfunction
if statfunction == None:
statfunction=np.mean
# Create list to collect all the contrast DataFrames generated.
contrastList=list()
contrastListNames=list()
# Setting color palette for plotting.
if pal is None:
if 'hue' in kwargs:
colorCol=kwargs['hue']
if colorCol not in data.columns:
raise ValueError(colorCol+' is not a column name.')
colGrps=data[colorCol].unique()#.tolist()
plotPal=dict( zip( colGrps, sns.color_palette(n_colors=len(colGrps)) ) )
else:
if datatype=='long':
colGrps=data[x].unique()#.tolist()
plotPal=dict( zip( colGrps, sns.color_palette(n_colors=len(colGrps)) ) )
if datatype=='wide':
plotPal=np.repeat('k',len(data))
else:
if datatype=='long':
plotPal=pal
if datatype=='wide':
plotPal=list(map(lambda x:pal[x], data[hue]))
if swarmYlim is None:
# get range of _selected groups_.
# u = list()
# for t in tuple_in:
# for i in np.unique(t):
# u.append(i)
# u = np.unique(u)
u=np.unique([element for tupl in tuple_in for element in tupl])
if datatype=='long':
tempdat=data[data[x].isin(u)]
swarm_ylim=np.array([np.min(tempdat[y]), np.max(tempdat[y])])
if datatype=='wide':
allMin=list()
allMax=list()
for col in u:
allMin.append(np.min(data[col]))
allMax.append(np.max(data[col]))
swarm_ylim=np.array( [np.min(allMin),np.max(allMax)] )
swarm_ylim=np.round(swarm_ylim)
else:
swarm_ylim=np.array([swarmYlim[0],swarmYlim[1]])
if summaryBar is True:
lims=swarm_ylim
# check that 0 lies within the desired limits.
# if not, extend (upper or lower) limit to zero.
if 0 not in range( int(round(lims[0])),int(round(lims[1])) ): # turn swarm_ylim to integer range.
# check if all negative:.
if lims[0]<0. and lims[1]<0.:
swarm_ylim=np.array([np.min(lims),0.])
# check if all positive.
elif lims[0]>0. and lims[1]>0.:
swarm_ylim=np.array([0.,np.max(lims)])
if contrastYlim is not None:
contrastYlim=np.array([contrastYlim[0],contrastYlim[1]])
# plot params
if axis_title_size is None:
axis_title_size=27
if yticksize is None:
yticksize=22
if xticksize is None:
xticksize=22
# Set clean style
sns.set(style='ticks')
axisTitleParams={'labelsize' : axis_title_size}
xtickParams={'labelsize' : xticksize}
ytickParams={'labelsize' : yticksize}
svgParams={'fonttype' : 'none'}
rc('axes', **axisTitleParams)
rc('xtick', **xtickParams)
rc('ytick', **ytickParams)
rc('svg', **svgParams)
if figsize is None:
if len(tuple_in)>2:
figsize=(12,(12/np.sqrt(2)))
else:
figsize=(8,(8/np.sqrt(2)))
# calculate CI.
if ci<0 or ci>100:
raise ValueError('ci should be between 0 and 100.')
alpha_level=(100.-ci)/100.
# Initialise figure, taking into account desired figsize.
fig=plt.figure(figsize=figsize)
# Initialise GridSpec based on `tuple_in` shape.
gsMain=gridspec.GridSpec(
1, np.shape(tuple_in)[0],
# 1 row; columns based on number of tuples in tuple.
width_ratios=widthratio,
wspace=0 )
for gsIdx, current_tuple in enumerate(tuple_in):
#### FOR EACH TUPLE IN IDX
if datatype=='long':
plotdat=data[data[x].isin(current_tuple)]
plotdat[x]=plotdat[x].astype("category")
plotdat[x].cat.set_categories(
current_tuple,
ordered=True,
inplace=True)
plotdat.sort_values(by=[x])
# # Drop all nans.
# plotdat.dropna(inplace=True)
summaries=plotdat.groupby(x)[y].apply(statfunction)
if datatype=='wide':
plotdat=data[list(current_tuple)]
summaries=statfunction(plotdat)
plotdat=pd.melt(plotdat) ##### NOW I HAVE MELTED THE WIDE DATA.
if floatContrast is True:
# Use fig.add_subplot instead of plt.Subplot.
ax_raw=fig.add_subplot(gsMain[gsIdx],
frame_on=False)
ax_contrast=ax_raw.twinx()
else:
# Create subGridSpec with 2 rows and 1 column.
subGridSpec=gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=gsMain[gsIdx],
wspace=0)
# Use plt.Subplot instead of fig.add_subplot
ax_raw=plt.Subplot(fig,
subGridSpec[0, 0],
frame_on=False)
ax_contrast=plt.Subplot(fig,
subGridSpec[1, 0],
sharex=ax_raw,
frame_on=False)
        # Calculate the bootstrapped contrast
bscontrast=list()
if paired is False:
tempplotdat=plotdat[[x,y]] # only select the columns used for x and y plotting.
for i in range (1, len(current_tuple)):
# Note that you start from one. No need to do auto-contrast!
                # if datatype=='long':
tempbs=bootstrap_contrast(
data=tempplotdat.dropna(),
x=x,
y=y,
idx=[current_tuple[0], current_tuple[i]],
statfunction=statfunction,
smoothboot=smoothboot,
alpha_level=alpha_level,
reps=reps)
bscontrast.append(tempbs)
contrastList.append(tempbs)
contrastListNames.append(current_tuple[i]+' vs. '+current_tuple[0])
#### PLOT RAW DATA.
ax_raw.set_ylim(swarm_ylim)
# ax_raw.yaxis.set_major_locator(MaxNLocator(n_bins='auto'))
# ax_raw.yaxis.set_major_locator(LinearLocator())
if paired is False and showRawData is True:
# Seaborn swarmplot doc says to set custom ylims first.
sw=sns.swarmplot(
data=plotdat,
x=x, y=y,
order=current_tuple,
ax=ax_raw,
alpha=alpha,
palette=plotPal,
size=rawMarkerSize,
marker=rawMarkerType,
**kwargs)
if floatContrast:
# Get horizontal offset values.
maxXBefore=max(sw.collections[0].get_offsets().T[0])
minXAfter=min(sw.collections[1].get_offsets().T[0])
xposAfter=maxXBefore+floatSwarmSpacer
xAfterShift=minXAfter-xposAfter
# shift the (second) swarmplot
offsetSwarmX(sw.collections[1], -xAfterShift)
# shift the tick.
ax_raw.set_xticks([0.,1-xAfterShift])
elif paired is True:
if showRawData is True:
sw=sns.swarmplot(data=plotdat,
x=x, y=y,
order=current_tuple,
ax=ax_raw,
alpha=alpha,
palette=plotPal,
size=rawMarkerSize,
marker=rawMarkerType,
**kwargs)
if connectPairs is True:
# Produce paired plot with lines.
before=plotdat[plotdat[x]==current_tuple[0]][y].tolist()
after=plotdat[plotdat[x]==current_tuple[1]][y].tolist()
linedf=pd.DataFrame(
{'before':before,
'after':after}
)
# to get color, need to loop thru each line and plot individually.
for ii in range(0,len(linedf)):
ax_raw.plot( [0,0.25], [ linedf.loc[ii,'before'],
linedf.loc[ii,'after'] ],
linestyle='solid',
linewidth=pairedDeltaLineWidth,
color=plotPal[current_tuple[0]],
alpha=pairedDeltaLineAlpha,
)
ax_raw.set_xlim(-0.25,0.5)
ax_raw.set_xticks([0,0.25])
ax_raw.set_xticklabels([current_tuple[0],current_tuple[1]])
# if swarmYlim is None:
# # if swarmYlim was not specified, tweak the y-axis
# # to show all the data without losing ticks and range.
# ## Get all yticks.
# axxYTicks=ax_raw.yaxis.get_majorticklocs()
# ## Get ytick interval.
# YTickInterval=axxYTicks[1]-axxYTicks[0]
# ## Get current ylim
# currentYlim=ax_raw.get_ylim()
# ## Extend ylim by adding a fifth of the tick interval as spacing at both ends.
# ax_raw.set_ylim(
# currentYlim[0]-(YTickInterval/5),
# currentYlim[1]+(YTickInterval/5)
# )
# ax_raw.yaxis.set_major_locator(MaxNLocator(nbins='auto'))
# ax_raw.yaxis.set_major_locator(MaxNLocator(nbins='auto'))
# ax_raw.yaxis.set_major_locator(LinearLocator())
if summaryBar is True:
if paired is False:
bar_raw=sns.barplot(
x=summaries.index.tolist(),
y=summaries.values,
facecolor=summaryBarColor,
ax=ax_raw,
alpha=summaryBarAlpha)
if floatContrast is True:
maxSwarmSpan=2/10.
xlocs=list()
for i, bar in enumerate(bar_raw.patches):
x_width=bar.get_x()
width=bar.get_width()
centre=x_width + (width/2.)
if i == 0:
bar.set_x(centre-maxSwarmSpan/2.)
xlocs.append(centre)
else:
bar.set_x(centre-xAfterShift-maxSwarmSpan/2.)
xlocs.append(centre-xAfterShift)
bar.set_width(maxSwarmSpan)
ax_raw.set_xticks(xlocs) # make sure xticklocs match the barplot.
elif floatContrast is False:
maxSwarmSpan=4/10.
xpos=ax_raw.xaxis.get_majorticklocs()
for i, bar in enumerate(bar_raw.patches):
bar.set_x(xpos[i]-maxSwarmSpan/2.)
bar.set_width(maxSwarmSpan)
else:
# if paired is true
ax_raw.bar([0,0.25],
[ statfunction(plotdat[current_tuple[0]]),
statfunction(plotdat[current_tuple[1]]) ],
color=summaryBarColor,
alpha=0.5,
width=0.05)
## Draw zero reference line.
ax_raw.add_artist(Line2D(
(ax_raw.xaxis.get_view_interval()[0],
ax_raw.xaxis.get_view_interval()[1]),
(0,0),
color='k', linewidth=1.25)
)
if summaryLine is True:
if paired is True:
xdelta=0
else:
xdelta=summaryLineWidth
for i, m in enumerate(summaries):
ax_raw.plot(
(i-xdelta,
i+xdelta), # x-coordinates
(m, m),
color=summaryColour,
linestyle=summaryLineStyle)
if showCI is True:
sns.barplot(
data=plotdat,
x=x, y=y,
ax=ax_raw,
alpha=0, ci=95)
ax_raw.set_xlabel("")
if floatContrast is False:
fig.add_subplot(ax_raw)
#### PLOT CONTRAST DATA.
if len(current_tuple)==2:
if paired is False:
# Plot the CIs on the contrast axes.
plotbootstrap(sw.collections[1],
bslist=tempbs,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
offset=floatContrast,
color=violinColor,
linewidth=1)
else:
bootsDelta = bootstrap(
plotdat[current_tuple[1]]-plotdat[current_tuple[0]],
statfunction=statfunction,
smoothboot=smoothboot,
alpha_level=alpha_level,
reps=reps)
contrastList.append(bootsDelta)
contrastListNames.append(current_tuple[1]+' vs. '+current_tuple[0])
summDelta = bootsDelta['summary']
lowDelta = bootsDelta['bca_ci_low']
highDelta = bootsDelta['bca_ci_high']
if floatContrast:
xpos=0.375
else:
xpos=0.25
# Plot the summary measure.
ax_contrast.plot(xpos, bootsDelta['summary'],
marker=summaryMarkerType,
markerfacecolor='k',
markersize=summaryMarkerSize,
alpha=0.75
)
# Plot the CI.
ax_contrast.plot([xpos, xpos],
[lowDelta, highDelta],
color='k',
alpha=0.75,
# linewidth=1,
linestyle='solid'
)
# Plot the violin-plot.
v = ax_contrast.violinplot(bootsDelta['stat_array'], [xpos],
widths = violinWidth,
showextrema = False,
showmeans = False)
halfviolin(v, half = 'right', color = 'k')
if floatContrast:
# Set reference lines
if paired is False:
## First get leftmost limit of left reference group
xtemp, _=np.array(sw.collections[0].get_offsets()).T
leftxlim=xtemp.min()
## Then get leftmost limit of right test group
xtemp, _=np.array(sw.collections[1].get_offsets()).T
rightxlim=xtemp.min()
ref=tempbs['summary']
else:
leftxlim=0
rightxlim=0.25
ref=bootsDelta['summary']
ax_contrast.set_xlim(-0.25, 0.5) # does this work?
## zero line
ax_contrast.hlines(0, # y-coordinates
leftxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastZeroLineStyle,
linewidth=1,
color=contrastZeroLineColor)
## effect size line
ax_contrast.hlines(ref,
rightxlim, 3.5, # x-coordinates, start and end.
linestyle=contrastEffectSizeLineStyle,
linewidth=1,
color=contrastEffectSizeLineColor)
if paired is False:
es=float(tempbs['summary'])
refSum=tempbs['statistic_ref']
else:
es=float(bootsDelta['summary'])
refSum=statfunction(plotdat[current_tuple[0]])
## If the effect size is positive, shift the right axis up.
if es>0:
rightmin=ax_raw.get_ylim()[0]-es
rightmax=ax_raw.get_ylim()[1]-es
## If the effect size is negative, shift the right axis down.
elif es<0:
rightmin=ax_raw.get_ylim()[0]+es
rightmax=ax_raw.get_ylim()[1]+es
ax_contrast.set_ylim(rightmin, rightmax)
if gsIdx>0:
ax_contrast.set_ylabel('')
align_yaxis(ax_raw, refSum, ax_contrast, 0.)
else:
# Set bottom axes ybounds
if contrastYlim is not None:
ax_contrast.set_ylim(contrastYlim)
if paired is False:
# Set xlims so everything is properly visible!
swarm_xbounds=ax_raw.get_xbound()
ax_contrast.set_xbound(swarm_xbounds[0] -(summaryLineWidth * 1.1),
swarm_xbounds[1] + (summaryLineWidth * 1.1))
else:
ax_contrast.set_xlim(-0.05,0.25+violinWidth)
else:
# Plot the CIs on the bottom axes.
plotbootstrap_hubspoke(
bslist=bscontrast,
ax=ax_contrast,
violinWidth=violinWidth,
violinOffset=violinOffset,
markersize=summaryMarkerSize,
marker=summaryMarkerType,
linewidth=lineWidth)
if floatContrast is False:
fig.add_subplot(ax_contrast)
if gsIdx>0:
ax_raw.set_ylabel('')
ax_contrast.set_ylabel('')
# Turn contrastList into a pandas DataFrame,
contrastList=pd.DataFrame(contrastList).T
contrastList.columns=contrastListNames
# Get number of axes in figure for aesthetic tweaks.
axesCount=len(fig.get_axes())
for i in range(0, axesCount, 2):
# Set new tick labels.
# The tick labels belong to the SWARM axes
# for both floating and non-floating plots.
# This is because `sharex` was invoked.
axx=fig.axes[i]
newticklabs=list()
for xticklab in axx.xaxis.get_ticklabels():
t=xticklab.get_text()
if paired:
N=str(counts)
else:
N=str(counts.ix[t])
if showGroupCount:
newticklabs.append(t+' n='+N)
else:
newticklabs.append(t)
axx.set_xticklabels(
newticklabs,
rotation=tickAngle,
horizontalalignment=tickAlignment)
## Loop thru SWARM axes for aesthetic touchups.
for i in range(0, axesCount, 2):
axx=fig.axes[i]
if floatContrast is False:
axx.xaxis.set_visible(False)
sns.despine(ax=axx, trim=True, bottom=False, left=False)
else:
sns.despine(ax=axx, trim=True, bottom=True, left=True)
if i==0:
drawback_y(axx)
if i!=axesCount-2 and 'hue' in kwargs:
# If this is not the final swarmplot, remove the hue legend.
axx.legend().set_visible(False)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(False)
else:
# Draw back the lines for the relevant y-axes.
# Not entirely sure why I have to do this.
drawback_y(axx)
else:
drawback_y(axx)
# Add zero reference line for swarmplots with bars.
if summaryBar is True:
axx.add_artist(Line2D(
(axx.xaxis.get_view_interval()[0],
axx.xaxis.get_view_interval()[1]),
(0,0),
color='black', linewidth=0.75
)
)
if legend is False:
axx.legend().set_visible(False)
else:
if i==axesCount-2: # the last (rightmost) swarm axes.
                axx.legend(loc='upper right',
bbox_to_anchor=(1.1,1.0),
fontsize=legendFontSize,
**legendFontProps)
## Loop thru the CONTRAST axes and perform aesthetic touch-ups.
## Get the y-limits:
for j,i in enumerate(range(1, axesCount, 2)):
axx=fig.get_axes()[i]
if floatContrast is False:
xleft, xright=axx.xaxis.get_view_interval()
# Draw zero reference line.
axx.hlines(y=0,
xmin=xleft-1,
xmax=xright+1,
linestyle=contrastZeroLineStyle,
linewidth=0.75,
color=contrastZeroLineColor)
# reset view interval.
axx.set_xlim(xleft, xright)
if showAllYAxes is False:
if i in range(2, axesCount):
axx.yaxis.set_visible(False)
else:
# Draw back the lines for the relevant y-axes, only is axesCount is 2.
# Not entirely sure why I have to do this.
if axesCount==2:
drawback_y(axx)
sns.despine(ax=axx,
top=True, right=True,
left=False, bottom=False,
trim=True)
if j==0 and axesCount==2:
# Draw back x-axis lines connecting ticks.
drawback_x(axx)
# Rotate tick labels.
rotateTicks(axx,tickAngle,tickAlignment)
elif floatContrast is True:
if paired is True:
# Get the bootstrapped contrast range.
lower=np.min(contrastList.ix['stat_array',j])
upper=np.max(contrastList.ix['stat_array',j])
else:
lower=np.min(contrastList.ix['diffarray',j])
upper=np.max(contrastList.ix['diffarray',j])
meandiff=contrastList.ix['summary', j]
## Make sure we have zero in the limits.
if lower>0:
lower=0.
if upper<0:
upper=0.
## Get the tick interval from the left y-axis.
leftticks=fig.get_axes()[i-1].get_yticks()
tickstep=leftticks[1] -leftticks[0]
## First re-draw of axis with new tick interval
axx.yaxis.set_major_locator(MultipleLocator(base=tickstep))
newticks1=axx.get_yticks()
## Obtain major ticks that comfortably encompass lower and upper.
newticks2=list()
for a,b in enumerate(newticks1):
if (b >= lower and b <= upper):
# if the tick lies within upper and lower, take it.
newticks2.append(b)
# if the meandiff falls outside of the newticks2 set, add a tick in the right direction.
if np.max(newticks2)<meandiff:
ind=np.where(newticks1 == np.max(newticks2))[0][0] # find out the max tick index in newticks1.
newticks2.append( newticks1[ind+1] )
elif meandiff<np.min(newticks2):
ind=np.where(newticks1 == np.min(newticks2))[0][0] # find out the min tick index in newticks1.
newticks2.append( newticks1[ind-1] )
newticks2=np.array(newticks2)
newticks2.sort()
## Second re-draw of axis to shrink it to desired limits.
axx.yaxis.set_major_locator(FixedLocator(locs=newticks2))
## Despine the axes.
sns.despine(ax=axx, trim=True,
bottom=False, right=False,
left=True, top=True)
# Normalize bottom/right Contrast axes to each other for Cummings hub-and-spoke plots.
if (axesCount>2 and
contrastShareY is True and
floatContrast is False):
# Set contrast ylim as max ticks of leftmost swarm axes.
if contrastYlim is None:
lower=list()
upper=list()
for c in range(0,len(contrastList.columns)):
lower.append( np.min(contrastList.ix['bca_ci_low',c]) )
upper.append( np.max(contrastList.ix['bca_ci_high',c]) )
lower=np.min(lower)
upper=np.max(upper)
else:
lower=contrastYlim[0]
upper=contrastYlim[1]
normalizeContrastY(fig,
contrast_ylim = contrastYlim,
show_all_yaxes = showAllYAxes)
# Zero gaps between plots on the same row, if floatContrast is False
if (floatContrast is False and showAllYAxes is False):
gsMain.update(wspace=0.)
else:
# Tight Layout!
gsMain.tight_layout(fig)
# And we're all done.
rcdefaults() # restore matplotlib defaults.
sns.set() # restore seaborn defaults.
return fig, contrastList | gpl-3.0 |
npotts/SpectralSignalHound | plotting/plot.py | 1 | 8990 | #!/usr/bin/python
# Copyright (c) 2014, Nick Potts
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of SpectralSignalHound nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import csv
import time
import sqlite3
from math import log10
from sys import stdout
from matplotlib.pylab import subplots, suptitle, savefig
import numpy as np
import matplotlib.dates as dates
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, LogLocator, LogFormatter
import datetime
__all__= ["plotSpectrum"]
def datetime_from_string(t):
"""datetime from a string"""
try:
return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S.%f")
except Exception as e:
print("Cannot convert %s: %s" % (t, e))
return None
def dataColumns(sqlite_fname):
rtn=[]
with sqlite3.connect(sqlite_fname) as db:
cur = db.cursor();
cur.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
for i in cur.fetchall():
rtn.append(i[0])
rtn.remove("sweep_metadata")
return rtn;
def dataFromSQLite(sqlite_fname, sql_table):
"""Returns a 2-tuple arrays of matplotlib time axis, and frequency data"""
#PRAGMA table_info( your_table_name );
freq_headers = None
timestamps = None
plot_data = None
with sqlite3.connect(sqlite_fname) as db:
data={}
cur = db.cursor();
cur.execute("PRAGMA table_info( %s );" % sql_table) #get table structure
headers = [row[1] for row in cur.fetchall()]
if "csv" in headers: #more than ~2k points per table
cur.execute("SELECT csv FROM [%s] WHERE header_row='true'" % sql_table)
headers = cur.fetchone()[0].split(",") #new headers are embedded in the data file
cur.execute("SELECT timestamp,csv FROM [%s] WHERE header_row='false'" % sql_table)
data.clear();
data["timestamp"] = []
for col in headers:
data[col]=[]
for row in cur.fetchall():
data["timestamp"].append(row[0])
fvalues = row[1].split(",")
for i in range(len(fvalues)):
data[headers[i]].append(fvalues[i])
else:
headers.remove("rowid")
headers.remove("temperature")
headers.remove("header_row")
headers.sort();
for col in headers:
data[col]=[]
q = "SELECT [%s] FROM %s" % ("],[".join(headers), sql_table) #form query to yank raw data
cur.execute(q);
for row in cur.fetchall():
for i in range(len(headers)):
data[headers[i]].append(row[i])
headers.remove("timestamp")
try:
freq_headers = np.array(headers).astype(np.float)
freq_headers.sort()
timestamps = np.array([dates.date2num(datetime_from_string(t)) for t in data["timestamp"]])
plot_data = [data[col] for col in headers]
plot_data = np.array(plot_data).astype(np.float)
except Exception as e:
print("Error: %s" % e)
pass
return (freq_headers, timestamps, plot_data)
def dataFromCSV(csv_fname):
"""Returns a 2-tuple arrays of matplotlib time axis, and frequency"""
with open(csv_fname, "r") as csvfile:
#read headers from the file
headers = csvfile.readline().split(",")
dreader = csv.DictReader(csvfile, fieldnames=headers, restkey=None, restval="-200")
data={}
for col in headers:
data[col]=[]
for row in dreader:
for col in headers:
try:
data[col].append(float(row[col]))
except:
data[col].append(row[col])
#data loaded
timestamps = np.array([dates.date2num(datetime_from_string(t)) for t in data["timestamp"]])
headers.remove("timestamp")
headers.remove("temperature")
headers.sort()
plot_data=[]
for col in headers:
plot_data.append(data[col])
plot_data = np.array(plot_data).astype(np.float)
freq_headers = np.array(headers).astype(np.float)
freq_headers.sort()
return (freq_headers, timestamps, plot_data);
def plotData(output_fname, freq_headers, timestamps, plot_data):
""" """
if timestamps is None or plot_data is None or freq_headers is None of len(timestamps) == 0:
print("No data to plot")
return
fmin = int(freq_headers[0])
fmax = int(freq_headers[-1])
tmin = min(timestamps)
tmax = max(timestamps)
#plot the data
fig, ax = subplots()
pl = ax.pcolormesh(timestamps, freq_headers, plot_data, cmap='spectral', vmin=-100, vmax=-40)
fig.colorbar(pl, orientation='vertical') #show a colorbar legend
#add in title stuffs
suptitle("%s: RF Spectrum" % output_fname)
ax.set_xlabel("Sample Time")
ax.set_ylabel("Frequency (Hz)")
#setup time axis stuff
locator = dates.MinuteLocator(30) #markers start 30 mins past the hour
ax.xaxis.set_major_locator(locator)
formatter = dates.AutoDateFormatter(locator)
formatter.scaled[1/(24.*60.)] = '%H:%M:%S' # only show min and sec
ax.xaxis.set_major_formatter(formatter)
ax.set_xlim([tmin, tmax])
ax.set_ylim([fmin, fmax])
ax.set_yscale('log')
num_major_ticks = 10;
num_minor_ticks_per_major = 5
majorLocator = MultipleLocator((fmax - fmin)/num_major_ticks)
majorFormatter = FormatStrFormatter('%4.2f')
minorLocator = MultipleLocator((fmax - fmin)/(num_major_ticks*num_minor_ticks_per_major))
  # keep linear ticks when the span is under a decade; switch to log ticks below otherwise
if log10(fmax) - log10(fmin) >= .9:
majorLocator = LogLocator(base=10.0, subs=[1.0], numdecs=4, numticks=5)
majorFormatter = LogFormatter(base=10, labelOnlyBase=True)
minorLocator = LogLocator(base=10.0, subs=[1.0], numdecs=4, numticks=5)
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_minor_locator(minorLocator);
ax.get_yaxis().set_tick_params(which='both', direction='out')
fig.autofmt_xdate()
#set image size and save it as a PNG
fig.set_size_inches(18.5,10.5)
savefig(output_fname + ".png", format="png", dpi=200, transparent=True, bbox_inches='tight')
def controller():
"""Read command line opts and generate plots"""
if (len(sys.argv) == 1):
print("Usage: %s <list of .csv and .db files to plot from>" % (sys.argv[0]))
for f in sys.argv[1:]:
timestamps = None
plot_data = None
freq_headers=None
#hack until higher up function can specify plotting source
if f.lower().endswith(".csv"):
(freq_headers, timestamps, plot_data) = dataFromCSV(f)
print("Plotting from %s" % f)
out_fname = f.replace(".csv", "")
plotData(out_fname, freq_headers, timestamps, plot_data)
if f.lower().endswith(".db"):
tables = dataColumns(f)
for table in tables:
print("Plotting from %s:%s" % (f, table))
(freq_headers, timestamps, plot_data) = dataFromSQLite(f, table)
out_fname = f + "_" + table
plotData(out_fname, freq_headers, timestamps, plot_data)
if __name__ == "__main__":
controller()
#dataFromSQLite("403.db", "FAST_20141114L134906")
#plotSpec("403.db", "FAST_20141114L134906")
#plotSpec("403-test-trace.csv")
#plotSpec("403-test-trace-3000.csv")
#plotSpec("wide.csv")
| bsd-3-clause |
benschneider/dephasing_ringdown_sim | equations.py | 1 | 4627 | # -*- coding: utf-8 -*-
import numpy as np
#def hho(w,w0,q):
# return w0*w0/(w0*w0-w*w + i*w*w0/Q)
def Xd(w,w0,Q):
return (w0*w0*w0*w0-w0*w0*w*w)/( (w0*w0-w*w)*(w0*w0-w*w) + w*w*w0*w0/Q/Q )
def Yd(w,w0,Q):
return -w0*w0*w0*w/Q/( (w0*w0-w*w)*(w0*w0-w*w) + w*w*w0*w0/Q/Q )
def Xr(t,w,w0,Q):
return np.e**(-w*t/(2*Q))*(Xd(w,w0,Q)*np.cos(t*(w0 - w)) + Yd(w,w0,Q)*w/w0*np.sin(t*(w0 - w)))
def Yr(t,w,w0,Q):
return np.e**(-w*t/(2*Q))*(-Xd(w,w0,Q)*np.sin(t*(w0 - w)) + Yd(w,w0,Q)*w/w0*np.cos(t*(w0 - w)))
'''
def Xr2(t,w,w0,Q):
return np.e**(-w*t/(2*Q))*(Xd(w,w0,Q)*np.cos(t*(w0 - w))**2 + Yd(w,w0,Q)*w/w0*np.sin(t*(w0 - w))**2)
def Yr2(t,w,w0,Q):
return np.e**(-w*t/(2*Q))*(-Xd(w,w0,Q)*np.sin(t*(w0 - w))**2 + Yd(w,w0,Q)*w/w0*np.cos(t*(w0 - w))**2)
'''
def yqfit(w, Q, w0 = 300*2*np.pi):
'''
Equation for fitting (Spectral Q)
returns -w0*w0*w0*w/Q/( (w0*w0-w*w)*(w0*w0-w*w) + w*w*w0*w0/Q/Q )
The array of numbers returned should represent the imaginary part of the mechanical response.
(usefull to plot these result:
for example:
import matplotlib.pyplot as pl
import equations as eq
imresponse1 = eq.yqfit(w_array, Qd, w_array.mean())
pl.plot(imresponse1)
)
'''
return -w0*w0*w0*w/Q/( (w0*w0-w*w)*(w0*w0-w*w) + w*w*w0*w0/Q/Q )
def expfit(t, Q, a, b, w0 = 300*2*np.pi):
'''Equation for fitting (Ringdown Q)'''
#w0 = 300*2*np.pi; #at a fixed freq
return (a*np.e**(-t*w0/(2*Q))+b)
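# A minimal fitting sketch (the time array `t` and ringdown trace `amp` are
# hypothetical measured data, not part of this module):
#
#   from scipy.optimize import curve_fit
#   popt, pcov = curve_fit(expfit, t, amp, p0=[10000., amp[0], 0.])
#   Q_ringdown = popt[0]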
def Qd(Qs,Qr):
'''
Equation returns an estimate for Qr to be used,
given an estimate to Qr for a given Qs and Qd
'''
tmp = 1.0/(1.0/float(Qs)-1.0/Qr)
return tmp
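# Worked example (values are hypothetical): the function above implements
# 1/Qd = 1/Qs - 1/Qr, so Qd(Qs=5000, Qr=10000) returns
# 1.0/(1/5000. - 1/10000.) = 10000.0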
def pix2f(w):
'''
    this little handy function simply returns a factor such that one can easily
convert i.e. a frequency to number of points
it requires the array of frequency
then from the frequency span and number of points
it returns a factor describing the number of points required to change frequency by #1
'''
w_span = w[-1]-w[0] #frequency range *2pi
w_p = len(w) #number of pixel
fpix = w_p/w_span #pixel required to shift freq by 1 MHz
return fpix
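# Illustrative use (the array below is hypothetical):
#   w = np.linspace(0., 100., 1001)   # 1001 points spanning 100 frequency units
#   pix2f(w)                          # ~10 points per unit step in w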
#Equation for lowpass (returns a gaussian)
#Doing the lowpass in C,
#Gaussian filtering can be implemented using recursion (Young & Vliet,
#1995; Signal Processing, 44: 139-151). The recursive approximation is
#very accurate and does not introduce ringing. It is anti-causal
#(forward-backward) and has zero phase response.
#or using a correlation function from the scipy lib.
# which is a lot slower but good enough for now.
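# A minimal sketch of the scipy route mentioned above (gaussian_filter1d is the
# standard scipy.ndimage call; the wrapper name `lowpass` is illustrative):
#
#   from scipy.ndimage import gaussian_filter1d
#   def lowpass(signal, sigma=1.0):
#       # zero-phase Gaussian smoothing of a 1-D trace
#       return gaussian_filter1d(signal, sigma)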
'''
def gaus(sigma=1,pos = 50, w = range(0,100)):
g1 = [1 / (sigma * np.sqrt(2*np.pi)) * np.e**(-float(x-pos)**2/(2*sigma**2)) for x in w]
#g2 = np.ones([m,len(w)]);
return g1 #zip(*g1*g2)
#a gaussian
# the following can be derived with a simple taylor expansion of a gaussian
# and doing some fancy stuff with it which is cool but ... nah...
from numpy import array, zeros, ones, flipud, fliplr
from scipy.signal import lfilter
from math import sqrt
def __gausscoeff(s):
if s < .5: raise ValueError, \
'Sigma for Gaussian filter must be >0.5 samples'
q = 0.98711*s - 0.96330 if s > 0.5 else 3.97156 \
- 4.14554*sqrt(1.0 - 0.26891*s)
b = zeros(4)
b[0] = 1.5785 + 2.44413*q + 1.4281*q**2 + 0.422205*q**3
b[1] = 2.44413*q + 2.85619*q**2 + 1.26661*q**3
b[2] = -(1.4281*q**2 + 1.26661*q**3)
b[3] = 0.422205*q**3
B = 1.0 - ((b[1] + b[2] + b[3])/b[0])
# convert to a format compatible with lfilter's
# difference equation
B = array([B])
A = ones(4)
A[1:] = -b[1:]/b[0]
return B,A
def Gaussian1D(signal, sigma, padding=0):
n = signal.shape[0]
tmp = zeros(n + padding)
if tmp.shape[0] < 4: raise ValueError, \
'Signal and padding too short'
tmp[:n] = signal
B,A = __gausscoeff(sigma)
tmp = lfilter(B, A, tmp)
tmp = tmp[::-1]
tmp = lfilter(B, A, tmp)
tmp = tmp[::-1]
return tmp[:n]
def Gaussian2D(image, sigma, padding=0):
n,m = image.shape[0],image.shape[1]
tmp = zeros((n + padding, m + padding))
if tmp.shape[0] < 4: raise ValueError, \
'Image and padding too small'
if tmp.shape[1] < 4: raise ValueError, \
'Image and padding too small'
B,A = __gausscoeff(sigma)
tmp[:n,:m] = image
tmp = lfilter(B, A, tmp, axis=0)
tmp = flipud(tmp)
tmp = lfilter(B, A, tmp, axis=0)
tmp = flipud(tmp)
tmp = lfilter(B, A, tmp, axis=1)
tmp = fliplr(tmp)
tmp = lfilter(B, A, tmp, axis=1)
tmp = fliplr(tmp)
return tmp[:n,:m]
''' | gpl-2.0 |
potash/scikit-learn | sklearn/linear_model/tests/test_huber.py | 54 | 7619 | # Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=10000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
if huber_warm.n_iter_ is not None:
assert_equal(0, huber_warm.n_iter_)
def test_huber_better_r2_score():
    # Test that huber gives a better r2 score than ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| bsd-3-clause |
devkev/mtools | mtools/mplotqueries/plottypes/range_type.py | 7 | 3323 | from mtools.mplotqueries.plottypes.base_type import BasePlotType
from datetime import timedelta
import argparse
try:
from matplotlib.dates import date2num, num2date
except ImportError:
raise ImportError("Can't import matplotlib. See https://github.com/rueckstiess/mtools/blob/master/INSTALL.md for \
instructions how to install matplotlib or try mlogvis instead, which is a simplified version of mplotqueries \
that visualizes the logfile in a web browser.")
from mtools.util.log2code import Log2CodeConverter
class RangePlotType(BasePlotType):
plot_type_str = 'range'
sort_order = 2
l2cc = Log2CodeConverter()
def __init__(self, args=None, unknown_args=None):
BasePlotType.__init__(self, args, unknown_args)
# parse arguments further to get --bucketsize argument
argparser = argparse.ArgumentParser("mplotqueries --type range")
argparser.add_argument('--gap', action='store', metavar='SEC', type=int, help="gap threshold in seconds after which a new line is started (default: 60)", default=None)
sub_args = vars(argparser.parse_args(unknown_args))
self.gap = sub_args['gap']
def accept_line(self, logevent):
""" return True if the log line does not have a duration. """
return True
def log2code(self, logevent):
codeline = self.l2cc(logevent.line_str)
if codeline:
return ' ... '.join(codeline.pattern)
else:
return None
def plot_group(self, group, idx, axis):
y_min, y_max = axis.get_ylim()
if y_min == 0. and y_max == 1.:
axis.set_ylim(0.0, 1.0)
height = (y_max - y_min) / len(self.groups)
y_bottom = y_min + (y_max-y_min) - idx * height
x_lefts = [ date2num( self.groups[group][0].datetime ) ]
x_rights = []
if self.gap:
td = timedelta(seconds=self.gap)
for le, le_next in zip(self.groups[group][:-1], self.groups[group][1:]):
if le_next.datetime - le.datetime >= td:
x_lefts.append( date2num(le_next.datetime) )
x_rights.append( date2num(le.datetime) )
x_rights.append( date2num( self.groups[group][-1].datetime ) )
color=self.colors[idx%len(self.colors)]
artists = []
for x_left, x_right in zip(x_lefts, x_rights):
width = max(0.0001, x_right-x_left)
artist = axis.barh(y_bottom-0.5*height, width=width, height=0.7*height, left=x_left, color=color, alpha=0.8, edgecolor='white', picker=5, linewidth=1, align='center')[0]
artist._mt_plot_type = self
artist._mt_group = group
artist._mt_left = x_left
artist._mt_right = x_right
artists.append(artist)
if len(self.groups) < 50:
axis.annotate(group, xy=(0, y_bottom-height/2.), xycoords='axes fraction', xytext=(-10, 0), textcoords='offset pixels', va='bottom', ha='right', fontsize=9)
axis.axes.get_yaxis().set_visible(False)
return artists
def clicked(self, event):
group = event.artist._mt_group
print num2date(event.artist._mt_left).strftime("%a %b %d %H:%M:%S"), '-', num2date(event.artist._mt_right).strftime("%a %b %d %H:%M:%S")
| apache-2.0 |
fibbo/DIRAC | Core/Utilities/Graphs/PieGraph.py | 14 | 5528 | ########################################################################
# $HeadURL$
########################################################################
""" PieGraph represents a pie graph
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
import numpy, math, time
from matplotlib.patches import Wedge, Shadow
from matplotlib.cbook import is_string_like
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
class PieGraph( PlotBase ):
def __init__( self, data, ax, prefs, *args, **kw ):
PlotBase.__init__( self, data, ax, prefs, *args, **kw )
self.pdata = data
def pie( self, explode = None,
colors = None,
autopct = None,
pctdistance = 0.6,
shadow = False
):
start = time.time()
labels = self.pdata.getLabels()
if labels[0][0] == "NoLabels":
try:
self.pdata.initialize(key_type='string')
self.pdata.sortLabels()
labels = self.pdata.getLabels()
nLabels = self.pdata.getNumberOfLabels()
explode = [0.] * nLabels
if nLabels > 0:
explode[0] = 0.1
except Exception,x:
print "PieGraph Error: can not interpret data for the plot"
#labels.reverse()
values = [l[1] for l in labels]
x = numpy.array( values, numpy.float64 )
self.legendData = labels
sx = float( numpy.sum( x ) )
if sx > 1: x = numpy.divide( x, sx )
labels = [l[0] for l in labels]
if explode is None: explode = [0] * len( x )
assert( len( x ) == len( labels ) )
assert( len( x ) == len( explode ) )
plot_axis_labels = self.prefs.get( 'plot_axis_labels', True )
center = 0, 0
radius = 1.1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in zip( x, labels, explode ):
x, y = center
theta2 = theta1 + frac
thetam = 2 * math.pi * 0.5 * ( theta1 + theta2 )
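# thetam is the mid-angle of the wedge in radians; exploded wedges are shifted outward along this direction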
x += expl * math.cos( thetam )
y += expl * math.sin( thetam )
color = self.palette.getColor( label )
w = Wedge( ( x, y ), radius, 360. * theta1, 360. * theta2,
facecolor = color,
lw = pixelToPoint( 0.5, self.dpi ),
edgecolor = '#999999' )
slices.append( w )
self.ax.add_patch( w )
w.set_label( label )
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = Shadow( w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder( 0.9 * w.get_zorder() )
self.ax.add_patch( shad )
if plot_axis_labels:
if frac > 0.03:
xt = x + 1.05 * radius * math.cos( thetam )
yt = y + 1.05 * radius * math.sin( thetam )
thetam %= 2 * math.pi
if 0 < thetam and thetam < math.pi:
valign = 'bottom'
elif thetam == 0 or thetam == math.pi:
valign = 'center'
else:
valign = 'top'
if thetam > math.pi / 2.0 and thetam < 3.0 * math.pi / 2.0:
halign = 'right'
elif thetam == math.pi / 2.0 or thetam == 3.0 * math.pi / 2.0:
halign = 'center'
else:
halign = 'left'
t = self.ax.text( xt, yt, label,
size = pixelToPoint( self.prefs['subtitle_size'], self.dpi ),
horizontalalignment = halign,
verticalalignment = valign )
t.set_family( self.prefs['font_family'] )
t.set_fontname( self.prefs['font'] )
t.set_size( pixelToPoint( self.prefs['text_size'], self.dpi ) )
texts.append( t )
if autopct is not None:
xt = x + pctdistance * radius * math.cos( thetam )
yt = y + pctdistance * radius * math.sin( thetam )
if is_string_like( autopct ):
s = autopct % ( 100. * frac )
elif callable( autopct ):
s = autopct( 100. * frac )
else:
raise TypeError( 'autopct must be callable or a format string' )
t = self.ax.text( xt, yt, s,
horizontalalignment = 'center',
verticalalignment = 'center' )
t.set_family( self.prefs['font_family'] )
t.set_fontname( self.prefs['font'] )
t.set_size( pixelToPoint( self.prefs['text_size'], self.dpi ) )
autotexts.append( t )
theta1 = theta2
i += 1
self.legendData.reverse()
self.ax.set_xlim( ( -1.25, 1.25 ) )
self.ax.set_ylim( ( -1.25, 1.25 ) )
self.ax.set_axis_off()
if autopct is None: return slices, texts
else: return slices, texts, autotexts
min_amount = .1
def getLegendData( self ):
return self.legendData
def draw( self ):
self.ylabel = ''
self.prefs['square_axis'] = True
PlotBase.draw( self )
def my_display( x ):
if x > 100 * self.min_amount:
return '%.1f' % x + '%'
else:
return ""
nLabels = self.pdata.getNumberOfLabels()
explode = [0.] * nLabels
if nLabels > 0:
explode[0] = 0.1
self.wedges, text_labels, percent = self.pie( explode = explode, autopct = my_display )
| gpl-3.0 |
Aidan-Bharath/code_and_stuffs | vert_shear.py | 1 | 3042 | from __future__ import division
import numpy as np
import matplotlib
import netCDF4 as net
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import matplotlib.ticker as ticker
if __name__ == "__main__":
GPBPb = [-66.3391,44.2761]
fs5 = [-66.3381,44.2505]
fileDir = '/home/aidan/thesis/ncdata/gp/2014/'
filename = 'dngrid_0001.nc'
nc = net.Dataset(fileDir+filename).variables
t_slice = ['2014-02-02T11:55:00','2014-02-02T12:05:00']
t_slice = np.array(t_slice,dtype='datetime64[us]')
time = nc['time'][:]+678942
time = np.array(time)
t = time.shape[0]
l = []
for i in range(t):
date = datetime.fromordinal(int(time[i]))+timedelta(days=time[i]%1)-timedelta(days=366)
l.append(date)
time = np.array(l,dtype='datetime64[us]')
if t_slice.shape[0] != 1:
argtime = np.argwhere((time>=t_slice[0])&(time<=t_slice[-1])).flatten()
lat = nc['lat'][:]
lon = nc['lon'][:]
h = nc['h'][:]
zeta = nc['zeta'][argtime,:]+h[None,:]
nv = nc['nv'][:].T-1
siglay = nc['siglay'][:]
z = zeta[:,None,:]*siglay[None,:,:]
dep = np.zeros([argtime.shape[0],siglay.shape[0],nv.shape[0]])
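# element depth at each sigma layer = mean of the depths at the three corner nodes of that triangle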
for i in range(z.shape[0]):
for j in range(nv.shape[0]):
el = (z[i,:,nv[j,0]]+z[i,:,nv[j,1]]+z[i,:,nv[j,2]])/3
dep[i,:,j] = el
u = nc['u'][argtime,:,:]
v = nc['v'][argtime,:,:]
w = nc['ww'][argtime,:,:]
bot_lvl = 4
top_lvl = 1
vel = np.sqrt(u[:]**2+v[:]**2+w[:]**2)
#topl = np.argwhere((vel[:,top_lvl,:] < vel[:,1,:])).flatten()
#vel[:,:,topl] = 0
vel = vel[:,bot_lvl,:] - vel[:,top_lvl,:]
z = dep[:,bot_lvl,:] - dep[:,top_lvl,:]
dudz = vel/z
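# dudz approximates the vertical shear: speed difference between the bottom and
# top sigma levels divided by their vertical separation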
levels = np.arange(-40,5,1)
vmax = 0.08
vmin = -0.08
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
for i in range(vel.shape[0]):
grid = Tri.Triangulation(lon,lat,triangles=nv)
fig = plt.figure()
plt.rc('font',size='22')
ax = fig.add_subplot(111,aspect=(1.0/np.cos(np.mean(lat)*np.pi/180.0)))
CS = ax.tricontour(grid, -h,levels=levels,shading='faceted',cmap=plt.cm.gist_earth)
#ax.clabel(CS, fontsize=9, inline=1,colors='k',fmt='%1d')
f = ax.tripcolor(grid, dudz[i,:],vmax=vmax,vmin=vmin,cmap=plt.cm.PuOr)
frame = plt.gca().patch.set_facecolor('0.5')
cbar = fig.colorbar(f,ax=ax)
cbar.set_label(r'Vertical Shear', rotation=-90,labelpad=30)
plt.scatter(GPBPb[0],GPBPb[1],s=200,color='black')
plt.scatter(fs5[0],fs5[1],s=200,color='black')
#plt.title(str(time[j]))
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.grid()
scale = 1
ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
ax.xaxis.set_major_formatter(ticks)
ax.yaxis.set_major_formatter(ticks)
ax.set_xlim([-66.35,-66.325])
ax.set_ylim([44.261,44.272])
plt.show()
| mit |
alexsavio/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 36 | 6957 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
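# dist_func is the Minkowski distance of order p; it is used below as a custom 'pyfunc' metric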
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
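# e.g. cmp_version('0.15.1', '0.9') == 1 because only the first two components
# are compared and (0, 15) > (0, 9)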
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
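# haversine_slow is the textbook great-circle formula with x = (latitude, longitude)
# in radians, used here as a reference implementation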
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check that DistanceMetric objects initialized with either a callable
# metric or a predefined metric are picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
# Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = np.random.RandomState(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X))
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/testing/compare.py | 4 | 13161 | """
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import hashlib
import os
import shutil
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.exceptions import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
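# Illustrative usage: compare_float(1.0, 1.04, relTol=0.05) returns None (the
# values agree within 5%), while compare_float(1.0, 1.2, relTol=0.05) returns
# a message describing the mismatch.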
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
def cmd(old, new):
return [gs, '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
def cmd(old, new):
return ['inkscape', '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(six.iterkeys(converter))
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
on a hash of the exact contents of the input file. There is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure(
"Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"image sizes do not match expected size: {0} "
"actual size {1}".format(expectedImage.shape, actualImage.shape))
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much
# faster than using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
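# RMS = sqrt(mean of the squared per-pixel differences), computed from the
# histogram so no second full-size array has to be allocated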
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
Example
-------
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
compare_images(img1, img2, 0.001)
"""
if not os.path.exists(actual):
msg = "Output image %s does not exist." % actual
raise Exception(msg)
if os.stat(actual).st_size == 0:
msg = "Output image file %s is empty." % actual
raise Exception(msg)
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
diff_image = make_test_filename(actual, 'failed-diff')
if rms <= tol:
if os.path.exists(diff_image):
os.unlink(diff_image)
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim == actualImage.ndim
assert expectedImage.shape == actualImage.shape
absDiffImage = abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np, output)
| mit |
daniel20162016/my-first | read_xml_all/calcul_matrix_compare_je_1.py | 2 | 6506 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
filename = 'francois_filon_pure_3.wav'
filename_1 ='francois_filon_pure_3.xml'
word ='je'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
print 'word_start_point=',word_start_point
print 'word_length_point=',word_length_point
print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
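# each analysis window is t_step samples long and consecutive windows start
# t_entre_step samples apart, so neighbouring windows overlap by t_step - t_entre_step samples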
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part calculates the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_1 = np.zeros([24,8])
#matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_1[i][0]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFT features of the following windows
#==============================================================================
for i in range(1,8):
print i
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_1[j][i]=x1_all[j]
#print 'matrix_all_step_1=',matrix_all_step_1
#==============================================================================
# this part calculates the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_2 = np.zeros([24,8])
for i in range(0,24):
matrix_all_step_2[i][0]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFT features of the following windows
#==============================================================================
for i in range(1,8):
print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_2[j][i]=x1_all[j]
#print 'matrix_all_step_2=',matrix_all_step_2
#==============================================================================
# this part calculates the third matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_3 = np.zeros([24,8])
for i in range(0,24):
matrix_all_step_3[i][0]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFT features of the following windows
#==============================================================================
for i in range(1,8):
print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_3[j][i]=x1_all[j]
#print 'matrix_all_step_3=',matrix_all_step_3
#==============================================================================
# this part calculates the fourth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_4 = np.zeros([24,8])
for i in range(0,24):
matrix_all_step_4[i][0]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFT features of the following windows
#==============================================================================
for i in range(1,8):
print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_4[j][i]=x1_all[j]
#print 'matrix_all_step_4=',matrix_all_step_4
#==============================================================================
# this part calculates the fifth matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_5 = np.zeros([24,8])
for i in range(0,24):
matrix_all_step_5[i][0]=x1_1[i]
#==============================================================================
# the remaining columns are filled with the FFT features of the following windows
#==============================================================================
for i in range(1,8):
print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_5[j][i]=x1_all[j]
#print 'matrix_all_step_5=',matrix_all_step_5
np.savez('je_compare.npz',matrix_all_step_1,matrix_all_step_2,matrix_all_step_3,matrix_all_step_4,matrix_all_step_5)
| mit |
Adai0808/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero and the solution
approaches the ordinary least squares solution, the coefficients
exhibit large oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
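# (a classically ill-conditioned matrix, so the fitted coefficients are highly
# sensitive to the regularization strength alpha)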
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/backends/backend_gtk3.py | 11 | 30540 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gi
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except AttributeError:
raise ImportError(
"pygobject version too old -- it must have require_version")
except ValueError:
raise ImportError(
"Gtk3 backend requires the GObject introspection bindings for Gtk 3 "
"to be installed.")
try:
from gi.repository import Gtk, Gdk, GObject, GLib
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%s.%s.%s" % (Gtk.get_major_version(), Gtk.get_micro_version(), Gtk.get_minor_version())
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if Gtk.main_level() == 0:
Gtk.main()
show = Show()
class TimerGTK3(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK3 for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GLib.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GLib.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
class FigureCanvasGTK3 (Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK|
Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
self._idle_event_id = GLib.idle_add(self.idle_event)
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
GLib.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
GLib.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
if event.direction==Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def size_allocate(self, widget, allocation):
if _debug:
print("FigureCanvasGTK3.%s" % fn_name())
print("size_allocate (%d x %d)" % (allocation.width, allocation.height))
dpival = self.figure.dpi
winch = allocation.width / dpival
hinch = allocation.height / dpival
self.figure.set_size_inches(winch, hinch)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK3.%s' % fn_name())
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK3Agg or GTK3Cairo
pass
def draw(self):
self._need_redraw = True
if self.get_visible() and self.get_mapped():
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.get_property("window").process_updates (False)
def draw_idle(self):
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = GLib.idle_add(idle_draw)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK3(*args, **kwargs)
def flush_events(self):
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration(True)
Gdk.flush()
Gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK3(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The Gtk.Toolbar (gtk only)
vbox : The Gtk.VBox containing the canvas and toolbar (gtk only)
window : The Gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = Gtk.Window()
self.set_window_title("Figure %d" % num)
try:
self.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True, 0)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False, 0)
size_request = self.toolbar.size_request()
h += size_request.height
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK3.%s' % fn_name())
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
if self.toolbar:
self.toolbar.destroy()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
Gtk.main_level() >= 1:
Gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK3 (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
GObject.GObject.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self.ctx = None
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.get_property("window").set_cursor(cursord[cursor])
#self.canvas.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
self.ctx = self.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.canvas.draw()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
def _init_toolbar(self):
self.set_style(Gtk.ToolbarStyle.ICONS)
basedir = os.path.join(rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( Gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = Gtk.Image()
image.set_from_file(fname)
tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
toolitem = Gtk.SeparatorToolItem()
self.insert(toolitem, -1)
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = Gtk.Window()
try:
window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = Gtk.Box()
vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True, 0)
window.show()
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
class FileChooserDialog(Gtk.FileChooserDialog):
"""GTK+ file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = Gtk.FileChooserAction.SAVE,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (Gtk.ResponseType.OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = Gtk.Box(spacing=10)
hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
liststore = Gtk.ListStore(GObject.TYPE_STRING)
cbox = Gtk.ComboBox() #liststore)
cbox.set_model(liststore)
cell = Gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start(cbox, False, False, 0)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
liststore.append(["%s (*.%s)" % (name, ext)])
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(Gtk.ResponseType.OK):
break
filename = self.get_filename()
break
return filename, self.ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import Gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = Gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
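# Gdk colors use 16-bit channels (0-65535); dividing by 65535. converts them to matplotlib's 0-1 floats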
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = Gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = Gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# Define the file to use as the GTK icon
if sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(matplotlib.rcParams['datapath'], 'images', icon_filename)
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel Gtk.Window
parent = parent.get_toplevel()
if not parent.is_toplevel():
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = Gtk.MessageDialog(
parent = parent,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
| gpl-2.0 |
X-DataInitiative/tick | examples/plot_logistic_tick_vs_scikit.py | 2 | 3161 | """
================================================================
Logistic regression comparison: ``scikit-learn`` versus ``tick``
================================================================
In this example we give a naive comparison of ``tick`` and ``scikit-learn`` for
binary classification using logistic regression with :math:`\ell_1`
penalization.
This comparison is done using the well-known ``adult`` dataset, a standard
benchmark dataset for binary classification.
Some remarks are the following:
* Both classifiers have the same performance in terms of AUC (area under the
ROC curve)
* Learned model-weights are slightly different. This is explained by the fact
that ``scikit-learn`` uses ``liblinear`` for optimization of the
:math:`\ell_1`-penalized likelihood. When using this solver, the
``intercept`` is penalized like the model weights (``coeff_``), while this is
not the case in `tick`. Note that this difference can be reduced by tuning the
``intercept_scaling`` parameter from ``scikit-learn``'s
``LogisticRegression``
* In this example, the computational time of ``tick`` is lower than ``scikit-learn``'s
"""
import numpy as np
from time import time
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.linear_model import LogisticRegression as LogRegScikit
from tick.dataset import fetch_tick_dataset
from tick.linear_model import LogisticRegression as LogRegTick
train_set = fetch_tick_dataset('binary/adult/adult.trn.bz2')
test_set = fetch_tick_dataset('binary/adult/adult.tst.bz2')
# scikit-learn 0.22's default solver for LogisticRegression is lbfgs
clf_tick = LogRegTick(C=1e5, penalty='l1', tol=1e-8)
clf_scikit = LogRegScikit(penalty='l1', solver='liblinear', tol=1e-8)
t1 = time()
clf_tick.fit(train_set[0], train_set[1])
t_tick = time() - t1
t1 = time()
clf_scikit.fit(train_set[0], train_set[1])
t_scikit = time() - t1
pred_tick = clf_tick.predict_proba(test_set[0])
pred_scikit = clf_scikit.predict_proba(test_set[0])
fpr_tick, tpr_tick, _ = roc_curve(test_set[1], pred_tick[:, 1])
fpr_scikit, tpr_scikit, _ = roc_curve(test_set[1], pred_scikit[:, 1])
plt.figure(figsize=(10, 8))
ax1 = plt.subplot2grid((2, 2), (0, 0))
plt.stem(clf_tick.weights)
plt.title(r'Model-weights in $\mathtt{tick}$', fontsize=16)
plt.ylim((-2, 2.5))
ax2 = plt.subplot2grid((2, 2), (0, 1))
plt.stem(np.ravel(clf_scikit.coef_))
# plt.legend()
plt.ylim((-2, 2.5))
plt.title(r'Model-weights in $\mathtt{scikit-learn}$', fontsize=16)
plt.subplot2grid((2, 2), (1, 0))
plt.plot(fpr_tick, tpr_tick, lw=2)
plt.plot(fpr_scikit, tpr_scikit, lw=2)
plt.legend([
"tick (AUC = {:.2f})".format(auc(fpr_tick, tpr_tick)),
"scikit-learn (AUC = {:.2f})".format(auc(fpr_tick, tpr_tick))
], loc='center right', fontsize=12)
plt.ylabel("True Positive Rate", fontsize=14)
plt.xlabel("False Positive Rate", fontsize=14)
plt.title('ROC curves comparison', fontsize=16)
ax4 = plt.subplot2grid((2, 2), (1, 1))
plt.bar([1, 2], [t_tick, t_scikit])
ax4.set_xticks([1, 2])
ax4.set_xticklabels(['tick', 'scikit-learn'], fontsize=14)
plt.title('Computational time (seconds)', fontsize=16)
plt.tight_layout()
plt.show()
| bsd-3-clause |
gpetretto/pymatgen | pymatgen/analysis/eos.py | 4 | 19247 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
This module implements various equations of state.
Note: Most of the code was initially adapted from ASE and deltafactor by
@gmatteo but has since undergone major refactoring.
"""
from copy import deepcopy
import six
from abc import ABCMeta, abstractmethod
import logging
import warnings
import numpy as np
from scipy.optimize import leastsq, minimize
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting import pretty_plot, add_fig_kwargs, get_ax_fig_plt
__author__ = "Kiran Mathew, gmatteo"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__file__)
class EOSBase(six.with_metaclass(ABCMeta)):
"""
    Abstract class that must be subclassed by all equation of state
implementations.
"""
def __init__(self, volumes, energies):
"""
Args:
volumes (list/numpy.array): volumes in Ang^3
energies (list/numpy.array): energy in eV
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
        # minimum energy (e0), bulk modulus (b0),
        # derivative of bulk modulus wrt pressure (b1), minimum volume (v0)
self._params = None
# the eos function parameters. It is the same as _params except for
# equation of states that uses polynomial fits(deltafactor and
# numerical_eos)
self.eos_params = None
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
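        # For the quadratic E(V) = a*V**2 + b*V + c, dE/dV = 0 at V0 = -b/(2a),
        # and B0 = V0 * d2E/dV2 evaluated at V0, i.e. 2*a*V0 (used below).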
v0 = -b/(2*a)
e0 = a*(v0**2) + b*v0 + c
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
        if not vmin < v0 < vmax:
            raise EOSError('The minimum volume of a fitted parabola is '
                           'not in the input volumes.')
return e0, b0, b1, v0
def fit(self):
"""
        Do the fitting using least squares. Override this method to implement
        a custom fitting procedure.
"""
# the objective function that will be minimized in the least square
# fitting
objective_func = lambda pars, x, y: y - self._func(x, pars)
self._params = self._initial_guess()
self.eos_params, ierr = leastsq(
objective_func, self._params, args=(self.volumes, self.energies))
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError("Optimal parameters not found")
@abstractmethod
def _func(self, volume, params):
"""
The equation of state function. This must be implemented by all classes
that derive from this abstract class.
Args:
volume (float/numpy.array)
params (list/tuple): values for the parameters other than the
volume used by the eos.
"""
pass
def func(self, volume):
"""
        The equation of state function with the parameters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array
"""
return self._func(np.array(volume), self.eos_params)
def __call__(self, volume):
return self.func(volume)
@property
def e0(self):
"""
Returns the min energy.
"""
return self._params[0]
@property
def b0(self):
"""
Returns the bulk modulus.
        Note: the units of the bulk modulus are energy/volume (e.g. eV/Ang^3).
"""
return self._params[1]
@property
def b0_GPa(self):
"""
Returns the bulk modulus in GPa.
Note: This assumes that the energy and volumes are in eV and Ang^3
respectively
"""
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
@property
def b1(self):
"""
Returns the derivative of bulk modulus wrt pressure(dimensionless)
"""
return self._params[2]
@property
def v0(self):
"""
Returns the minimum or the reference volume in Ang^3.
"""
return self._params[3]
@property
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0)
def plot(self, width=8, height=None, plt=None, dpi=None, **kwargs):
"""
Plot the equation of state.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width *
golden ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made
to an existing plot. Otherwise, a new plot will be created.
            dpi (int): resolution of the figure in dots per inch.
kwargs (dict): additional args fed to pyplot.plot.
supported keys: style, color, text, label
Returns:
Matplotlib plot object.
"""
plt = pretty_plot(width=width, height=height, plt=plt, dpi=dpi)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = ["Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" %
(self.b0, self.b0_GPa),
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
plt.plot(self.volumes, self.energies, linestyle="None", marker="o",
color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
plt.plot(vfit, self.func(vfit), linestyle="dashed", color=color,
label=label)
plt.grid(True)
plt.xlabel("Volume $\\AA^3$")
plt.ylabel("Energy (eV)")
plt.legend(loc="best", shadow=True)
# Add text with fit parameters.
plt.text(0.4, 0.5, text, transform=plt.gca().transAxes)
return plt
@add_fig_kwargs
def plot_ax(self, ax=None, fontsize=12, **kwargs):
"""
Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
            text (str): Legend text (optional)
Returns:
Matplotlib figure object.
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = ["Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" %
(self.b0, self.b0_GPa),
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
ax.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
ax.grid(True)
ax.set_xlabel("Volume $\\AA^3$")
ax.set_ylabel("Energy (eV)")
ax.legend(loc="best", shadow=True)
# Add text with fit parameters.
ax.text(0.5, 0.5, text, fontsize=fontsize, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
return fig
class Murnaghan(EOSBase):
def _func(self, volume, params):
"""
From PRB 28,5480 (1983)
"""
e0, b0, b1, v0 = tuple(params)
return (e0 +
b0 * volume / b1 * (((v0 / volume)**b1) / (b1 - 1.0) + 1.0) -
v0 * b0 / (b1 - 1.0))
class Birch(EOSBase):
def _func(self, volume, params):
"""
        From Intermetallic Compounds: Principles and Practice, Vol. I:
        Principles, Chapter 9, pages 195-210, by M. Mehl, B. Klein,
        D. Papaconstantopoulos.
        Case where n=0.
"""
e0, b0, b1, v0 = tuple(params)
return (e0
+ 9.0 / 8.0 * b0 * v0 * ((v0 / volume)**(2.0/3.0) - 1.0) ** 2
+ 9.0 / 16.0 * b0 * v0 * (b1 - 4.) *
((v0 / volume)**(2.0/3.0) - 1.0) ** 3)
class BirchMurnaghan(EOSBase):
def _func(self, volume, params):
"""
BirchMurnaghan equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
return (e0 +
9. * b0 * v0 / 16. * (eta ** 2 - 1)**2 *
(6 + b1 * (eta ** 2 - 1.) - 4. * eta ** 2))
class PourierTarantola(EOSBase):
def _func(self, volume, params):
"""
Pourier-Tarantola equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
squiggle = -3.*np.log(eta)
return e0 + b0 * v0 * squiggle ** 2 / 6. * (3. + squiggle * (b1 - 2))
class Vinet(EOSBase):
def _func(self, volume, params):
"""
Vinet equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1. / 3.)
return (e0 + 2. * b0 * v0 / (b1 - 1.) ** 2
* (2. - (5. + 3. * b1 * (eta - 1.) - 3. * eta)
* np.exp(-3. * (b1 - 1.) * (eta - 1.) / 2.)))
class PolynomialEOS(EOSBase):
"""
    Derives from EOSBase. Polynomial-based equations of state must subclass
    this.
"""
def _func(self, volume, params):
return np.poly1d(list(params))(volume)
def fit(self, order):
"""
Do polynomial fitting and set the parameters. Uses numpy polyfit.
Args:
order (int): order of the fit polynomial
"""
self.eos_params = np.polyfit(self.volumes, self.energies, order)
self._set_params()
def _set_params(self):
"""
        Use the fit polynomial to compute the parameters e0, b0, b1 and v0
        and set the _params attribute.
"""
fit_poly = np.poly1d(self.eos_params)
        # the volume at min energy, used as the initial guess for the
# optimization wrt volume.
v_e_min = self.volumes[np.argmin(self.energies)]
# evaluate e0, v0, b0 and b1
min_wrt_v = minimize(fit_poly, v_e_min)
e0, v0 = min_wrt_v.fun, min_wrt_v.x[0]
pderiv2 = np.polyder(fit_poly, 2)
pderiv3 = np.polyder(fit_poly, 3)
b0 = v0 * np.poly1d(pderiv2)(v0)
db0dv = np.poly1d(pderiv2)(v0) + v0 * np.poly1d(pderiv3)(v0)
# db/dp
b1 = - v0 * db0dv / b0
self._params = [e0, b0, b1, v0]
class DeltaFactor(PolynomialEOS):
def _func(self, volume, params):
x = volume**(-2. / 3.)
return np.poly1d(list(params))(x)
def fit(self, order=3):
"""
        Overridden since this eos works with volume**(-2/3) instead of volume.
"""
x = self.volumes**(-2./3.)
self.eos_params = np.polyfit(x, self.energies, order)
self._set_params()
def _set_params(self):
"""
        Overridden to account for the fact that the fit is with volume**(-2/3)
        instead of volume.
"""
deriv0 = np.poly1d(self.eos_params)
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x**(-3./2.)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4./9. * x**5. * deriv2(x)
derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. *
x**(15./2.) * deriv3(x))
b0 = derivV2 / x**(3./2.)
b1 = -1 - x**(-3./2.) * derivV3 / derivV2
# e0, b0, b1, v0
self._params = [deriv0(v0**(-2./3.)), b0, b1, v0]
class NumericalEOS(PolynomialEOS):
def fit(self, min_ndata_factor=3, max_poly_order_factor=5, min_poly_order=2):
"""
Fit the input data to the 'numerical eos', the equation of state employed
in the quasiharmonic Debye model described in the paper:
10.1103/PhysRevB.90.174107.
credits: Cormac Toher
Args:
min_ndata_factor (int): parameter that controls the minimum number
of data points that will be used for fitting.
minimum number of data points =
total data points-2*min_ndata_factor
max_poly_order_factor (int): parameter that limits the max order
of the polynomial used for fitting.
max_poly_order = number of data points used for fitting -
max_poly_order_factor
min_poly_order (int): minimum order of the polynomial to be
considered for fitting.
"""
warnings.simplefilter('ignore', np.RankWarning)
get_rms = lambda x, y: np.sqrt(np.sum((np.array(x)-np.array(y))**2)/len(x))
# list of (energy, volume) tuples
e_v = [(i, j) for i, j in zip(self.energies, self.volumes)]
ndata = len(e_v)
# minimum number of data points used for fitting
ndata_min = max(ndata - 2 * min_ndata_factor, min_poly_order + 1)
rms_min = np.inf
# number of data points available for fit in each iteration
ndata_fit = ndata
# store the fit polynomial coefficients and the rms in a dict,
# where the key=(polynomial order, number of data points used for
# fitting)
all_coeffs = {}
# sort by energy
e_v = sorted(e_v, key=lambda x: x[0])
# minimum energy tuple
e_min = e_v[0]
# sort by volume
e_v = sorted(e_v, key=lambda x: x[1])
# index of minimum energy tuple in the volume sorted list
emin_idx = e_v.index(e_min)
# the volume lower than the volume corresponding to minimum energy
v_before = e_v[emin_idx - 1][1]
# the volume higher than the volume corresponding to minimum energy
v_after = e_v[emin_idx + 1][1]
e_v_work = deepcopy(e_v)
# loop over the data points.
while (ndata_fit >= ndata_min) and (e_min in e_v_work):
max_poly_order = ndata_fit - max_poly_order_factor
e = [ei[0] for ei in e_v_work]
v = [ei[1] for ei in e_v_work]
# loop over polynomial order
for i in range(min_poly_order, max_poly_order + 1):
coeffs = np.polyfit(v, e, i)
pder = np.polyder(coeffs)
a = np.poly1d(pder)(v_before)
b = np.poly1d(pder)(v_after)
if a * b < 0:
rms = get_rms(e, np.poly1d(coeffs)(v))
rms_min = min(rms_min, rms * i / ndata_fit)
all_coeffs[(i, ndata_fit)] = [coeffs.tolist(), rms]
# store the fit coefficients small to large,
# i.e a0, a1, .. an
all_coeffs[(i, ndata_fit)][0].reverse()
# remove 1 data point from each end.
e_v_work.pop()
e_v_work.pop(0)
ndata_fit = len(e_v_work)
logger.info("total number of polynomials: {}".format(len(all_coeffs)))
norm = 0.
fit_poly_order = ndata
# weight average polynomial coefficients.
weighted_avg_coeffs = np.zeros((fit_poly_order,))
# combine all the filtered polynomial candidates to get the final fit.
for k, v in all_coeffs.items():
# weighted rms = rms * polynomial order / rms_min / ndata_fit
weighted_rms = v[1] * k[0] / rms_min / k[1]
weight = np.exp(-(weighted_rms ** 2))
norm += weight
coeffs = np.array(v[0])
# pad the coefficient array with zeros
coeffs = np.lib.pad(coeffs,
(0, max(fit_poly_order-len(coeffs), 0)),
'constant')
weighted_avg_coeffs += weight * coeffs
# normalization
weighted_avg_coeffs /= norm
weighted_avg_coeffs = weighted_avg_coeffs.tolist()
# large to small(an, an-1, ..., a1, a0) as expected by np.poly1d
weighted_avg_coeffs.reverse()
self.eos_params = weighted_avg_coeffs
self._set_params()
class EOS(object):
"""
Convenient wrapper. Retained in its original state to ensure backward
compatibility.
Fit equation of state for bulk systems.
The following equations are supported::
murnaghan: PRB 28, 5480 (1983)
birch: Intermetallic compounds: Principles and Practice, Vol I:
Principles. pages 195-210
birch_murnaghan: PRB 70, 224107
pourier_tarantola: PRB 70, 224107
vinet: PRB 70, 224107
deltafactor
numerical_eos: 10.1103/PhysRevB.90.174107.
Usage::
eos = EOS(eos_name='murnaghan')
eos_fit = eos.fit(volumes, energies)
eos_fit.plot()
"""
MODELS = {
"murnaghan": Murnaghan,
"birch": Birch,
"birch_murnaghan": BirchMurnaghan,
"pourier_tarantola": PourierTarantola,
"vinet": Vinet,
"deltafactor": DeltaFactor,
"numerical_eos": NumericalEOS
}
def __init__(self, eos_name='murnaghan'):
if eos_name not in self.MODELS:
raise EOSError("The equation of state '{}' is not supported. "
"Please choose one from the following list: {}".
format(eos_name, list(self.MODELS.keys())))
self._eos_name = eos_name
self.model = self.MODELS[eos_name]
def fit(self, volumes, energies):
"""
Fit energies as function of volumes.
Args:
volumes (list/np.array)
energies (list/np.array)
Returns:
EOSBase: EOSBase object
"""
eos_fit = self.model(np.array(volumes), np.array(energies))
eos_fit.fit()
return eos_fit
class EOSError(Exception): pass
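# Illustrative sketch (added for exposition, not part of the original module):
# fit the EOS wrapper above to synthetic, made-up E(V) data and print the
# fitted parameters. The numbers below are placeholders chosen only so that
# the minimum lies inside the sampled volume range.
if __name__ == "__main__":
    _volumes = np.linspace(55, 70, 10)
    _energies = 0.01 * (_volumes - 62.5) ** 2 - 10.8  # fake parabolic E(V) in eV
    _fit = EOS(eos_name="birch_murnaghan").fit(_volumes, _energies)
    print(_fit.results)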
| mit |
liumengjun/cn-deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
    Return the list of CIFAR-10 label names
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
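# Illustrative sketch (added for exposition, not part of the original helper):
# possible `normalize` and `one_hot_encode` callables to pass into
# `preprocess_and_save_data` above. The dataset folder path is a placeholder.
#
#     def normalize(images):
#         # scale raw pixel values from [0, 255] down to [0, 1]
#         return images / 255.0
#
#     def one_hot_encode(labels):
#         lb = LabelBinarizer()
#         lb.fit(range(10))
#         return lb.transform(labels)
#
#     preprocess_and_save_data('cifar-10-batches-py', normalize, one_hot_encode)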
| mit |
quoccuongngo/speech-recognition | deep_learning_course/notmnist_problems.py | 2 | 3171 | import os
import time
from six.moves import cPickle as pickle
from sklearn import linear_model
data_root = '/home/stack/PycharmProjects/speech-recognition/deep_learning_course/dataset' # noqa
pickle_file = os.path.join(data_root, 'notMNIST.pickle')
with open(pickle_file, 'rb') as f:
notMNIST = pickle.load(f)
train_dataset = notMNIST.get('train_dataset')
train_labels = notMNIST.get('train_labels')
valid_dataset = notMNIST.get('valid_dataset')
valid_labels = notMNIST.get('valid_labels')
test_dataset = notMNIST.get('test_dataset')
test_labels = notMNIST.get('test_labels')
nsamples, nx, ny = train_dataset.shape
d2_train_dataset = train_dataset.reshape((nsamples, nx * ny))
nsamples, nx, ny = valid_dataset.shape
d2_valid_dataset = valid_dataset.reshape((nsamples, nx * ny))
nsamples, nx, ny = test_dataset.shape
d2_test_dataset = test_dataset.reshape((nsamples, nx * ny))
print(train_dataset.shape, d2_train_dataset.shape, train_labels.shape)
print(valid_dataset.shape, d2_valid_dataset.shape, valid_labels.shape)
print(test_dataset.shape, d2_test_dataset.shape, test_labels.shape)
def maybe_train(filename, force=False):
model_file = os.path.join(data_root, filename)
if force or not os.path.exists(model_file):
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(d2_train_dataset, train_labels)
with open(model_file, 'wb') as f:
pickle.dump(logreg, f)
else:
print('Model is already trained, load it')
with open(model_file, 'rb') as fid:
logreg = pickle.load(fid)
return logreg
def problem6():
start_time = time.clock()
logreg = maybe_train('sklearn_model.pickle')
print("\nTraining set")
train_predicts = logreg.predict(d2_train_dataset)
ncorrect = sum(1 for label, predict in zip(train_labels, train_predicts)
if label == predict)
nincorrect = sum(1 for label, predict in zip(train_labels, train_predicts)
if label != predict)
print("Correct:", ncorrect)
print('Incorrect:', nincorrect)
    print('Accuracy:', (ncorrect / (ncorrect + nincorrect) * 100), '%')
print("\nValidation set")
valid_predicts = logreg.predict(d2_valid_dataset)
ncorrect = sum(1 for label, predict in zip(valid_labels, valid_predicts)
if label == predict)
nincorrect = sum(1 for label, predict in zip(valid_labels, valid_predicts)
if label != predict)
print("Correct:", ncorrect)
print('Incorrect:', nincorrect)
    print('Accuracy:', (ncorrect / (ncorrect + nincorrect) * 100), '%')
print("\nTest set")
test_predicts = logreg.predict(d2_test_dataset)
ncorrect = sum(1 for label, predict in zip(test_labels, test_predicts)
if label == predict)
nincorrect = sum(1 for label, predict in zip(test_labels, test_predicts)
if label != predict)
print("#Correct:", ncorrect)
print('#Incorrect:', nincorrect)
print('Accuracy: %.1f %%' % (ncorrect / (ncorrect + nincorrect) * 100))
print('\nRunning time:', (time.clock() - start_time))
if __name__ == '__main__':
problem6()
| gpl-3.0 |
atantet/ergoPack | example/art/spinupTransferOU.py | 1 | 9533 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.linalg
import ergoPlot
x01 = np.array([-1.5, -1.8])
x02 = np.array([-1., -2.])
x03 = np.array([-0.7, -2.7])
simCol = '0.4'
simTailWidth = '0.08'
simMarkerSize=5
caseName = 'NA'
transferName = '\mathcal{P}_t'
#flowName = 'S(t)'
flowName = '\psi(t)'
x = np.linspace(-3, 3, 100)
nx = x.shape[0]
y = x
ny = y.shape[0]
N = nx * ny
(X, Y) = np.meshgrid(x, y)
XY = np.zeros((2, N))
XY[0] = X.flatten()
XY[1] = Y.flatten()
muf = np.array([-1, -2])
sigf = np.array([[0.25, 0], [0, 0.25]])
dsigf = np.linalg.det(sigf)
isigf = np.linalg.inv(sigf)
mug = np.array([0, 0])
sigg = np.array([[1, 0], [0, 1]])
dsigg = np.linalg.det(sigg)
isigg = np.linalg.inv(sigg)
d = muf.shape[0]
XYmMuf = XY - np.tile(muf, (N, 1)).T
XYmMug = XY - np.tile(mug, (N, 1)).T
nlev = 4
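# Evaluate two Gaussian densities point by point on the grid: f plays the role
# of the initial density rho_0 (centered at muf) and g the observable (centered
# at mug) used in the transfer/Koopman plots below.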
f = np.empty((N,))
for k in np.arange(N):
f[k] = 1. / np.sqrt((2*np.pi)**d*dsigf) \
* np.exp(-np.dot(XYmMuf[:, k], np.dot(isigf, XYmMuf[:, k]))/2)
f = f.reshape(nx, ny)
levelsf = np.linspace(f.min(), f.max(), nlev)
g = np.empty((N,))
for k in np.arange(N):
g[k] = 1. / np.sqrt((2*np.pi)**d*dsigg) \
* np.exp(-np.dot(XYmMug[:, k], np.dot(isigg, XYmMug[:, k]))/2)
g = g.reshape(nx, ny)
levelsg = np.linspace(g.min(), g.max(), nlev)
t = 0.5
A = np.array([[-1., 0.3],[0.3, -1.]])
mu = np.array([4., 3.])
ftr = np.empty((N,))
XYmMuftr = np.dot(scipy.linalg.expm(-t*A), XY - np.tile(mu, (N, 1)).T) \
+ np.tile(mu, (N, 1)).T \
- np.tile(muf, (N, 1)).T
for k in np.arange(N):
ftr[k] = 1. / np.sqrt((2*np.pi)**d*dsigf) \
* np.exp(-np.dot(XYmMuftr[:, k], np.dot(isigf, XYmMuftr[:, k]))/2)
# change-of-variables Jacobian |det(exp(-tA))| for the pushed-forward density
ftr *= np.abs(np.linalg.det(scipy.linalg.expm(-t*A)))
levelsftr = np.linspace(ftr.min(), ftr.max(), nlev)
ftr = ftr.reshape(nx, ny)
gtr = np.empty((N,))
XYmMugtr = np.dot(scipy.linalg.expm(t*A), XY - np.tile(mu, (N, 1)).T) \
+ np.tile(mu, (N, 1)).T \
- np.tile(mug, (N, 1)).T
for k in np.arange(N):
gtr[k] = 1. / np.sqrt((2*np.pi)**d*dsigg) \
* np.exp(-np.dot(XYmMugtr[:, k], np.dot(isigg, XYmMugtr[:, k]))/2)
levelsgtr = np.linspace(gtr.min(), gtr.max(), nlev)
gtr = gtr.reshape(nx, ny)
idX01 = np.argmin(((XY - np.tile(x01, (XY.shape[1],1)).T)**2).sum(0))
x01 = XY[:, idX01]
xf1 = XYmMugtr[:, idX01]
idX02 = np.argmin(((XY - np.tile(x02, (XY.shape[1],1)).T)**2).sum(0))
x02 = XY[:, idX02]
xf2 = XYmMugtr[:, idX02]
idX03 = np.argmin(((XY - np.tile(x03, (XY.shape[1],1)).T)**2).sum(0))
x03 = XY[:, idX03]
xf3 = XYmMugtr[:, idX03]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contourf(X, Y, g, np.linspace(g.min(), g.max(), nlev+2)[1:], cmap=cm.Blues)
ax.text(0.4, 0.7, r'$g(x)$', fontsize=ergoPlot.fs_latex)
ax.contour(X, Y, f, nlev, linewidths=2, colors='k', linestyles='--', label=r'$f(x)$')
ax.text(-0.1, -2.5, r'$\rho_0(x)$', fontsize=ergoPlot.fs_latex)
ax.set_axis_off()
plt.plot(x01[0], x01[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
plt.plot(x02[0], x02[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
plt.plot(x03[0], x03[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
fig.savefig('plot/spinupTransfer%s.%s' % (caseName, ergoPlot.figFormat), bbox_inches='tight', dpi=ergoPlot.dpi)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contourf(X, Y, g, np.linspace(g.min(), g.max(), nlev+2)[1:], cmap=cm.Blues)
ax.text(0.4, 0.7, r'$g(x)$', fontsize=ergoPlot.fs_latex)
ax.contour(X, Y, f, nlev, linewidths=2, colors='k', linestyles='--', label=r'$f(x)$')
ax.text(-0.1, -2.5, r'$\rho_0(x)$', fontsize=ergoPlot.fs_latex)
ax.contour(X, Y, ftr, nlev, linewidths=2, colors='k', label=r'$%s \rho_0(x)$' % transferName)
ax.text(1.05, -0.8, r'$%s \rho_0(x)$' % transferName, fontsize=ergoPlot.fs_latex)
ax.arrow(0.3, -2.15, 1.15 - 0.3, -1.05 + 2.15, head_width=0.1, width = 0.02, head_length=0.1,
fc='k', ec='k')
#ax.text(0.9, -1.1, r'$\mathcal{L}_t \rho_0(x)$', fontsize=ergoPlot.fs_latex)
ax.set_axis_off()
plt.plot(x01[0], x01[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
plt.plot(xf1[0], xf1[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
#ax.text(x01[0]-0.45, x01[1] + 0.2, r'$x_0^{(1)}$', color=simCol, fontsize=ergoPlot.fs_latex)
ax.annotate('', xy=xf1, xycoords='data',
xytext=(x01[0], x01[1]), textcoords='data',
size=ergoPlot.fs_latex, color=simCol,
arrowprops=dict(arrowstyle='simple, tail_width=' + simTailWidth,
fc=simCol, ec="none",
connectionstyle="arc3,rad=-0.3"))
#ax.text(xf1[0]-0.65, xf1[1]+0.2, r'$\phi(t) x_0^{(1)}$', color=simCol, fontsize=ergoPlot.fs_latex)
plt.plot(x02[0], x02[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
plt.plot(xf2[0], xf2[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
#ax.text(x02[0]-0.45, x02[1] + 0.2, r'$x_0^{(2)}$', color=simCol, fontsize=ergoPlot.fs_latex)
ax.annotate('', xy=xf2, xycoords='data',
xytext=(x02[0], x02[1]), textcoords='data',
size=ergoPlot.fs_latex, color=simCol,
arrowprops=dict(arrowstyle="simple, tail_width=" + simTailWidth,
fc=simCol, ec="none",
connectionstyle="arc3,rad=-0.2"))
#ax.text(xf2[0]-0.65, xf2[1]+0.2, r'$\phi(t) x_0^{(2)}$', color=simCol, fontsize=ergoPlot.fs_latex)
plt.plot(x03[0], x03[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
plt.plot(xf3[0], xf3[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
#ax.text(x03[0]-0.45, x03[1] + 0.2, r'$x_0^{(3)}$', color=simCol, fontsize=ergoPlot.fs_latex)
ax.annotate('', xy=xf3, xycoords='data',
xytext=(x03[0], x03[1]), textcoords='data',
size=ergoPlot.fs_latex, color=simCol,
arrowprops=dict(arrowstyle="simple, tail_width=" + simTailWidth,
fc=simCol, ec="none",
connectionstyle="arc3,rad=-0.1"))
#ax.text(xf3[0]-0.65, xf3[1]+0.2, r'$\phi(t) x_0^{(3)}$', color=simCol, fontsize=ergoPlot.fs_latex)
fig.savefig('plot/spinupTransfer%s.%s' % (caseName, ergoPlot.figFormat), bbox_inches='tight', dpi=ergoPlot.dpi)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contourf(X, Y, gtr, np.linspace(gtr.min(), gtr.max(), nlev+2)[1:], cmap=cm.Blues)
ax.text(-2.85, 0.75, r'$g(%s x)$' % flowName, fontsize=ergoPlot.fs_latex)
ax.contour(X, Y, g, np.linspace(g.min(), g.max(), nlev+2)[1:],
linewidths=1.5, colors='b', linestyles='--', label=r'$g(x)$')
ax.text(0.5, 1.2, r'$g(x)$', fontsize=ergoPlot.fs_latex)
ax.contour(X, Y, f, nlev, linewidths=2, colors='k', label=r'$f(x)$')
ax.text(-0.1, -2.5, r'$\rho_0(x)$', fontsize=ergoPlot.fs_latex)
ax.arrow(0.4, 1.275, -1.7-0.4, 0.85-1.275, head_width=0.1, width = 0.02, head_length=0.1,
fc='k', ec='k')
ax.set_axis_off()
plt.plot(x01[0], x01[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
# plt.plot(xf1[0], xf1[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
# #ax.text(x01[0]-0.45, x01[1] + 0.2, r'$x_0^{(1)}$', color=simCol, fontsize=ergoPlot.fs_latex)
# ax.annotate('', xy=xf1, xycoords='data',
# xytext=(x01[0], x01[1]), textcoords='data',
# size=ergoPlot.fs_latex, color=simCol,
# arrowprops=dict(arrowstyle='simple, tail_width=' + simTailWidth,
# fc=simCol, ec="none",
# connectionstyle="arc3,rad=-0.3"))
#ax.text(xf1[0]-0.65, xf1[1]+0.2, r'$\phi(t) x_0^{(1)}$', color=simCol, fontsize=ergoPlot.fs_latex)
plt.plot(x02[0], x02[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
# plt.plot(xf2[0], xf2[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
# #ax.text(x02[0]-0.45, x02[1] + 0.2, r'$x_0^{(2)}$', color=simCol, fontsize=ergoPlot.fs_latex)
# ax.annotate('', xy=xf2, xycoords='data',
# xytext=(x02[0], x02[1]), textcoords='data',
# size=ergoPlot.fs_latex, color=simCol,
# arrowprops=dict(arrowstyle="simple, tail_width=" + simTailWidth,
# fc=simCol, ec="none",
# connectionstyle="arc3,rad=-0.2"))
#ax.text(xf2[0]-0.65, xf2[1]+0.2, r'$\phi(t) x_0^{(2)}$', color=simCol, fontsize=ergoPlot.fs_latex)
plt.plot(x03[0], x03[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
# plt.plot(xf3[0], xf3[1], marker='o', markerfacecolor=simCol, markersize=simMarkerSize, markeredgecolor="none")
# #ax.text(x03[0]-0.45, x03[1] + 0.2, r'$x_0^{(3)}$', color=simCol, fontsize=ergoPlot.fs_latex)
# ax.annotate('', xy=xf3, xycoords='data',
# xytext=(x03[0], x03[1]), textcoords='data',
# size=ergoPlot.fs_latex, color=simCol,
# arrowprops=dict(arrowstyle="simple, tail_width=" + simTailWidth,
# fc=simCol, ec="none",
# connectionstyle="arc3,rad=-0.1"))
#ax.text(xf3[0]-0.65, xf3[1]+0.2, r'$\phi(t) x_0^{(3)}$', color=simCol, fontsize=ergoPlot.fs_latex)
fig.savefig('plot/spinupKoopman%s.%s' % (caseName, ergoPlot.figFormat), bbox_inches='tight', dpi=ergoPlot.dpi)
| gpl-3.0 |
dsquareindia/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 28 | 18031 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
    # Check picklability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
    AdaBoostRegressor should work without sample_weights in the base estimator.
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
agogear/sfl_corpling | data/tallies.py | 1 | 1479 | # This is a list of the total number of first, second, third posts, and so on.
def postcounter(directory):
"""This gets the top post of each username in the corpus and formats it for plotter()"""
import os
import re
from collections import Counter
import pandas as pd
from time import localtime, strftime
time = strftime("%H:%M:%S", localtime())
print '%s: Working ... ' % time
try:
from IPython.display import display, clear_output
have_ipython = True
except ImportError:
have_ipython = False
regex = re.compile('^user-(.*)-([0-9]+).txt.xml')
users = []
for root, dirs, files in os.walk(directory):
for name in files:
if not name.startswith('.'):
username, num = re.findall(regex, name)[0]
users.append([username, num])
users.sort(key=lambda x: int(x[-1]), reverse=True)
top_num = []
for user in list(set([data[0] for data in users])):
for entry in users:
if entry[0] == user:
top_num.append(entry[1])
#print entry
break
ints = [int(n) for n in top_num]
dictionary = Counter(ints)
tot = [n for n in dictionary.keys()]
count = [c for c in dictionary.values()]
out = pd.DataFrame(count, index = tot, columns = ['Count'])
out.fillna(0, inplace = True)
time = strftime("%H:%M:%S", localtime())
print '\n%s: Done!' % time
return out
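# Illustrative sketch (added for exposition, not part of the original module):
# expected usage on a corpus directory containing files named like
# 'user-<name>-<num>.txt.xml'. The path below is a placeholder.
#
#     counts = postcounter('data/forum_corpus')
#     print counts.head()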
| mit |
spinellic/Mission-Planner | Lib/site-packages/numpy/lib/npyio.py | 53 | 59490 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
if sys.platform != 'cli':
from _compiled_base import packbits, unpackbits
else:
def packbits(*args, **kw):
raise NotImplementedError()
def unpackbits(*args, **kw):
raise NotImplementedError()
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
g = GzipFile(fileobj=f.fileobj)
g.name = f.name
g.mode = f.mode
f = g
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
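            # Hand ownership of the open handle over to NpzFile (own_fid=True below)
            # and drop local ownership, so the finally-block does not close it early.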
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    numpy.savez_compressed : Save several arrays into a compressed .npz file format
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : string
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
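    Examples
    --------
    A minimal sketch, analogous to the `savez` example (DEFLATE compression
    additionally requires the zlib module to be available):
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> np.savez_compressed(outfile, x=x)
    >>> outfile.seek(0)
    >>> np.load(outfile)['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])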
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
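# e.g. _getconv(np.dtype(int))('3.0') == 3 -- routing integer columns through
# float() first lets them tolerate values written as '3.0'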
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a record
data-type, the resulting array will be 1-dimensional, and each row
will be interpreted as an element of the array. In this case, the
number of columns used must match the number of fields in the
data-type.
comments : str, optional
The character used to indicate the start of a comment; default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. The default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
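    # e.g. flatten_dtype(np.dtype([('x', float), ('y', [('s', float), ('t', float)])]))
    # returns [dtype('float64')] * 3, i.e. one base dtype per (possibly nested) field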
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if own_fh:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
.. versionadded:: 1.5.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname, 'U')
own_fhd = True
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if not first_line:
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = dtype.names
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
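    Examples
    --------
    A minimal sketch (with the default ``dtype=None`` the type of each column
    is inferred from the data):
    >>> from StringIO import StringIO
    >>> r = np.recfromtxt(StringIO("1 2.5\\n3 4.5"), names=['a', 'b'])
    >>> r.a
    array([1, 3])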
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
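    Examples
    --------
    A minimal sketch (field names are taken from the header row and, by
    default, converted to lower case):
    >>> from StringIO import StringIO
    >>> r = np.recfromcsv(StringIO("A,B\\n1,2.5\\n3,4.5"))
    >>> r.a
    array([1, 3])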
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| gpl-3.0 |
nansencenter/sentinel1ice | get_dynamic_range.py | 1 | 2701 | ''' Use built-in backend AGG to prevent X server error.
    This error happens when working on a remote server through ssh '''
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, glob
import numpy as np
import config as cfg
low_edge = {'HH':0.1, 'HV':0.1} # threshold for clipping extreme values
high_edge = {'HH':0.1, 'HV':0.1} # threshold for clipping extreme values
# find input files
ifiles = sorted(glob.glob(cfg.outputDirectory + '*/S1?_EW_GRDM_1SDH*_sigma0.npz'))
for pol in ['HH','HV']:
for dtype in ['raw','denoised']:
key = pol + '_' + dtype
# compute effective dynamic range for texture analysis
hist_bin_edges = np.load(ifiles[0])['sigma0_%s_hist' % key][1]
hist_stack = np.zeros(hist_bin_edges.size-1)
for ifile in ifiles:
hist_stack = np.vstack([hist_stack, np.load(ifile)['sigma0_%s_hist' % key][0]])
hist_stack = hist_stack[1:]
hist_sum = np.sum(hist_stack)
hist_cum = np.cumsum(np.sum(hist_stack,axis=0))
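        # clip the lowest low_edge% and highest high_edge% of the samples,
        # located through the cumulative histogram over all input files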
sigma0_min = hist_bin_edges[np.where(hist_cum < low_edge[pol]/100. * hist_sum)[0][-1]+1]
sigma0_max = hist_bin_edges[np.where(hist_cum > (100-high_edge[pol])/100. * hist_sum)[0][0]]
print( '%s sigma0 range (%3.2f-%3.2f%%): %.2f dB to %.2f dB'
% (key,low_edge[pol],100-high_edge[pol],sigma0_min,sigma0_max))
plt.plot( hist_bin_edges[:-1]+np.diff(hist_bin_edges)/2, np.sum(hist_stack,axis=0), label='%s' % key )
plt.xlabel('Sigma nought (dB)'), plt.ylabel('Number of samples')
plt.title(r'$\sigma^0$' + ' distributions from %d images' % len(ifiles))
plt.legend()
plt.tight_layout()
plt.savefig('sigma0_distribution.png', dpi=300)
'''
import os, glob
import numpy as np
import matplotlib.pyplot as plt
from nansat import Nansat
import config as cfg
ifiles = sorted(glob.glob(cfg.outputDirectory + '*/S1?_EW_GRDM_1SDH*_sigma0.npz'))
incAng = []
sigma0HHdB = []
sigma0HVdB = []
for li,ifile in enumerate(ifiles):
print(li)
sigma0HHdB.append(10*np.log10(Nansat(ifile.replace('.npz','_HH_denoised.tif'))[1]).flatten())
sigma0HVdB.append(10*np.log10(Nansat(ifile.replace('.npz','_HV_denoised.tif'))[1]).flatten())
incAng.append(np.load(ifile)['incidenceAngle'].flatten())
sigma0HHdB = np.hstack(sigma0HHdB)
sigma0HVdB = np.hstack(sigma0HVdB)
incAng = np.hstack(incAng)
gpm = np.isfinite(sigma0HHdB * sigma0HVdB * incAng)
sigma0HHdB = sigma0HHdB[gpm]
sigma0HVdB = sigma0HVdB[gpm]
incAng = incAng[gpm]
plt.figure()
plt.hist2d(incAng, sigma0HHdB, bins=100, range=[[incAng.min(),incAng.max()],[-40,10]])
plt.figure()
plt.hist2d(incAng, sigma0HVdB, bins=100, range=[[incAng.min(),incAng.max()],[-40,10]])
'''
| gpl-3.0 |
jagrio/MachineLearningSlippage | MLslippagesrc/MLslippage/ml_training.py | 1 | 121792 | #! /usr/bin/env python
"""Mainly Edited for private usage by: Ioannis Agriomallos
Ioanna Mitsioni
License: BSD 3 clause
============= CURRENT CODE USAGE =============
Current code trains MLP Classifiers to classify force input samples as stable (0) or slip (1).
---- Input
-> Input samples originate from Optoforce and ATI sensors, are 3D (fx,fy,fz) and come in 3 different datasets:
one training (Optoforce), containing several surfaces as well as slip-stable occurrences,
one validation (Optoforce), containing 1 surface with slip-stable occurrences on a completely unseen task-setup
and one testing set acquired from ATI sensor for different normal desired forces, for several low-pass filter cutoff
frequencies and for both translational and rotational slipping occurrences.
---- Input transformation
-> Several pre-features can be taken from these inputs, but here |f| is kept.
-> Several time and frequency domain features are extracted from pre-feature windows.
   (implemented in 'featext.py'). These windows have size w and are shifted by s samples at each step.
-> Then a feature selection-ranking is performed using MutualVariableInformation
-> Finally PCA is performed to keep a reduced set among the best selected features
---- Training of ML Classifiers
-> Several MLP Classifiers are trained for all combinations of selected featuresets (using the training Optoforce dataset)
---- Results
-> Stats of classification results are kept inside each .npz along with the respective trained model in results* folders
"""
# ############################################## EXAMPLE OF CODE USAGE ################################################
# ############ TRAINING PROCEDURE ##############
# # necessary steps before training
# f,l,fd,member,m1,m2 = data_prep(datafile) # read input force and labels
# prefeat = compute_prefeat(f) # compute corresponding prefeatures
# features, labels = feature_extraction(prefeat, member) # feature extraction from prefeatures
# avg_feat_comp_time(prefeat) # average feature extraction time
# new_labels = label_cleaning(prefeat,labels,member) # trim labels, around change points
# X,Y,Yn,Xsp,Ysp = computeXY(features,labels,new_labels,m1,m2) # compute data and labels, trimmed and untrimmed
# surf, surfla = computeXY_persurf(Xsp,Ysp) # compute per surface data and labels
# # training
# train_1_surface(surf,surfla) # training of all combinations per 1 surface
# train_2_surface(surf,surfla) # training of all combinations per 2 surfaces
# train_3_surface(surf,surfla) # training of all combinations per 3 surfaces
# train_4_surface(surf,surfla) # training of all combinations per 4 surfaces
# train_5_surface(surf,surfla) # training of all combinations per 5 surfaces
#
# ############ OFFLINE TESTING PROCEDURE ##############
# # generate files with stats
# bargraph_perf_gen1(6)
# bargraph_perf_gen2(6)
# bargraph_perf_gen3(6)
# bargraph_perf_gen4(6)
# bargraph_perf_gen5(6)
# # use the bargraph tool to plot graphs from generated files
# # -left column cross-accuracy (trained on one, tested on all the others),
# # -right column self-accuracy (trained and tested on the same)
# # -each row i represents training only with i surfaces.
# # -each stack represents a training group, each bar represents a subfeatureset(AFFT,FREQ,TIME,BOTH)
# # -blue,green,yellow,red : TP,TN,FN,FP
# plt.figure(figsize=(20,40))
# for i in range(5):
# make_bargraphs_from_perf(i)
#
# ############ ONLINE TESTING PROCEDURE ##############
# # same necessary steps as in training
# f,l,fd,member,m1,m2 = data_prep(validfile)
# prefeat = compute_prefeat(f)
# features, labels = feature_extraction(prefeat, member, validfeatfile, 'validfeat_')
# new_labels = label_cleaning(prefeat,labels,member)
# X,Y,Yn,Xsp,Ysp = computeXY(features,labels,new_labels,m1,m2,validXYfile,validXYsplitfile)
# surf, surfla = computeXY_persurf(Xsp,Ysp,validsurffile)
#
# ####### TESTING DATA FROM ATI F/T SENSOR TRANSLATIONAL CASE
# prediction('ati_new_fd1.5N_kp3_152Hz_validation.mat')
# ####### TESTING DATA FROM ATI F/T SENSOR ROTATIONAL CASE
# prediction('ati_new_fd1.5N_kp3_152Hz_validation_rot.mat')
import time
start_time = time.time()
from copy import deepcopy, copy
import math
import scipy.io as sio
import shutil
import os, errno
from random import shuffle
import numpy as np
from pylab import *
from featext import *
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import matplotlib.image as mpimg
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.exceptions import ConvergenceWarning
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix, f1_score
import re
import datetime
import urllib
import tarfile
import zipfile
import joblib
from subprocess import call, check_output
from joblib import Parallel, delayed, Memory
from tempfile import mkdtemp
import copy_reg
import types
import itertools
import glob
def _pickle_method(m):
"""Useful function for successful convertion from directories and lists to numpy arrays"""
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
def ensure_dir(directory):
"""Useful function for creating directory only if not existent"""
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def comb(n,r):
"""Combinations of n objects by r, namely picking r among n possible.
comb(n,r) = n!/(r!(n-r)!)
"""
return math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
############ PRE-FEATURES ############
###### DEFINITION
# featnum 0 : sf = (fx^2+fy^2+fz^2)^0.5
# 1 : ft = (fx^2+fy^2)^0.5
# 2 : fn = |fz|
# 3 : ft/fn = (fx^2+fy^2)^0.5/|fz|
# input (nxm) -> keep (nx3) -> compute pre-feature and return (nx1)
def sf(f):
"""Computation of norm (sf) of force (f)"""
return np.power(np.sum(np.power(f[:,:3],2),axis=1),0.5)
def ft(f):
"""Computation of tangential (ft) of force (f)"""
return np.power(np.sum(np.power(f[:,:2],2),axis=1),0.5)
def fn(f):
"""Computation of normal (fn) of force (f)"""
return np.abs(f[:,2])
def ftn(f):
"""Computation of tangential (ft) to normal (fn) ratio of force (f),
corresponding to the friction cone boundary
"""
retft = ft(f)
retfn = fn(f)
retft[retfn<=1e-2] = 0
return np.divide(retft,retfn+np.finfo(float).eps)
def lab(f):
"""Label embedded in input f"""
return np.abs(f[:,-1])
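# e.g. for a prefeature window f of shape (n, 4) with columns (fx, fy, fz, label),
# sf(f) returns the (n,) array of force magnitudes and lab(f) the (n,) label column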
class ml:
def __init__(self,c):
######## TRAINING DEFAULTS
global cv, scaler, decomp, names, classifiers, download, delete_big_features
cv = c.cv
scaler = c.scaler
decomp = c.decomp
names = c.names
classifiers = c.classifiers
download = c.download # Download pre-computed (1) data or compute them all anew (0)
delete_big_features = c.delete_big_features # Delete (1) or keep (0) computed big-in-size features,
# helping mainly to avoid several computations when recomputing features
############ INITIALISATION PARAMETERS ############
global window, shift, samplesperdataset, havelabel, returntime, \
featlabel, magnFFT, featall, featparam, numfeat, nfeat
window, shift = c.window, c.shift
samplesperdataset = c.samplesperdataset
havelabel = c.havelabel
returntime = c.returntime
featlabel = c.featlabel # 0: all features, 1: temporal, 2: frequency, 3: FFT only
magnFFT = c.magnFFT # 0: FFT in magnitude format, 1: FFT in real and imag format,
featall = c.featall # 0: all, 1: feat1 (phinyomark's), 2: feat2 (golz's)
featparam = [havelabel,featlabel,magnFFT,featall,returntime]
CV = c.CV # cross validation checks
numfeat = c.numfeat # number of features to show
nfeat = c.nfeat # number of features to keep
###### Initialize necessary names and paths
global datapath, datafile, validfile, featpath, allfeatpath, prefeatpath,\
prefeatname, prefeatfile, featname, featfile, validfeatname, validfeatfile,\
surffile, XYfile, XYsplitfile, respath, toolfile, toolpath, tool
datapath = c.datapath
ensure_dir(datapath)
datafile = c.datafile
validfile = c.validfile
featpath = c.featpath
ensure_dir(featpath)
allfeatpath = c.allfeatpath
ensure_dir(allfeatpath)
prefeatname = c.prefeatname
prefeatfile = c.prefeatfile
featname = c.featname
featfile = c.featfile
validfeatname = c.validfeatname
validfeatfile = c.validfeatfile
surffile = c.surffile
XYfile = c.XYfile
XYsplitfile = c.XYsplitfile
validsurffile = c.validsurffile
validXYfile = c.validXYfile
validXYsplitfile = c.validXYsplitfile
respath = c.respath
toolfile = c.toolfile
toolpath = c.toolpath
tool = c.tool
############ Feature Names ###########
global featnames
"""features: || if\n"""+\
"""|--> time domain : || samples = 1024\n"""+\
"""|----|---> phinyomark : 11+3{shist} --------------------------> = 14+0.0samples || 14\n"""+\
"""|----|---> golz : 10+samples{acrol} --------------------> = 10+1.0samples || 1034\n"""+\
"""|--> frequency domain :\n"""+\
"""|----|---> phinyomark : 3{arco}+4{mf}+2(samples/2+1){RF,IF} --> = 9+1.0samples || 1033\n"""+\
"""|----|---> golz : 2(samples/2+1){AF,PF} ----------------> = 2+1.0samples || 1026\n"""+\
"""|----|----------------|-------alltogether---------------------> = 35+3.0samples || numfeat = 3107"""
## Time Domain Phinyomark feats
featnames = ['IS', 'MAV', 'MAVSLP', 'SSI', 'VAR', 'RMS', 'RNG', 'WAVL', 'ZC', 'SSC', 'WAMP',
'HIST_1', 'HIST_2', 'HIST_3'] # 11+3{shist}
## Frequency Domain Phinyomark feats
featnames += ['ARCO_1', 'ARCO_2', 'ARCO_3', 'MNF', 'MDF', 'MMNF', 'MMDF'] # 3{arco}+4{mf}
featnames += ['RF_{:03d}'.format(i) for i in range(window/2+1)] # samples/2+1{RF}
featnames += ['IF_{:03d}'.format(i) for i in range(window/2+1)] # samples/2+1{IF}
## Time Domain Golz feats
featnames += ['MV', 'STD', 'MAX', 'RNGX', 'RNGY', 'MED', 'HJORTH', 'SENTR', 'SE', 'SSK'] # 10
featnames += ['ACORL_{:04d}'.format(i) for i in range(window)] # samples{acrol}
## Frequency Domain Golz feats
featnames += ['AF_{:03d}'.format(i) for i in range(window/2+1)] # samples/2+1{AF}
featnames += ['PF_{:03d}'.format(i) for i in range(window/2+1)] # samples/2+1{PF}
############ PREFEATURES #############
global prefeatfn, prefeatnames, prefeatid
prefeatfn = np.array([sf,ft,fn,ftn,lab]) # convert to np.array to be easily indexed by a list
prefeatnames = np.array(['fnorm','ft','fn','ftdivfn','label'])
prefeatid = [0,4] # only the prefeatures with corresponding ids will be computed
############ SUBFEATURES #############
global subfeats
subfeats = ['AFFT','FREQ','TIME','BOTH']
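        # subfeature groups used when training: FFT-only (AFFT), frequency-domain (FREQ),
        # time-domain (TIME) and all features together (BOTH), cf. the featlabel options above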
############ Download necessary files ############
def convert_bytes(num):
"""this function will convert bytes to MB.... GB... etc"""
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def file_size(file_path):
"""this function will return the file size"""
if os.path.isfile(file_path):
file_info = os.stat(file_path)
return file_info.st_size
def download_file(datafile, targetlink):
"""Function for checking if targetfile exists, else downloading it from targetlink to targetpath+targetfile"""
if not os.path.isfile(datafile):
print 'Necessary ', datafile, ' not here! Downloading...'
u = urllib.urlopen(targetlink)
data = u.read()
print 'Completed downloading ','{:.2f}'.format(len(data)*1./(1024**2)),'MB of ',datafile,'!'
u.close()
with open(datafile, "wb") as f :
f.write(data)
print 'Necessary ', datafile, ' completed saving!'
else:
print 'Necessary ', datafile, ' already here!'
return file_size(datafile)
def extract_file(source,destination='.'):
"""Decompress source zip, tar or tgz file to destination folder"""
print "Extracting compressed file..."
if (source.endswith('tar.gz') or source.endswith('tgz')):
with tarfile.open(source, 'r:gz' ) as tgz_ref:
tgz_ref.extractall(destination)
print "Done!"
elif (source.endswith('tar')):
with tarfile.open(source, 'r:' ) as tar_ref:
tar_ref.extractall(destination)
print "Done!"
elif (source.endswith('zip')):
with zipfile.ZipFile(source, 'r') as zip_ref:
zip_ref.extractall(destination)
print "Done!"
else:
print "Unsupported extension for decompressing. Supported extensions are .zip, .tgz, .tar.gz, .tar"
######### Download necessary dataset #############
def download_required_files():
total_size_of_downloads = 0
# datafile = datapath+'dataset.npz'
# validfile = datapath+'validation.mat'
datalink = 'https://www.dropbox.com/s/j88wmtx1vvpik1m/dataset.npz?dl=1'
validlink = 'https://www.dropbox.com/s/r8jl57lij28ljrw/validation.mat?dl=1'
total_size_of_downloads += download_file(datafile, datalink)
total_size_of_downloads += download_file(validfile, validlink)
####### Download bargraph tool if not already downloaded (by Derek Bruening)
toollink = 'https://github.com/derekbruening/bargraph/archive/rel_4_8.zip'
# toolfile = datapath+'bargraph.zip'
# toolpath = datapath+'bargraph-rel_4_8/'
if not os.path.isdir(toolpath):
total_size_of_downloads += download_file(toolfile, toollink)
if os.path.isfile(toolfile):
extract_file(toolfile,datapath+'.')
# tool = './'+toolpath+'bargraph.pl'
call(['chmod','+x',tool]) # make tool executable
call(['rm',toolfile]) # delete zip file
####### Download features and trained models, if not wanting to compute them and not already there
if download==1:
featlink = 'https://www.dropbox.com/s/qvk9pcvlir06zse/features_1024_20_10000.npz?dl=1'
validfeatlink = 'https://www.dropbox.com/s/sghqwifo8rxwbcs/validfeatures_1024_20_10000.npz?dl=1'
total_size_of_downloads += download_file(featfile, featlink)
total_size_of_downloads += download_file(validfeatfile, validfeatlink)
reslink = {}
reslink[0] = 'https://www.dropbox.com/sh/mib7wk4sfv6eye3/AACUWSOgQjBD9i2sChtNisNKa?dl=1'
reslink[1] = 'https://www.dropbox.com/sh/y6js9ha585n4zam/AACARvB8krZnC3VPsOjWTaRra?dl=1'
reslink[2] = 'https://www.dropbox.com/sh/fc9jgi2cs7d0dzg/AADfw42xG0XtiUOYWo7cmmtUa?dl=1'
reslink[3] = 'https://www.dropbox.com/sh/mx6e7jcxzbcr5s4/AACkVMPatRd2UZfyUkxvP_tLa?dl=1'
reslink[4] = 'https://www.dropbox.com/sh/88itj3b4nwpe0f1/AACceO9FsZp5w55n7PKlVnWSa?dl=1'
for i in range(len(reslink)):
resfold = datapath+'results'+str(i+1)
if not os.path.isdir(resfold):
resfile = resfold+'.zip'
total_size_of_downloads += download_file(resfile, reslink[i]) # download
extract_file(resfile, resfold) # extract
call(['rm',resfile]) # delete zip
else:
print "Desired trained models for "+str(i+1)+" surface found!"
print "Downloaded "+convert_bytes(total_size_of_downloads)+" of content in total!"
############ READ THE DATASET ############
def data_prep(datafile,step=1,k=2,scale=[1.0],fdes=0.0,printit=True):
"""Prepare dataset, from each of the k fingers for all n surfaces (see fd for details)
-> datafile : input file either in .npz or in .mat form
    -> step : subsampling step; increasing it lowers the sampling frequency of the input (initially 1 kHz)
-> k : number of fingers logging data
-> scale : list of scales for scaled outputs to be computed
-> fdes : desired force to be subtracted from the force norm measurements
    ----- input format ----- either 'fi', 'li', 'fdi', with i in {1,...,k}, one triplet per finger,
                             or 'f', 'l', 'fd' for a single finger,
                             corresponding to force, label and details respectively
<- f,l,fd : output force, label and details for each experiment in the dataset
    <- member : weight of each dataset in the whole,
                used later to subsample evenly and keep dimensions consistent
<- m1, m2 : portion of data belonging to finger1 and finger2
"""
if printit:
print "---------------------------- LOADING DATA and COMPUTING NECESSARY STRUCTS ----------------------------"
if datafile[-3:]=='mat':
inp = sio.loadmat(datafile,struct_as_record=True)
elif datafile[-3:]=='npz':
inp = np.load(datafile)
else:
print "Unsupported input file format. Supported types: .npz .mat"
return -1
if k==2:
f1, f2, l1, l2, fd1, fd2 = inp['f1'], inp['f2'], inp['l1'], inp['l2'], inp['fd1'], inp['fd2']
if printit:
print 1, '-> f1:', f1.shape, l1.shape, fd1.shape
print 2, '-> f2:', f2.shape, l2.shape, fd2.shape
####### MERGE THE DATASETS
f = np.concatenate((f1,f2),axis=0)
l = np.concatenate((l1,l2),axis=0)
        fd = np.concatenate((fd1,fd2),axis=0)
elif k==1:
f, l, fd = inp['f'], inp['l'], inp['fd']
    else:
        print "Unsupported number of fingers k. Should be k in {1,2}"
        return -1
if printit:
print 3, '-> f:', f.shape, l.shape, fd.shape
# membership of each sample, representing its portion in the dataset
# (first half finger1 and second half finger2)
member = np.zeros(len(f))
m1,m2 = len(f)/2, len(f)/2
member[:m1] = np.ones(m1)*1./m1
member[-m2:] = np.ones(m2)*1./m2
if printit:
print 4, '-> m1,m2:', m1, m2, sum(member[:m1]), sum(member[-m2:])
####### MERGE f and l
while f.ndim>1:
f = f[:,0]
l = l[:,0]
for i in range(len(f)):
while l[i].ndim<2:
l[i] = l[i][:,np.newaxis]
f = np.array([np.concatenate((f[i],l[i]),axis=1) for i in range(len(f))])
if printit:
print 5, '-> f=f+l:', f.shape, ":", [fi.shape for fi in f]
####### SUBSAMPLING
# step = 1 # NO SAMPLING
if step!=1:
f = np.array([fi[::step,:] for fi in f])
if printit:
print 6, '-> fsampled:',f.shape, ":", [fi.shape for fi in f]
####### SCALING
tf = deepcopy(f)
tl = deepcopy(l)
tfd = fd.tolist()
if len(scale) == 1 and scale[0] < 0.0: # adaptive scaling
scale = [1./(1.1*fdes)]
fdes = 0.0
for sc in scale:
for i in range(len(f)):
tmpf = deepcopy(f[i][:,:-1])
tmpnorm = np.linalg.norm(tmpf,axis=1)
unitmpf = deepcopy(tmpf)
for j in range(unitmpf.shape[1]):
unitmpf[:,j] = np.divide(tmpf[:,j], tmpnorm) # find unitvector
tf[i][:,:-1] = sc * (tmpf - fdes * unitmpf) # scale data after removing DC
tfd[i].append('scale='+str(sc))
# plt.figure()
# plt.subplot(1,2,1)
# plt.plot(tmpf)
# plt.subplot(1,2,2)
# plt.plot(unitmpf)
tf = np.concatenate((deepcopy(f),tf),axis=0)
tl = np.concatenate((deepcopy(l),tl),axis=0)
tfd = fd.tolist()+tfd
# tfd = np.concatenate((deepcopy(fd),tfd),axis=0)
tf = tf[len(f):]
tl = tl[len(l):]
tfd = np.array(tfd[len(fd):])
tm = np.ones(len(f)*len(scale))*member[0]/(len(scale)*1.)
m1, m2 = len(scale)*m1, len(scale)*m2
if printit:
print 7, '-> fscaled: ', tf.shape, tm.shape, tl.shape, tfd.shape
return tf,tl,tfd,tm,m1,m2
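# Illustrative call sketch for data_prep (never called): assumes `datafile` is the
# module-level path to dataset.npz set up with the download helpers above.
def _example_data_prep():
    f, l, fd, member, m1, m2 = data_prep(datafile, step=1, k=2, scale=[1.0], fdes=0.0)
    return f, l, fd, member, m1, m2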
############ PRE-FEATURES ############
###### DEFINITION
# featnum 0 : sf = (fx^2+fy^2+fz^2)^0.5
# 1 : ft = (fx^2+fy^2)^0.5
# 2 : fn = |fz|
# 3 : ft/fn = (fx^2+fy^2)^0.5/|fz|
# input (nxm) -> keep (nx3) -> compute pre-feature and return (nx1)
###### COMPUTATION
# prefeatfn = np.array([sf,ft,fn,ftn,lab]) # convert to np.array to be easily indexed by a list
# prefeatnames = np.array(['fnorm','ft','fn','ftdivfn','label'])
# prefeatid = [0,4] # only the prefeatures with corresponding ids will be computed
def compute_prefeat(f,printit=True):
"""Prefeature computation
-> f : input force as an i by n by 4 matrix
<- prefeat : corresponding force profiles
"""
if printit:
print "--------------------------------------- COMPUTING PREFEATURES ----------------------------------------"
prefeat = [np.array([prfn(f[i]) for prfn in prefeatfn[prefeatid]]).transpose() for i in range(len(f))]
prefeat.append(prefeat[-1][:-1])
prefeat = np.array(prefeat)[:-1]
if printit:
print prefeat.shape,":",[p.shape for p in prefeat]
return prefeat
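# Illustrative chaining sketch (never called): prefeatures are computed directly from
# the force array `f` returned by data_prep above.
def _example_compute_prefeat(f):
    return compute_prefeat(f, printit=False)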
############ AVG Computation time of ALL features in secs ############
def avg_feat_comp_time(prefeat,printit=True):
"""Average computation time for feature extraction
-> prefeat : desired prefeature input
"""
if printit:
print "------------------------------------ AVG FEATURE COMPUTATION TIME ------------------------------------"
t1 = time.time()
m = int(ceil(0.2*len(prefeat)))
# avg over m*100 times
tmpfeat = [feat(prefeat[k][i:i+window,:2],*featparam) for k in range(m) for i in range(100)]
if printit:
print 'Avg feature computation time (millisec): ', (time.time() - t1) / (100 * m) * 1000
############ FEATURE COMPUTATION ############
def tmpfeatfilename(p,name,mode='all'):
"""Filename for feature computation and intermittent saving
-> p : prefeat id
-> name : desired prefix name for tmp filenames
-> mode : whether keeping whole feature matrix ('all') or sampling rows ('red') to reduce size
<- corresponding output filename
"""
if mode == 'all':
return allfeatpath+name+str(p)+'.pkl.z'
elif mode == 'red':
return allfeatpath+name+str(p)+'_red'+str(samplesperdataset)+'.pkl.z'
def feature_extraction(prefeat, member, featfile, name='feat_', printit=True):
"""Computation of all features in parallel or loading if already computed
-> prefeat : computed prefeatures
    -> member : weight of each dataset in the whole,
                used to subsample evenly and keep dimensions consistent
-> featfile : desired final feature filename
-> name : desired per dataset feature temporary filenames
<- features, labels : computed features and corresponding labels
"""
if printit:
print "---------------------------------------- FEATURE EXTRACTION ------------------------------------------"
if os.path.isfile(featfile):
start_time = time.time()
features = np.load(featfile)['features']
labels = np.load(featfile)['labels']
if printit:
print("Features FOUND PRECOMPUTED! Feature Loading DONE in: %s seconds " % (time.time() - start_time))
if delete_big_features:
for j in glob.glob(allfeatpath+"*"):
if 'red' not in j:
call(['rm',j]) # delete big feature file, after reducing its size to desired
else:
start_time = time.time()
features = []
labels = []
for ixp in range(len(prefeat)):
p = prefeat[ixp]
now = time.time()
tmpfn = tmpfeatfilename(ixp,name)
tmpfnred = tmpfeatfilename(ixp,name,'red')
if not os.path.isfile(tmpfnred):
if not os.path.isfile(tmpfn):
# Computation of all features in PARALLEL by ALL cores
tmp = np.array([Parallel(n_jobs=-1)([delayed(feat) (p[k:k+window],*featparam)
for k in range(0,len(p)-window,shift)])])
with open(tmpfn,'wb') as fo:
joblib.dump(tmp,fo)
if printit:
print 'sample:', ixp, ', time(sec):', '{:.2f}'.format(time.time()-now), tmpfn, ' computing... ', tmp.shape
else:
with open(tmpfn,'rb') as fo:
tmp = joblib.load(fo)
if printit:
print 'sample:', ixp, ', time(sec):', '{:.2f}'.format(time.time()-now), tmpfn, ' already here!', tmp.shape
# keep less from each feature vector but keep number of samples for each dataset almost equal
try:
tmpskip = int(round(tmp.shape[1]/(member[ixp]*samplesperdataset)))
except:
tmpskip = 1
if tmpskip == 0:
tmpskip = 1
# Save reduced size features
tmp = tmp[0,::tmpskip,:,:]
with open(tmpfnred,'wb') as fo:
joblib.dump(tmp,fo)
if printit:
print 'sample:',ixp, ', time(sec):', '{:.2f}'.format(time.time()-now), tmpfnred, tmp.shape
if delete_big_features:
call(['rm',tmpfn]) # delete big feature file, after reducing its size to desired
else:
if delete_big_features:
call(['rm',tmpfn]) # delete big feature file, since reduced size file exists
for ixp in range(len(prefeat)):
if delete_big_features:
tmpfn = tmpfeatfilename(ixp,name)
call(['rm',tmpfn]) # delete big feature file if still here for some reason
tmpfnred = tmpfeatfilename(ixp,name,'red')
with open(tmpfnred,'rb') as fo:
tmp = joblib.load(fo)
if printit:
print 'sample:', ixp, ', time(sec):', '{:.2f}'.format(time.time()-now), tmpfnred, 'already here!', tmp.shape
features.append(tmp[:,:,:-1])
labels.append(tmp[:,0,-1])
if printit:
print("Features NOT FOUND PRECOMPUTED! Feature Computation DONE in: %s sec " % (time.time() - start_time))
features.append(tmp[:-1,:,:-1])
features = np.array(features)[:-1]
labels.append(tmp[:-1,0,-1])
labels = np.array(labels)[:-1]
if printit:
print 'features: ',features.shape,[ftmp.shape for ftmp in features]
print 'labels: ', labels.shape,[l.shape for l in labels]
np.savez(featfile,features=features,labels=labels)
if printit:
print 'features: ', features.shape, ', labels: ', labels.shape
return features, labels
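# Illustrative sketch of the feature stage (never called): `featfile` is assumed to be
# the module-level feature filename set up with the other dataset paths above.
def _example_feature_extraction(prefeat, member):
    features, labels = feature_extraction(prefeat, member, featfile, name='feat_', printit=False)
    return features, labels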
############ LABEL TRIMMING ############
def label_cleaning(prefeat,labels,member,history=500,printit=True):
"""Keep the purely stable and slip parts of label, thus omitting some samples around sign change points
-> prefeat : computed prefeatures
-> labels : main structure, where the trimming will be performed around change points
    -> member : weight of each dataset in the whole, used to subsample evenly and keep dimensions consistent
-> history : how much samples to throw away around change points
<- new_labels : the trimmed labels
"""
if printit:
print "----------- KEEPING LABEL's PURE (STABLE, SLIP) PHASE PARTS (TRIMMING AROUND CHANGE POINTS)-----------"
lbl_approx = []
for i in range(len(prefeat)):
tmpd = np.abs(np.diff(prefeat[i][:,-1].astype(int),n=1,axis=0))
if np.sum(tmpd) > 0:
tmpind = np.array(range(len(tmpd)))[tmpd > 0] # find the sign change points
tmpindrng = []
for j in range(len(tmpind)):
length = history # keep/throw a portion of the signal's length around change points
tmprng = np.array(range(tmpind[j]-length,tmpind[j]+length))
                tmprng = tmprng[tmprng>=0] # make sure indices stay inside the signal's x-range
tmprng = tmprng[tmprng<prefeat[i].shape[0]]
tmpindrng += tmprng.tolist()
tmpindrng = np.array(tmpindrng).flatten()
tmp_lbl = deepcopy(prefeat[i][:,-1])
tmp_lbl[tmpindrng] = -1
lbl_approx.append(tmp_lbl)
else:
lbl_approx.append(prefeat[i][:,-1])
new_labels = deepcopy(labels)
for ixp in range(len(lbl_approx)):
p = lbl_approx[ixp]
tmp = np.array([p[k+window] for k in range(0,len(p)-window,shift)])
try:
tmpskip = int(round(tmp.shape[0]/(member[ixp]*samplesperdataset)))
except:
tmpskip = 1
if tmpskip == 0:
tmpskip = 1
# Sampling appropriately
tmp = tmp[::tmpskip]
if len(tmp) > len(labels[ixp]):
tmp = tmp[:-1]
new_labels[ixp] = tmp
if printit:
print 'new_labels: ', new_labels.shape
return new_labels
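# Illustrative sketch (never called): trim the labels around slip/stable change points
# with the default 500-sample guard band before building the classifier inputs below.
def _example_label_cleaning(prefeat, labels, member):
    return label_cleaning(prefeat, labels, member, history=500, printit=False)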
############ GATHERING into complete arrays ready for FITTING ############
def computeXY(features,labels,new_labels,m1,m2,XYfile,XYsplitfile,printit=True):
"""
-> features : computed features as input data
-> labels : corresponding labels
-> new_labels : labels trimmed around change point
-> m1, m2 : portion of data belonging to finger1 and finger2
-> XY[split]file : desired output filenames
<- X,Y,Yn,Xsp,Ysp : X corresponds to the data, Y the label, and *sp to the trimmed label's versions
"""
if printit:
print "----------------------------- COMPUTING X,Y for CLASSIFIERS' INPUT -----------------------------------"
if os.path.isfile(XYfile) and os.path.isfile(XYsplitfile):
X = np.load(XYfile)['X']
Y = np.load(XYfile)['Y']
Yn = np.load(XYfile)['Yn']
Xsp = np.load(XYsplitfile)['X']
Ysp = np.load(XYsplitfile)['Y']
if printit:
print("XY files FOUND PRECOMPUTED!")
else:
# gathering features X,Xsp and labels Y,Ysp,Yn into one array each
ind,X,Xsp,Y,Ysp,Yn = {},{},{},{},{},{}
        ind[2] = range(features.shape[0]) # indices for both fingers
        ind[0] = range(features.shape[0])[:m1] # indices for finger1
        ind[1] = range(features.shape[0])[-m2:] # indices for finger2
ind = np.array([i for _,i in ind.items()]) # convert to array
for k in range(len(ind)):
X[k] = features[ind[k]] # input feature matrix
Y[k] = labels[ind[k]] # output label vector
Yn[k] = new_labels[ind[k]] # output new_label vector
if printit:
print 'Before -> X[',k,']: ',X[k].shape,', Y[',k,']: ',Y[k].shape,', Yn[',k,']: ',Yn[k].shape
X[k] = np.concatenate(X[k],axis=0)
Y[k] = np.concatenate(Y[k],axis=0)
Yn[k] = np.concatenate(Yn[k],axis=0)
if printit:
print 'Gathered -> X[',k,']: ',X[k].shape,', Y[',k,']: ',Y[k].shape,', Yn[',k,']: ',Yn[k].shape
X[k] = np.array([X[k][:,:,i] for i in range(X[k].shape[2])])
tmp_sampling = int(round(X[k].shape[1]*1./samplesperdataset))
if tmp_sampling == 0:
tmp_sampling = 1
X[k] = X[k][0,::tmp_sampling,:]
Y[k] = Y[k][::tmp_sampling]
Yn[k] = Yn[k][::tmp_sampling]
if printit:
print 'Gathered, sampled to max ', samplesperdataset, ' -> X[', k,']: ', X[k].shape, \
', Y[', k, ']: ', Y[k].shape, ', Yn[', k,']: ', Yn[k].shape
keepind = Yn[k]>=0
Xsp[k] = X[k][keepind,:]
Ysp[k] = Yn[k][keepind]
if printit:
print 'Split -> Xsp[',k,']: ',Xsp[k].shape,', Ysp[',k,']: ',Ysp[k].shape
X = np.array([i for _,i in X.items()])
Xsp = np.array([i for _,i in Xsp.items()])
Y = np.array([i for _,i in Y.items()])
Ysp = np.array([i for _,i in Ysp.items()])
Yn = np.array([i for _,i in Yn.items()])
np.savez(XYfile,X=X,Y=Y,Yn=Yn)
np.savez(XYsplitfile, X=Xsp, Y=Ysp)
if printit:
print 'X,Y [0,1,2]: ', X[0].shape, Y[0].shape, X[1].shape, Y[1].shape, X[2].shape, Y[2].shape
print 'Xsp,Ysp [0,1,2]: ', Xsp[0].shape, Ysp[0].shape, Xsp[1].shape, Ysp[1].shape, Xsp[2].shape, Ysp[2].shape
return X,Y,Yn,Xsp,Ysp
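# Illustrative sketch (never called): gather the classifier inputs; 'XY.npz' and
# 'XYsplit.npz' are hypothetical output paths, not names defined elsewhere in the script.
def _example_computeXY(features, labels, new_labels, m1, m2):
    return computeXY(features, labels, new_labels, m1, m2, 'XY.npz', 'XYsplit.npz', printit=False)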
############ Prepare the indeces for each feature ############
def get_feat_id(feat_ind, printit=False):
"""Find the corresponding indeces of the desired features inside feature vector,
and link them with their names and level of abstraction
-> feat_ind : range of indeces
-> printit : print output indeces (1) or not (0)
-> sample_window : parameter for accurate computation of feature indeces
<- full_path_id : indeces of all features
<- norm_time_feats : indeces of time features
<- norm_freq_feats : indeces of frequency features
"""
sample_window = window
# get the feat inds wrt their source : 3rd level
norm_time_phin = range(0,14)
norm_freq_phin = range(norm_time_phin[-1] + 1, norm_time_phin[-1] + 9 + sample_window + 1)
norm_time_golz = range(norm_freq_phin[-1] + 1, norm_freq_phin[-1] + 10 + sample_window + 1)
norm_freq_golz = range(norm_time_golz[-1] + 1, norm_time_golz[-1] + 2 + sample_window + 1)
# get the feat inds wrt their domain : 2nd level
norm_time_feats = norm_time_phin + norm_time_golz
norm_freq_feats = norm_freq_phin + norm_freq_golz
# get the feat inds wrt their prefeat: 1st level
norm_feats = norm_time_feats + norm_freq_feats
# get the feat inds wrt their source : 3rd level
disp = norm_feats[-1]+1
ftfn_time_phin = range(disp ,disp + 14)
ftfn_freq_phin = range(ftfn_time_phin[-1] + 1, ftfn_time_phin[-1] + 9 + sample_window + 1)
ftfn_time_golz = range(ftfn_freq_phin[-1] + 1, ftfn_freq_phin[-1] + 10 + sample_window + 1)
ftfn_freq_golz = range(ftfn_time_golz[-1] + 1, ftfn_time_golz[-1] + 2 + sample_window + 1)
# get the feat inds wrt their domain : 2nd level
ftfn_time_feats = ftfn_time_phin + ftfn_time_golz
ftfn_freq_feats = ftfn_freq_phin + ftfn_freq_golz
# get the feat inds wrt their prefeat: 1st level
ftfn_feats = ftfn_time_feats + ftfn_freq_feats
# create the final "reference dictionary"
# 3 np.arrays, id_list[0] = level 1 etc
id_list = [np.zeros((len(ftfn_feats + norm_feats),1)) for i in range(3)]
id_list[0][:norm_feats[-1]+1] = 0 # 0 signifies norm / 1 signifies ft/fn
id_list[0][norm_feats[-1]+1:] = 1
id_list[1][:norm_time_phin[-1]+1] = 0 # 0 signifies time / 1 signifies freq
id_list[1][norm_time_phin[-1]+1:norm_freq_phin[-1]+1] = 1
id_list[1][norm_freq_phin[-1]+1:norm_time_golz[-1]+1] = 0
id_list[1][norm_time_golz[-1]+1:norm_freq_golz[-1]+1] = 1
id_list[1][norm_freq_golz[-1]+1:ftfn_time_phin[-1]+1] = 0
id_list[1][ftfn_time_phin[-1]+1:ftfn_freq_phin[-1]+1] = 1
id_list[1][ftfn_freq_phin[-1]+1:ftfn_time_golz[-1]+1] = 0
id_list[1][ftfn_time_golz[-1]+1:] = 1
id_list[2][:norm_freq_phin[-1]+1] = 0 #0 signifies phinyomark / 1 signifies golz
id_list[2][norm_freq_phin[-1]+1:norm_freq_golz[-1]+1] = 1
id_list[2][norm_freq_golz[-1]+1:ftfn_freq_phin[-1]+1] = 0
id_list[2][ftfn_freq_phin[-1]+1:] = 1
full_path_id = [np.zeros((len(feat_ind),5)) for i in range(len(feat_ind))]
freq_path_id = []
time_path_id = []
for ind, val in enumerate(feat_ind):
full_path_id[ind] = [val, id_list[2][val], id_list[1][val], id_list[0][val]]
if(full_path_id[ind][1]==0):
lvl3 = 'Phin'
else:
lvl3 = 'Golz'
if(full_path_id[ind][2]==0):
lvl2 = 'Time'
time_path_id.append(val)
else:
lvl2 = 'Freq'
freq_path_id.append(val)
if(full_path_id[ind][3]==0):
lvl1 = 'Norm'
else:
lvl1 = 'Ft/Fn'
if (printit):
print(feat_ind[ind],featnames[val%(norm_feats[-1]+1)],lvl3,lvl2,lvl1)
return(full_path_id,time_path_id,freq_path_id)
def get_feat_names(printit=True):
"""Return a list with all computed feature names"""
return featnames
def get_feat_ids_from_names(feat_name_list, printit=False):
"""Return a list of indexes corresponding to the given list of feature names"""
tmpfind = []
for m in feat_name_list:
try:
ti = m.index('_')
except:
ti = len(m)+1
for i in range(len(featnames)):
if featnames[i][:ti] == m[:ti]:
tmpfind.append(i)
if printit:
print tmpfind
print np.array(featnames)[tmpfind]
return tmpfind
############ Surface Splitting ############
def surface_split(data_X, data_Y, n=6, k=2, printit=True):
"""Split input data in k*n equal slices which represent n different surfaces sampled from k fingers.
    Slices i, i+n, ..., i+(k-1)*n correspond to the same surface i (finger1 up to fingerk).
    Assumes k=2 (the two-finger case) unless stated otherwise.
    -> data_X, data_Y : input data and labels, with the convention that data_X is made of k*n almost
                        equally sized blocks, the first n acquired from finger1, ...,
                        and the last n from fingerk.
-> n : number of different surfaces
-> k : number of fingers logging data
<- surfaces, surf_labels : corresponding output data and labels
"""
keep = data_X.shape[0]-np.mod(data_X.shape[0],k*n)
surfaces_pre = np.array(np.split(data_X[:keep,:],k*n))
surf_labels_pre = np.array(np.split(data_Y[:keep],k*n))
surfaces, surf_labels = {},{}
for i in range(n):
inds = range(i,k*n,n)
surfaces[inds[0]] = surfaces_pre[inds[0]]
surf_labels[inds[0]] = surf_labels_pre[inds[0]]
for tk in range(k-1):
surfaces[inds[0]] = np.concatenate((surfaces[inds[0]], surfaces_pre[inds[tk+1]]), axis = 0)
surf_labels[inds[0]] = np.concatenate((surf_labels[inds[0]], surf_labels_pre[inds[tk+1]]), axis = 0)
surfaces = np.array([i for _,i in surfaces.items()])
surf_labels = np.array([i for _,i in surf_labels.items()])
return surfaces, surf_labels
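# Self-contained toy check of the splitting convention above (illustrative only, never
# called): 12 samples, n=3 surfaces, k=2 fingers; slices 0..5 come from finger1 and
# 6..11 from finger2, so surface i gathers slices i and i+n.
def _example_surface_split():
    toy_X = np.arange(24).reshape(12, 2)
    toy_Y = np.arange(12)
    s, sl = surface_split(toy_X, toy_Y, n=3, k=2, printit=False)
    return s.shape, sl.shape  # expected: (3, 4, 2) and (3, 4)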
############ Featureset Splitting ############
def feat_subsets(data,fs_ind,keep_from_fs_ind):
"""returns a splitting per featureset of input features
-> data : input data X
-> fs_ind : prefeature id
-> keep_from_fs_ind : list of feature indexes to be kept from whole feature vector
<- X_amfft, X_freq_all, X_time, X_both : split featuresets amplitude of FFT, all time only,
all frequency only and all features
"""
ofs = len(keep_from_fs_ind)
_,tf,ff = get_feat_id(keep_from_fs_ind)
amfft_inds = []
temp1 = deepcopy(data)
for i in keep_from_fs_ind:
if (featnames[i].startswith('AF')):
amfft_inds.append(i)
if (fs_ind == 2):
ff2 = [ff[i]+ofs for i in range(len(ff))]
tf2 = [tf[i]+ofs for i in range(len(tf))]
amfft2 = [amfft_inds[i]+ofs for i in range(len(amfft_inds))]
freqf = ff2 + ff
timef = tf2 + tf
amfft = amfft_inds + amfft2
else:
freqf = ff
timef = tf
amfft = amfft_inds
X_amfft = temp1[:,amfft]
X_time = temp1[:,timef]
X_freq_all = temp1[:,freqf]
X_both = data[:,keep_from_fs_ind]
return X_amfft, X_freq_all, X_time, X_both
############ Prepare the dataset split for each surface ############
def computeXY_persurf(Xsp, Ysp, surffile, keepind=[-1], n=6, k=2, saveload=True, printit=True):
"""returns a split per surface data and label of inputs
-> Xsp, Ysp : input data and labels, after having trimmed data around the label's change points
-> surffile : desired output's filename for saving
-> keepind : list of feature indexes to be kept from whole feature vector
-> n,k,saveload : different surfaces in dataset, number of different data sources (fingers), save/load computed data
<- surf, surfla : output data and label, split per surface
"""
if len(keepind) == 0 or keepind[0] == -1:
keepind = range(len(featnames))
if printit:
print "------------------------ COMPUTING X,Y per surface CLASSIFIERS' INPUT --------------------------------"
if os.path.isfile(surffile) and saveload:
surf = np.load(surffile)['surf'] # input array containing computed features for each surface
surfla = np.load(surffile)['surfla'] # corresponding label
else:
surf, surfla = [], []
for i in range(len(prefeatid)-1): # for each featureset (corresponding to each prefeature, here only |f|)
surf1, surfla1 = surface_split(Xsp[2], Ysp[2], n, k, printit)
tmpsurf = deepcopy(surf1)
tmpsurfla = deepcopy(surfla1)
tmpsurfsubfeat = []
for j in range(tmpsurf.shape[0]+1): # for each surface
if (printit):
print i,j,surf1.shape
if j == tmpsurf.shape[0]:
                    # append a shortened dummy entry so np.array keeps ragged per-surface shapes (dropped again below)
tmpsurfsubfeat.append(feat_subsets(tmpsurf[j-1,:-1,:],i,keepind))
else:
# keep all subfeaturesets
tmpsurfsubfeat.append(feat_subsets(tmpsurf[j],i,keepind))
surf.append(tmpsurfsubfeat)
surfla.append(surfla1)
# surf dims: (featuresets, surfaces, prefeaturesets) with each one enclosing (samples, features)
surf = np.array(surf).transpose()[:,:-1,:]
# surfla dims: (samples, surfaces, prefeaturesets)
surfla = np.array(surfla).transpose()
if saveload:
np.savez(surffile,surf=surf,surfla=surfla)
if (printit):
print surf.shape, surfla.shape
return surf, surfla
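# Illustrative sketch (never called): per-surface split of the trimmed data for the
# assumed 6-surface / 2-finger dataset; 'surf_split.npz' is a hypothetical output path.
def _example_computeXY_persurf(Xsp, Ysp):
    return computeXY_persurf(Xsp, Ysp, 'surf_split.npz', keepind=[-1], n=6, k=2,
                             saveload=False, printit=False)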
############ PIPELINE OF TRANSFORMATIONS ############
def make_pipe_clf(scaler,feature_selection,decomp,clf):
"""returns a pipeline of inputs:
-> scaler : first normalize
-> feature_selection : then perform feature selection
-> decomp : followed by PCA
-> clf : and finally the desired classifier
<- pipeline : output pipeline
"""
pipeline = Pipeline([('scaler', scaler),
('feature_selection', feature_selection),
('decomp', decomp),
('classifier', clf) ])
return pipeline
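# Illustrative pipeline sketch (never called): the actual `scaler`, `decomp` and
# `classifiers` objects are defined earlier in the script; the concrete sklearn choices
# below are assumptions for illustration only, not necessarily the configuration used here.
def _example_make_pipe_clf():
    from sklearn.preprocessing import StandardScaler
    from sklearn.feature_selection import SelectKBest, mutual_info_classif
    from sklearn.decomposition import PCA
    from sklearn.svm import SVC
    return make_pipe_clf(StandardScaler(),
                         SelectKBest(k='all', score_func=mutual_info_classif),
                         PCA(n_components=0.95),
                         SVC(kernel='rbf'))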
############ TRAINING with 1 surface each time, out of 6 surfaces in total ##############
def filename1(i=0,j=0,k=0,l=0,retpath=0):
"""function for the filename of the selected combination for training per 1 surface
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> k : surface id trained on
-> l : surface id tested on
<- filename
"""
filepath = respath+'1/'
ensure_dir(filepath)
if retpath:
return filepath
else:
return filepath+'fs_'+str(i)+'_subfs_'+str(j)+'_tr_'+str(k)+'_ts_'+str(l)+'.npz'
def cross_fit1(i,j,k,kmax,l,data,labels,data2,labels2,pipe,printit=True):
"""function for fitting model per 1 surface
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> k : surface id trained on
-> kmax : maximum surfaces
-> l : surface id tested on
-> data, labels : training data and labels
-> data2, labels2 : testing data and labels
-> pipe : the desired pipeline configuration
<- no output, saved model and confusion matrix in corresponding filename.npz
"""
fileid = filename1(i,j,k,l)
if not os.path.isfile(fileid):
if (printit):
print i,j,k,l
if k==l: # perform K-fold cross-validation
folds = cv.split(data, labels)
cm_all = np.zeros((2,2))
for fold, (train_ind, test_ind) in enumerate(folds):
x_train, x_test = data[train_ind], data[test_ind]
y_train, y_test = labels[train_ind], labels[test_ind]
model = pipe.fit(x_train,y_train)
y_pred = model.predict(x_test)
cm = confusion_matrix(y_pred=y_pred, y_true=y_test)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_all += cm/5.
np.savez(fileid,cm=cm_all,model=np.array([model]))
else: # perform cross-check
tr_data = data
tr_labels = labels
ts_data = data2
ts_labels = labels2
            # Check whether a model trained on this surface was already saved from another cross-test
            # (excluding the cross-validated run on the same surface)
model = []
for m in range(kmax):
tmpcopyfileid = filename1(i,j,k,m)
if k!=m and os.path.isfile(tmpcopyfileid):
if (printit):
print 'Found precomputed model of '+str(k)+', tested on '+str(m)+'. Testing on '+str(l)+'...'
model = np.load(tmpcopyfileid)['model'][0]
break
if model==[]: # model not found precomputed
if (printit):
print 'Fitting on '+str(k)+', testing on '+str(l)+'...'
model = pipe.fit(tr_data,tr_labels)
y_pred = model.predict(ts_data)
cm = confusion_matrix(y_pred=y_pred, y_true=ts_labels)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.savez(fileid,cm=cm,model=np.array([model]))
def init_steps1(i,j,jmax,surf,surfla,printit=True):
"""function for helping parallelization of computations per 1 surface
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> jmax : number of all subfeaturesets
-> surf, surfla : surface data and labels
"""
if j==jmax:
featsel = SelectKBest(k=1000,score_func= mutual_info_classif)
else:
featsel = SelectKBest(k='all',score_func= mutual_info_classif)
pipe = make_pipe_clf(scaler, featsel, decomp, classifiers[2])
for k in range(surf.shape[0]): # for every training surface
for l in range(surf.shape[0]): # for every testing surface
cross_fit1(i,j,k,surf.shape[0],l,surf[k],surfla[:,k],surf[l],surfla[:,l],pipe,printit)
def train_1_surface(surf,surfla,n=-1,printit=True):
"""Parallel training -on surface level- of all combinations on 1 surface
-> n : number of cores to run in parallel,
input of joblib's Parallel (n=-1 means all available cores)
-> surf, surfla : surface data and labels
*** Cross surface validation, TRAINING with 1 surface each time, out of 6 surfaces in total
total= 4 (featuresets) * [comb(6,1)*6] (surface combinations: trained on 1, tested on 1) * 1 (prefeatureset)
= 4*6*6*1 = 144 different runs-files.
Note that comb(n,r) = n!/(r!(n-r)!)
"""
if (printit):
print "-------------------------- TRAINING all combinations per 1 surface -----------------------------------"
for i in range(len(prefeatid)-1):
_ = [Parallel(n_jobs=n)([delayed(init_steps1) (i,j,surf.shape[0]-1,surf[j,:,i],surfla[:,:,i],printit)
for j in range(surf.shape[0])])]
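# Illustrative driver sketch (never called): typical ordering of the 1-surface runs
# followed by the perf-file generation below; 6 surfaces are assumed, as in the dataset above.
def _example_run_1_surface(surf, surfla):
    train_1_surface(surf, surfla, n=-1, printit=False)
    bargraph_perf_gen1(6, printit=False)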
def bargraph_perf_gen1(maxsurf,printit=True):
"""Perf file for bargraph generation using bargraph tool, for 1 surface"""
if (printit):
print "---------------------------- Generating perf files for 1 surface -------------------------------------"
prefeats = prefeatnames[prefeatid][:-1]
# prefeatures, subfeatures, trained, tested, (TP,TN,FN,FP)
acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,4))
# prefeatures, subfeatures, trained, cross_val_self_accuracy, (TP,TN,FN,FP)
self_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,1,4))
# features, subfeatures, trained, (tested avg, tested std), (TP,TN,FN,FP)
cross_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,2,4))
initial_str = "# clustered and stacked graph bogus data\n=stackcluster;TP;TN;FN;FP\n"+\
"colors=med_blue,dark_green,yellow,red\n=nogridy\n=noupperright\nfontsz=5\nlegendx=right\n"+\
"legendy=center\ndatascale=50\nyformat=%g%%\nxlabel=TrainedON-TestedON\nylabel=Metrics\n=table"
respath = filename1(retpath=1)
for i in range(len(prefeats)):
outname = respath+prefeats[i]
outfile = outname+'.perf'
outfile1 = outname+'_selfaccuracy.perf'
outfile2 = outname+'_crossaccuracy.perf'
out = open(outfile,'w+')
out.write(initial_str+"\n")
out1 = open(outfile1,'w+')
out1.write(initial_str+"\n")
out2 = open(outfile2,'w+')
out2.write(initial_str+"\n")
for k in range(maxsurf):
for k2 in range(maxsurf):
out.write("multimulti="+str(k)+"-"+str(k2)+"\n")
for j in range(len(subfeats)):
fileid = filename1(i,j,k,k2)
tmp = np.load(fileid)['cm']
# print to outfile
acc[i,j,k,k2,0] = round(tmp[1,1],2) # TP
acc[i,j,k,k2,1] = round(tmp[0,0],2) # TN
acc[i,j,k,k2,2] = 1-round(tmp[1,1],2) # FN
acc[i,j,k,k2,3] = 1-round(tmp[0,0],2) # FP
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],acc[i,j,k,k2,0],acc[i,j,k,k2,1],
acc[i,j,k,k2,2],acc[i,j,k,k2,3]))
# prepare and print to outfile1
if k == k2:
if j == 0:
out1.write("multimulti="+str(k)+"-"+str(k2)+"\n")
self_acc[i,j,k,0,:] = acc[i,j,k,k2,:]
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],self_acc[i,j,k,0,0],
self_acc[i,j,k,0,1],self_acc[i,j,k,0,2],
self_acc[i,j,k,0,3]))
# prepare and print to outfile2
if k != k2:
# all values of corresponding subfeatureset j have been filled to compute avg and std
if (k < maxsurf-1 and k2 == maxsurf-1) or (k == maxsurf-1 and k2 == maxsurf-2):
if j == 0:
out2.write("multimulti="+str(k)+"\n")
t = range(maxsurf)
t.remove(k)
cross_acc[i,j,k,0,:] = np.mean(acc[i,j,k,t,:], axis=0) # avg
# cross_acc[i,j,k,1,:] = np.std(acc[i,j,k,t,:], axis=0) # std
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j], cross_acc[i,j,k,0,0],
cross_acc[i,j,k,0,1], cross_acc[i,j,k,0,2],
cross_acc[i,j,k,0,3]))
out.write("multimulti=AVG\n")
out1.write("multimulti=AVG\n")
out2.write("multimulti=AVG\n")
for j in range(4):
avgacc = np.mean(np.mean(acc[i,j,:,:,:], axis=0), axis=0)
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j], avgacc[0], avgacc[1], avgacc[2], avgacc[3]))
avgselfacc = np.mean(self_acc[i,j,:,0,:], axis=0)
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j], avgselfacc[0], avgselfacc[1],
avgselfacc[2], avgselfacc[3]))
avgcrossacc0 = np.mean(cross_acc[i,j,:,0,:], axis=0)
# avgcrossacc1 = np.std(cross_acc[i,j,:,0,:], axis=0)
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j], avgcrossacc0[0], avgcrossacc0[1],
avgcrossacc0[2], avgcrossacc0[3]))
out.close()
out1.close()
out2.close()
############ TRAINING with 2 surfaces each time, out of 6 surfaces in total ##############
def filename2(i=0,j=0,k1=0,k2=0,l=0,retpath=0):
"""function for the filename of the selected combination for training per 2 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> l : surface id tested on
<- filename
"""
filepath = respath+'2/'
ensure_dir(filepath)
if retpath:
return filepath
else:
return filepath+'fs_'+str(i)+'_subfs_'+str(j)+'_tr1_'+str(k1)+'_tr2_'+str(k2)+'_ts_'+str(l)+'.npz'
def cross_fit2(i,j,k1,k2,kmax,l,data,labels,data2,labels2,pipe,printit=True):
"""function for fitting model per 2 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> kmax : maximum surfaces
-> l : surface id tested on
-> data, labels : training data and labels
-> data2, labels2 : testing data and labels
-> pipe : the desired pipeline configuration
<- no output, saved model and confusion matrix in corresponding filename.npz
"""
fileid = filename2(i,j,k1,k2,l)
if not os.path.isfile(fileid):
if (printit):
print i,j,k1,k2,l
if k1==l or k2==l: # perform K-fold
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+', cross-validating on '+str(l)+'...'
if l == k1: # copy if existent from the other sibling file
tmpcopyfileid = filename2(i,j,k1,k2,k2)
else: # same as above
tmpcopyfileid = filename2(i,j,k1,k2,k1)
if not os.path.isfile(tmpcopyfileid):
folds = cv.split(data, labels)
cm_all = np.zeros((2,2))
for fold, (train_ind, test_ind) in enumerate(folds):
x_train, x_test = data[train_ind], data[test_ind]
y_train, y_test = labels[train_ind], labels[test_ind]
model = pipe.fit(x_train,y_train)
y_pred = model.predict(x_test)
cm = confusion_matrix(y_pred=y_pred, y_true=y_test)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_all += cm/5.
else:
cm_all = np.load(tmpcopyfileid)['cm']
model = np.load(tmpcopyfileid)['model'][0]
np.savez(fileid,cm=cm_all,model=np.array([model]))
else: # perform cross-check
tr_data = data
tr_labels = labels
ts_data = data2
ts_labels = labels2
model = []
for m in range(kmax):
tmpcopyfileid = filename2(i,j,k1,k2,m)
if k1!=m and k2!=m and os.path.isfile(tmpcopyfileid):
if (printit):
print 'Found precomputed model of '+str(k1)+str(k2)+', tested on '+str(m)+'. Testing on '+str(l)+'...'
model = np.load(tmpcopyfileid)['model'][0]
break
if model==[]: # model not found precomputed
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+', testing on '+str(l)+'...'
model = pipe.fit(tr_data,tr_labels)
y_pred = model.predict(ts_data)
cm = confusion_matrix(y_pred=y_pred, y_true=ts_labels)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.savez(fileid,cm=cm,model=np.array([model]))
def init_steps2(i,j,jmax,surf,surfla,printit=True):
"""function for helping parallelization of computations per 2 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> jmax : number of all subfeaturesets
-> surf, surfla : surface data and labels
"""
if j==jmax:
featsel = SelectKBest(k=1000,score_func= mutual_info_classif)
else:
featsel = SelectKBest(k='all',score_func= mutual_info_classif)
pipe = make_pipe_clf(scaler, featsel, decomp, classifiers[2])
for k1 in range(surf.shape[0]): # for every training surface1
for k2 in range(surf.shape[0]): # for every training surface2
if k2 > k1:
for l in range(surf.shape[0]): # for every testing surface
tr_surf = np.concatenate((surf[k1],surf[k2]),axis=0)
tr_surfla = np.concatenate((surfla[:,k1],surfla[:,k2]),axis=0)
ts_surf, ts_surfla = surf[l], surfla[:,l]
cross_fit2(i,j,k1,k2,surf.shape[0],l,tr_surf,tr_surfla,ts_surf,ts_surfla,pipe,printit)
def train_2_surface(surf,surfla,n=-1,printit=True):
"""Parallel training -on surface level- of all combinations on 2 surfaces
-> n : number of cores to run in parallel,
input of joblib's Parallel (n=-1 means all available cores)
-> surf, surfla : surface data and labels
*** Cross surface validation, TRAINING with 2 surfaces each time, out of 6 surfaces in total
total= 4 (featuresets) * [comb(6,2)*6] (surface combinations: trained on 2, tested on 1) * 1 (prefeatureset)
= 4*15*6*1 = 360 different runs-files.
Note that comb(n,r) = n!/(r!(n-r)!)
"""
if (printit):
print "-------------------------- TRAINING all combinations per 2 surfaces ----------------------------------"
for i in range(len(prefeatid)-1):
_ = [Parallel(n_jobs=n)([delayed(init_steps2) (i,j,surf.shape[0]-1,surf[j,:,i],surfla[:,:,i],printit)
for j in range(surf.shape[0])])]
def bargraph_perf_gen2(maxsurf,printit=True):
"""Perf file for bargraph generation using bargraph tool, for 2 surfaces"""
if (printit):
print "---------------------------- Generating perf files for 2 surfaces ------------------------------------"
prefeats = prefeatnames[prefeatid][:-1]
# prefeatures, subfeatures, trained, tested, (TP,TN,FN,FP)
acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all tested surfaces
avg = np.zeros((len(prefeats),len(subfeats),4))
# prefeatures, subfeatures, trained, cross_val_self_accuracy, (TP,TN,FN,FP)
self_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,1,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all self tested surfaces
avgs = np.zeros((len(prefeats),len(subfeats),4))
# features, subfeatures, trained, (tested avg, tested std), (TP,TN,FN,FP)
cross_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,2,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all cross tested surfaces
avgc = np.zeros((len(prefeats),len(subfeats),4))
initial_str = "# clustered and stacked graph bogus data\n=stackcluster;TP;TN;FN;FP\n"+\
"colors=med_blue,dark_green,yellow,red\n=nogridy\n=noupperright\nfontsz=5\nlegendx=right\n"+\
"legendy=center\ndatascale=50\nyformat=%g%%\nxlabel=TrainedON-TestedON\nylabel=Metrics\n=table"
respath = filename2(retpath=1)
for i in range(len(prefeats)):
outname = respath+prefeats[i]
outfile = outname+'.perf'
outfile1 = outname+'_selfaccuracy.perf'
outfile2 = outname+'_crossaccuracy.perf'
out = open(outfile,'w+')
out.write(initial_str+"\n")
out1 = open(outfile1,'w+')
out1.write(initial_str+"\n")
out2 = open(outfile2,'w+')
out2.write(initial_str+"\n")
for k1 in range(maxsurf):
for k2 in range(maxsurf):
if k2 > k1:
for l in range(maxsurf):
out.write("multimulti="+str(k1)+str(k2)+"-"+str(l)+"\n")
for j in range(len(subfeats)):
fileid = filename2(i,j,k1,k2,l)
tmp = np.load(fileid)['cm']
acc[i,j,k1,k2,l,0] = round(tmp[1,1],2) # TP
acc[i,j,k1,k2,l,1] = round(tmp[0,0],2) # TN
acc[i,j,k1,k2,l,2] = 1-round(tmp[1,1],2) # FN
acc[i,j,k1,k2,l,3] = 1-round(tmp[0,0],2) # FP
avg[i,j,:] += acc[i,j,k1,k2,l,:]
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],acc[i,j,k1,k2,l,0],
acc[i,j,k1,k2,l,1],acc[i,j,k1,k2,l,2],
acc[i,j,k1,k2,l,3]))
if l == k1 or l == k2: # selc accuracy
if j == 0 and l == k2:
out1.write("multimulti="+str(k1)+str(k2)+"-"+str(l)+"\n")
self_acc[i,j,k1,k2,0,:] = acc[i,j,k1,k2,l]
avgs[i,j,:] += self_acc[i,j,k1,k2,0,:]
if l == k2:
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
self_acc[i,j,k1,k2,0,0],
self_acc[i,j,k1,k2,0,1],
self_acc[i,j,k1,k2,0,2],
self_acc[i,j,k1,k2,0,3]))
if l != k1 and l != k2:
t = range(maxsurf)
t.remove(k1)
t.remove(k2)
if (l == t[-1]):
if j == 0:
out2.write("multimulti="+str(k1)+str(k2)+"\n")
cross_acc[i,j,k1,k2,0,:] = np.mean(acc[i,j,k1,k2,t,:], axis=0) # avg
# cross_acc[i,j,k1,k2,1,:] = np.std(acc[i,j,k1,k2,t,:], axis=0) # std
avgc[i,j,:] += cross_acc[i,j,k1,k2,0,:]
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
cross_acc[i,j,k1,k2,0,0],
cross_acc[i,j,k1,k2,0,1],
cross_acc[i,j,k1,k2,0,2],
cross_acc[i,j,k1,k2,0,3]))
out.write("multimulti=AVG\n")
out1.write("multimulti=AVG\n")
out2.write("multimulti=AVG\n")
avg /= comb(maxsurf,2)*maxsurf*1.
avgs /= comb(maxsurf,2)*2.
avgc /= comb(maxsurf,2)*1.
for j in range(len(subfeats)):
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avg[i,j,0],avg[i,j,1],avg[i,j,2],avg[i,j,3]))
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgs[i,j,0],avgs[i,j,1],avgs[i,j,2],avgs[i,j,3]))
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgc[i,j,0],avgc[i,j,1],avgc[i,j,2],avgc[i,j,3]))
out.close()
out1.close()
out2.close()
############ TRAINING with 3 surfaces each time, out of 6 surfaces in total ##############
def filename3(i=0,j=0,k1=0,k2=0,k3=0,l=0,retpath=0):
"""function for the filename of the selected combination for training per 3 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> l : surface id tested on
<- filename
"""
filepath = respath+'3/'
ensure_dir(filepath)
if retpath:
return filepath
else:
return filepath+'fs_'+str(i)+'_subfs_'+str(j)+'_tr1_'+str(k1)+'_tr2_'+str(k2)+'_tr3_'+str(k3)+'_ts_'+str(l)+'.npz'
def cross_fit3(i,j,k1,k2,k3,kmax,l,data,labels,data2,labels2,pipe,printit=True):
"""function for fitting model per 3 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> kmax : maximum surfaces
-> l : surface id tested on
-> data, labels : training data and labels
-> data2, labels2 : testing data and labels
-> pipe : the desired pipeline configuration
<- no output, saved model and confusion matrix in corresponding filename.npz
"""
fileid = filename3(i,j,k1,k2,k3,l)
if not os.path.isfile(fileid):
if (printit):
print i,j,k1,k2,k3,l
if k1==l or k2==l or k3==l: # perform K-fold
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+"-"+str(k3)+', cross-validating on '+str(l)+'...'
if l == k1: # copy if existent from the other sibling file
tmpcopyfileid1 = filename3(i,j,k1,k2,k3,k2)
tmpcopyfileid2 = filename3(i,j,k1,k2,k3,k3)
elif l == k2: # same as above
tmpcopyfileid1 = filename3(i,j,k1,k2,k3,k1)
tmpcopyfileid2 = filename3(i,j,k1,k2,k3,k3)
else:
tmpcopyfileid1 = filename3(i,j,k1,k2,k3,k1)
tmpcopyfileid2 = filename3(i,j,k1,k2,k3,k2)
if not os.path.isfile(tmpcopyfileid1) and not os.path.isfile(tmpcopyfileid2):
folds = cv.split(data, labels)
cm_all = np.zeros((2,2))
for fold, (train_ind, test_ind) in enumerate(folds):
x_train, x_test = data[train_ind], data[test_ind]
y_train, y_test = labels[train_ind], labels[test_ind]
model = pipe.fit(x_train,y_train)
y_pred = model.predict(x_test)
cm = confusion_matrix(y_pred=y_pred, y_true=y_test)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_all += cm/5.
else:
if os.path.isfile(tmpcopyfileid1):
cm_all = np.load(tmpcopyfileid1)['cm']
model = np.load(tmpcopyfileid1)['model'][0]
else:
cm_all = np.load(tmpcopyfileid2)['cm']
model = np.load(tmpcopyfileid2)['model'][0]
np.savez(fileid,cm=cm_all,model=np.array([model]))
else: # perform cross-check
tr_data = data
tr_labels = labels
ts_data = data2
ts_labels = labels2
model = []
for m in range(kmax):
tmpcopyfileid = filename3(i,j,k1,k2,k3,m)
if k1!=m and k2!=m and k3!=m and os.path.isfile(tmpcopyfileid):
if (printit):
print 'Found precomputed model of '+str(k1)+str(k2)+str(k3)+', tested on '+str(m)+'. Testing on '+str(l)+'...'
model = np.load(tmpcopyfileid)['model'][0]
break
if model==[]: # model not found precomputed
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+"-"+str(k3)+', testing on '+str(l)+'...'
model = pipe.fit(tr_data,tr_labels)
y_pred = model.predict(ts_data)
cm = confusion_matrix(y_pred=y_pred, y_true=ts_labels)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.savez(fileid,cm=cm,model=np.array([model]))
def init_steps3(i,j,jmax,surf,surfla,printit=True):
"""function for helping parallelization of computations per 3 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> jmax : number of all subfeaturesets
-> surf, surfla : surface data and labels
"""
if j==jmax:
featsel = SelectKBest(k=1000,score_func= mutual_info_classif)
else:
featsel = SelectKBest(k='all',score_func= mutual_info_classif)
pipe = make_pipe_clf(scaler, featsel, decomp, classifiers[2])
for k1 in range(surf.shape[0]): # for every training surface1
for k2 in range(surf.shape[0]): # for every training surface2
if k2 > k1:
for k3 in range(surf.shape[0]):
if k3 > k2:
for l in range(surf.shape[0]): # for every testing surface
tr_surf = np.concatenate((surf[k1],surf[k2],surf[k3]),axis=0)
tr_surfla = np.concatenate((surfla[:,k1],surfla[:,k2],surfla[:,k3]),axis=0)
ts_surf, ts_surfla = surf[l], surfla[:,l]
cross_fit3(i,j,k1,k2,k3,surf.shape[0],l,tr_surf,tr_surfla,ts_surf,ts_surfla,pipe,printit)
def train_3_surface(surf,surfla,n=-1,printit=True):
"""Parallel training -on surface level- of all combinations on 3 surfaces
-> n : number of cores to run in parallel,
input of joblib's Parallel (n=-1 means all available cores)
-> surf, surfla : surface data and labels
*** Cross surface validation, TRAINING with 3 surfaces each time, out of 6 surfaces in total
total= 4 (featuresets) * [comb(6,3)*6] (surface combinations: trained on 3, tested on 1) * 1 (prefeatureset)
= 4*20*6*1 = 480 different runs-files.
Note that comb(n,r) = n!/(r!(n-r)!)
"""
if (printit):
print "-------------------------- TRAINING all combinations per 3 surfaces ----------------------------------"
for i in range(len(prefeatid)-1):
_ = [Parallel(n_jobs=n)([delayed(init_steps3) (i,j,surf.shape[0]-1,surf[j,:,i],surfla[:,:,i],printit)
for j in range(surf.shape[0])])]
def bargraph_perf_gen3(maxsurf,printit=True):
"""Perf file for bargraph generation using bargraph tool, for 3 surfaces"""
if (printit):
print "---------------------------- Generating perf files for 3 surfaces ------------------------------------"
prefeats = prefeatnames[prefeatid][:-1]
# prefeatures, subfeatures, trained, tested, (TP,TN,FN,FP)
acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,maxsurf,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all tested surfaces
avg = np.zeros((len(prefeats),len(subfeats),4))
# prefeatures, subfeatures, trained, cross_val_self_accuracy, (TP,TN,FN,FP)
self_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,1,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all self tested surfaces
avgs = np.zeros((len(prefeats),len(subfeats),4))
# features, subfeatures, trained, (tested avg, tested std), (TP,TN,FN,FP)
cross_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,2,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all cross tested surfaces
avgc = np.zeros((len(prefeats),len(subfeats),4))
initial_str = "# clustered and stacked graph bogus data\n=stackcluster;TP;TN;FN;FP\n"+\
"colors=med_blue,dark_green,yellow,red\n=nogridy\n=noupperright\nfontsz=5\nlegendx=right\n"+\
"legendy=center\ndatascale=50\nyformat=%g%%\nxlabel=TrainedON-TestedON\nylabel=Metrics\n=table"
respath = filename3(retpath=1)
for i in range(len(prefeats)):
outname = respath+prefeats[i]
outfile = outname+'.perf'
outfile1 = outname+'_selfaccuracy.perf'
outfile2 = outname+'_crossaccuracy.perf'
out = open(outfile,'w+')
out.write(initial_str+"\n")
out1 = open(outfile1,'w+')
out1.write(initial_str+"\n")
out2 = open(outfile2,'w+')
out2.write(initial_str+"\n")
for k1 in range(maxsurf):
for k2 in range(maxsurf):
if k2 > k1:
for k3 in range(maxsurf):
if k3 > k2:
for l in range(maxsurf):
out.write("multimulti="+str(k1)+str(k2)+str(k3)+"-"+str(l)+"\n")
for j in range(len(subfeats)):
fileid = filename3(i,j,k1,k2,k3,l)
tmp = np.load(fileid)['cm']
acc[i,j,k1,k2,k3,l,0] = round(tmp[1,1],2) # TP
acc[i,j,k1,k2,k3,l,1] = round(tmp[0,0],2) # TN
acc[i,j,k1,k2,k3,l,2] = 1-round(tmp[1,1],2) # FN
acc[i,j,k1,k2,k3,l,3] = 1-round(tmp[0,0],2) # FP
avg[i,j,:] += acc[i,j,k1,k2,k3,l,:]
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],acc[i,j,k1,k2,k3,l,0],
acc[i,j,k1,k2,k3,l,1],
acc[i,j,k1,k2,k3,l,2],
acc[i,j,k1,k2,k3,l,3]))
if l == k1 or l == k2 or l == k3: # selc accuracy
if j == 0 and l == k3:
out1.write("multimulti="+str(k1)+str(k2)+str(k3)+"-"+str(l)+"\n")
self_acc[i,j,k1,k2,k3,0,:] = acc[i,j,k1,k2,k3,l]
avgs[i,j,:] += self_acc[i,j,k1,k2,k3,0,:]
if l == k3:
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
self_acc[i,j,k1,k2,k3,0,0],
self_acc[i,j,k1,k2,k3,0,1],
self_acc[i,j,k1,k2,k3,0,2],
self_acc[i,j,k1,k2,k3,0,3]))
if l != k1 and l != k2 and l != k3:
t = range(maxsurf)
t.remove(k1)
t.remove(k2)
t.remove(k3)
if (l == t[-1]):
if j == 0:
out2.write("multimulti="+str(k1)+str(k2)+str(k3)+"\n")
# avg
cross_acc[i,j,k1,k2,k3,0,:] = np.mean(acc[i,j,k1,k2,k3,t,:], axis=0)
# std
# cross_acc[i,j,k1,k2,k3,1,:] = np.std(acc[i,j,k1,k2,k3,t,:], axis=0)
avgc[i,j,:] += cross_acc[i,j,k1,k2,k3,0,:]
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
cross_acc[i,j,k1,k2,k3,0,0],
cross_acc[i,j,k1,k2,k3,0,1],
cross_acc[i,j,k1,k2,k3,0,2],
cross_acc[i,j,k1,k2,k3,0,3]))
out.write("multimulti=AVG\n")
out1.write("multimulti=AVG\n")
out2.write("multimulti=AVG\n")
avg /= comb(maxsurf,3)*maxsurf*1.
avgs /= comb(maxsurf,3)*3.
avgc /= comb(maxsurf,3)*1.
for j in range(len(subfeats)):
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avg[i,j,0],avg[i,j,1],avg[i,j,2],avg[i,j,3]))
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgs[i,j,0],avgs[i,j,1],avgs[i,j,2],avgs[i,j,3]))
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgc[i,j,0],avgc[i,j,1],avgc[i,j,2],avgc[i,j,3]))
out.close()
out1.close()
out2.close()
############ TRAINING with 4 surfaces each time, out of 6 surfaces in total ##############
def filename4(i=0,j=0,k1=0,k2=0,k3=0,k4=0,l=0,retpath=0):
"""function for the filename of the selected combination for training per 4 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> l : surface id tested on
<- filename
"""
filepath = respath+'4/'
ensure_dir(filepath)
if retpath:
return filepath
else:
return filepath+'fs_'+str(i)+'_subfs_'+str(j)+'_tr1_'+str(k1)+'_tr2_'+str(k2)+'_tr3_'+str(k3)+'_tr4_'+str(k4)+'_ts_'+str(l)+'.npz'
def cross_fit4(i,j,k1,k2,k3,k4,kmax,l,data,labels,data2,labels2,pipe,printit=True):
"""function for fitting model per 4 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> kmax : maximum surfaces
-> l : surface id tested on
-> data, labels : training data and labels
-> data2, labels2 : testing data and labels
-> pipe : the desired pipeline configuration
<- no output, saved model and confusion matrix in corresponding filename.npz
"""
fileid = filename4(i,j,k1,k2,k3,k4,l)
if not os.path.isfile(fileid):
if (printit):
print i,j,k1,k2,k3,k4,l
if k1==l or k2==l or k3==l or k4==l: # perform K-fold
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+"-"+str(k3)+"-"+str(k4)+', cross-validating on '+str(l)+'...'
if l == k1: # copy if existent from the other sibling file
tmpcopyfileid1 = filename4(i,j,k1,k2,k3,k4,k2)
tmpcopyfileid2 = filename4(i,j,k1,k2,k3,k4,k3)
tmpcopyfileid3 = filename4(i,j,k1,k2,k3,k4,k4)
elif l == k2: # same as above
tmpcopyfileid1 = filename4(i,j,k1,k2,k3,k4,k1)
tmpcopyfileid2 = filename4(i,j,k1,k2,k3,k4,k3)
tmpcopyfileid3 = filename4(i,j,k1,k2,k3,k4,k4)
elif l == k3: # same as above
tmpcopyfileid1 = filename4(i,j,k1,k2,k3,k4,k1)
tmpcopyfileid2 = filename4(i,j,k1,k2,k3,k4,k2)
tmpcopyfileid3 = filename4(i,j,k1,k2,k3,k4,k4)
else:
tmpcopyfileid1 = filename4(i,j,k1,k2,k3,k4,k1)
tmpcopyfileid2 = filename4(i,j,k1,k2,k3,k4,k2)
tmpcopyfileid3 = filename4(i,j,k1,k2,k3,k4,k3)
if not os.path.isfile(tmpcopyfileid1) and not os.path.isfile(tmpcopyfileid2) and not os.path.isfile(tmpcopyfileid3):
folds = cv.split(data, labels)
cm_all = np.zeros((2,2))
for fold, (train_ind, test_ind) in enumerate(folds):
x_train, x_test = data[train_ind], data[test_ind]
y_train, y_test = labels[train_ind], labels[test_ind]
model = pipe.fit(x_train,y_train)
y_pred = model.predict(x_test)
cm = confusion_matrix(y_pred=y_pred, y_true=y_test)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_all += cm/5.
else:
if os.path.isfile(tmpcopyfileid1):
cm_all = np.load(tmpcopyfileid1)['cm']
model = np.load(tmpcopyfileid1)['model'][0]
elif os.path.isfile(tmpcopyfileid2):
cm_all = np.load(tmpcopyfileid2)['cm']
model = np.load(tmpcopyfileid2)['model'][0]
elif os.path.isfile(tmpcopyfileid3):
cm_all = np.load(tmpcopyfileid3)['cm']
model = np.load(tmpcopyfileid3)['model'][0]
np.savez(fileid,cm=cm_all,model=np.array([model]))
else: # perform cross-check
tr_data = data
tr_labels = labels
ts_data = data2
ts_labels = labels2
model = []
for m in range(kmax):
tmpcopyfileid = filename4(i,j,k1,k2,k3,k4,m)
if k1!=m and k2!=m and k3!=m and k4!=m and os.path.isfile(tmpcopyfileid):
if (printit):
print 'Found precomputed model of '+str(k1)+str(k2)+str(k3)+str(k4)+', tested on '+str(m)+'. Testing on '+str(l)+'...'
model = np.load(tmpcopyfileid)['model'][0]
break
if model==[]: # model not found precomputed
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+"-"+str(k3)+"-"+str(k4)+', testing on '+str(l)+'...'
model = pipe.fit(tr_data,tr_labels)
y_pred = model.predict(ts_data)
cm = confusion_matrix(y_pred=y_pred, y_true=ts_labels)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.savez(fileid,cm=cm,model=np.array([model]))
def init_steps4(i,j,jmax,surf,surfla,printit=True):
"""function for helping parallelization of computations per 4 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> jmax : number of all subfeaturesets
-> surf, surfla : surface data and labels
"""
if j==jmax:
featsel = SelectKBest(k=1000,score_func= mutual_info_classif)
else:
featsel = SelectKBest(k='all',score_func= mutual_info_classif)
pipe = make_pipe_clf(scaler, featsel, decomp, classifiers[2])
for k1 in range(surf.shape[0]): # for every training surface1
for k2 in range(surf.shape[0]): # for every training surface2
if k2 > k1:
for k3 in range(surf.shape[0]):
if k3 > k2:
for k4 in range(surf.shape[0]):
if k4 > k3:
for l in range(surf.shape[0]): # for every testing surface
                                    tr_surf = np.concatenate((surf[k1],surf[k2],surf[k3],surf[k4]),axis=0)
                                    tr_surfla = np.concatenate((surfla[:,k1],surfla[:,k2],surfla[:,k3],surfla[:,k4]),axis=0)
ts_surf, ts_surfla = surf[l], surfla[:,l]
cross_fit4(i,j,k1,k2,k3,k4,surf.shape[0],l,
tr_surf,tr_surfla,ts_surf,ts_surfla,pipe,printit)
def train_4_surface(surf,surfla,n=-1,printit=True):
"""Parallel training -on surface level- of all combinations on 4 surfaces
-> n : number of cores to run in parallel,
input of joblib's Parallel (n=-1 means all available cores)
-> surf, surfla : surface data and labels
    *** Cross surface validation, TRAINING with 4 surfaces each time, out of 6 surfaces in total
total= 4 (featuresets) * [comb(6,4)*6] (surface combinations: trained on 4, tested on 1) * 1 (prefeatureset)
= 4*15*6*1 = 360 different runs-files.
Note that comb(n,r) = n!/(r!(n-r)!)
"""
if (printit):
print "-------------------------- TRAINING all combinations per 4 surfaces ----------------------------------"
for i in range(len(prefeatid)-1):
_ = [Parallel(n_jobs=n)([delayed(init_steps4) (i,j,surf.shape[0]-1,surf[j,:,i],surfla[:,:,i],printit)
for j in range(surf.shape[0])])]
def bargraph_perf_gen4(maxsurf,printit=True):
"""Perf file for bargraph generation using bargraph tool, for 4 surfaces"""
if (printit):
print "---------------------------- Generating perf files for 4 surfaces ------------------------------------"
prefeats = prefeatnames[prefeatid][:-1]
# prefeatures, subfeatures, trained, tested, (TP,TN,FN,FP)
acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,maxsurf,maxsurf,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all tested surfaces
avg = np.zeros((len(prefeats),len(subfeats),4))
# prefeatures, subfeatures, trained, cross_val_self_accuracy, (TP,TN,FN,FP)
self_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,maxsurf,1,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all self tested surfaces
avgs = np.zeros((len(prefeats),len(subfeats),4))
# features, subfeatures, trained, (tested avg, tested std), (TP,TN,FN,FP)
cross_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,maxsurf,2,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all cross tested surfaces
avgc = np.zeros((len(prefeats),len(subfeats),4))
initial_str = "# clustered and stacked graph bogus data\n=stackcluster;TP;TN;FN;FP\n"+\
"colors=med_blue,dark_green,yellow,red\n=nogridy\n=noupperright\nfontsz=5\nlegendx=right\n"+\
"legendy=center\ndatascale=50\nyformat=%g%%\nxlabel=TrainedON-TestedON\nylabel=Metrics\n=table"
respath = filename4(retpath=1)
for i in range(len(prefeats)):
outname = respath+prefeats[i]
outfile = outname+'.perf'
outfile1 = outname+'_selfaccuracy.perf'
outfile2 = outname+'_crossaccuracy.perf'
out = open(outfile,'w+')
out.write(initial_str+"\n")
out1 = open(outfile1,'w+')
out1.write(initial_str+"\n")
out2 = open(outfile2,'w+')
out2.write(initial_str+"\n")
for k1 in range(maxsurf):
for k2 in range(maxsurf):
if k2 > k1:
for k3 in range(maxsurf):
if k3 > k2:
for k4 in range(maxsurf):
if k4 > k3:
for l in range(maxsurf):
out.write("multimulti="+str(k1)+str(k2)+str(k3)+str(k4)+"-"+str(l)+"\n")
for j in range(len(subfeats)):
fileid = filename4(i,j,k1,k2,k3,k4,l)
tmp = np.load(fileid)['cm']
acc[i,j,k1,k2,k3,k4,l,0] = round(tmp[1,1],2) # TP
acc[i,j,k1,k2,k3,k4,l,1] = round(tmp[0,0],2) # TN
acc[i,j,k1,k2,k3,k4,l,2] = 1-round(tmp[1,1],2) # FN
acc[i,j,k1,k2,k3,k4,l,3] = 1-round(tmp[0,0],2) # FP
avg[i,j,:] += acc[i,j,k1,k2,k3,k4,l,:]
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
acc[i,j,k1,k2,k3,k4,l,0],
acc[i,j,k1,k2,k3,k4,l,1],
acc[i,j,k1,k2,k3,k4,l,2],
acc[i,j,k1,k2,k3,k4,l,3]))
if l == k1 or l == k2 or l == k3 or l == k4: # selc accuracy
if j == 0 and l == k4:
out1.write("multimulti="+str(k1)+str(k2)+str(k3)+str(k4)+"-"+str(l)+"\n")
self_acc[i,j,k1,k2,k3,k4,0,:] = acc[i,j,k1,k2,k3,k4,l]
avgs[i,j,:] += self_acc[i,j,k1,k2,k3,k4,0,:]
if l == k4:
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
self_acc[i,j,k1,k2,k3,k4,0,0],
self_acc[i,j,k1,k2,k3,k4,0,1],
self_acc[i,j,k1,k2,k3,k4,0,2],
self_acc[i,j,k1,k2,k3,k4,0,3]))
if l != k1 and l != k2 and l != k3 and l!= k4:
t = range(maxsurf)
t.remove(k1)
t.remove(k2)
t.remove(k3)
t.remove(k4)
if (l == t[-1]):
if j == 0:
out2.write("multimulti="+str(k1)+str(k2)+str(k3)+str(k4)+"\n")
cross_acc[i,j,k1,k2,k3,k4,0,:] = np.mean(acc[i,j,k1,k2,k3,k4,t,:], axis=0)
avgc[i,j,:] += cross_acc[i,j,k1,k2,k3,k4,0,:]
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
cross_acc[i,j,k1,k2,k3,k4,0,0],
cross_acc[i,j,k1,k2,k3,k4,0,1],
cross_acc[i,j,k1,k2,k3,k4,0,2],
cross_acc[i,j,k1,k2,k3,k4,0,3]))
out.write("multimulti=AVG\n")
out1.write("multimulti=AVG\n")
out2.write("multimulti=AVG\n")
avg /= comb(maxsurf,4)*maxsurf*1.
avgs /= comb(maxsurf,4)*4.
avgc /= comb(maxsurf,4)*1.
for j in range(len(subfeats)):
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avg[i,j,0],avg[i,j,1],avg[i,j,2],avg[i,j,3]))
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgs[i,j,0],avgs[i,j,1],avgs[i,j,2],avgs[i,j,3]))
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgc[i,j,0],avgc[i,j,1],avgc[i,j,2],avgc[i,j,3]))
out.close()
out1.close()
out2.close()
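# Hypothetical usage note (not in the original script): once the 4-surface
# models above have been fitted and cached, the perf files consumed by the
# external bargraph tool can be produced with
#
#     bargraph_perf_gen4(maxsurf=6, printit=True)
#
# which writes a <prefeature>.perf, <prefeature>_selfaccuracy.perf and
# <prefeature>_crossaccuracy.perf trio under filename4(retpath=1).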
############ TRAINING with 5 surfaces each time, out of 6 surfaces in total ##############
def filename5(i=0,j=0,k1=0,k2=0,k3=0,k4=0,k5=0,l=0,retpath=0):
"""function for the filename of the selected combination for training per 5 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> l : surface id tested on
<- filename
"""
filepath = respath+'5/'
ensure_dir(filepath)
if retpath:
return filepath
else:
return filepath+'fs_'+str(i)+'_subfs_'+str(j)+'_tr1_'+str(k1)+'_tr2_'+str(k2)+'_tr3_'+str(k3)+'_tr4_'+str(k4)+'_tr5_'+str(k5)+'_ts_'+str(l)+'.npz'
def cross_fit5(i,j,k1,k2,k3,k4,k5,kmax,l,data,labels,data2,labels2,pipe,printit=True):
"""function for fitting model per 5 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> ki : surface ids trained on
-> kmax : maximum surfaces
-> l : surface id tested on
-> data, labels : training data and labels
-> data2, labels2 : testing data and labels
-> pipe : the desired pipeline configuration
<- no output, saved model and confusion matrix in corresponding filename.npz
"""
fileid = filename5(i,j,k1,k2,k3,k4,k5,l)
if not os.path.isfile(fileid):
if (printit):
print i,j,k1,k2,k3,k4,k5,l
if k1==l or k2==l or k3==l or k4==l or k5==l: # perform K-fold
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+"-"+str(k3)+"-"+str(k4)+"-"+str(k5)+', cross-validating on '+str(l)+'...'
if l == k1: # copy if existent from the other sibling file
tmpcopyfileid1 = filename5(i,j,k1,k2,k3,k4,k5,k2)
tmpcopyfileid2 = filename5(i,j,k1,k2,k3,k4,k5,k3)
tmpcopyfileid3 = filename5(i,j,k1,k2,k3,k4,k5,k4)
tmpcopyfileid4 = filename5(i,j,k1,k2,k3,k4,k5,k5)
elif l == k2: # same as above
tmpcopyfileid1 = filename5(i,j,k1,k2,k3,k4,k5,k1)
tmpcopyfileid2 = filename5(i,j,k1,k2,k3,k4,k5,k3)
tmpcopyfileid3 = filename5(i,j,k1,k2,k3,k4,k5,k4)
tmpcopyfileid4 = filename5(i,j,k1,k2,k3,k4,k5,k5)
elif l == k3: # same as above
tmpcopyfileid1 = filename5(i,j,k1,k2,k3,k4,k5,k1)
tmpcopyfileid2 = filename5(i,j,k1,k2,k3,k4,k5,k2)
tmpcopyfileid3 = filename5(i,j,k1,k2,k3,k4,k5,k4)
tmpcopyfileid4 = filename5(i,j,k1,k2,k3,k4,k5,k5)
elif l == k4: # same as above
tmpcopyfileid1 = filename5(i,j,k1,k2,k3,k4,k5,k1)
tmpcopyfileid2 = filename5(i,j,k1,k2,k3,k4,k5,k2)
tmpcopyfileid3 = filename5(i,j,k1,k2,k3,k4,k5,k3)
tmpcopyfileid4 = filename5(i,j,k1,k2,k3,k4,k5,k5)
else:
tmpcopyfileid1 = filename5(i,j,k1,k2,k3,k4,k5,k1)
tmpcopyfileid2 = filename5(i,j,k1,k2,k3,k4,k5,k2)
tmpcopyfileid3 = filename5(i,j,k1,k2,k3,k4,k5,k3)
tmpcopyfileid4 = filename5(i,j,k1,k2,k3,k4,k5,k4)
if not os.path.isfile(tmpcopyfileid1) and not os.path.isfile(tmpcopyfileid2)\
and not os.path.isfile(tmpcopyfileid3) and not os.path.isfile(tmpcopyfileid4):
folds = cv.split(data, labels)
cm_all = np.zeros((2,2))
for fold, (train_ind, test_ind) in enumerate(folds):
x_train, x_test = data[train_ind], data[test_ind]
y_train, y_test = labels[train_ind], labels[test_ind]
model = pipe.fit(x_train,y_train)
y_pred = model.predict(x_test)
cm = confusion_matrix(y_pred=y_pred, y_true=y_test)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm_all += cm/5.
else:
if os.path.isfile(tmpcopyfileid1):
cm_all = np.load(tmpcopyfileid1)['cm']
model = np.load(tmpcopyfileid1)['model'][0]
elif os.path.isfile(tmpcopyfileid2):
cm_all = np.load(tmpcopyfileid2)['cm']
model = np.load(tmpcopyfileid2)['model'][0]
elif os.path.isfile(tmpcopyfileid3):
cm_all = np.load(tmpcopyfileid3)['cm']
model = np.load(tmpcopyfileid3)['model'][0]
elif os.path.isfile(tmpcopyfileid4):
cm_all = np.load(tmpcopyfileid4)['cm']
model = np.load(tmpcopyfileid4)['model'][0]
np.savez(fileid,cm=cm_all,model=np.array([model]))
else: # perform cross-check
tr_data = data
tr_labels = labels
ts_data = data2
ts_labels = labels2
model = []
for m in range(kmax):
tmpcopyfileid = filename5(i,j,k1,k2,k3,k4,k5,m)
if k1!=m and k2!=m and k3!=m and k4!=m and k5!=m and os.path.isfile(tmpcopyfileid):
if (printit):
print 'Found precomputed model of '+str(k1)+str(k2)+str(k3)+str(k4)+str(k5)+', tested on '+str(m)+'. Testing on '+str(l)+'...'
model = np.load(tmpcopyfileid)['model'][0]
break
if model==[]: # model not found precomputed
if (printit):
print 'Fitting on '+str(k1)+"-"+str(k2)+"-"+str(k3)+"-"+str(k4)+"-"+str(k5)+', testing on '+str(l)+'...'
model = pipe.fit(tr_data,tr_labels)
y_pred = model.predict(ts_data)
cm = confusion_matrix(y_pred=y_pred, y_true=ts_labels)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.savez(fileid,cm=cm,model=np.array([model]))
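# Note added for clarity (not in the original file): the confusion matrices
# saved above are row-normalised, so cm[0,0] is the fraction of class-0
# (stable, in this dataset's convention) windows classified correctly and
# cm[1,1] the fraction of class-1 (slip) windows classified correctly;
# bargraph_perf_gen5 below reads exactly those two entries as TN and TP.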
def init_steps5(i,j,jmax,surf,surfla,printit=True):
"""function for helping parallelization of computations per 5 surfaces
-> i : prefeature id, among all computed prefeatures (0: |f|, ... see prefeatid)
-> j : subfeatureset among all features (0: AFFT, 1: FREQ, 2: TIME, 3: ALL)
-> jmax : number of all subfeaturesets
-> surf, surfla : surface data and labels
"""
if j==jmax:
featsel = SelectKBest(k=1000,score_func= mutual_info_classif)
else:
featsel = SelectKBest(k='all',score_func= mutual_info_classif)
pipe = make_pipe_clf(scaler, featsel, decomp, classifiers[2])
for k1 in range(surf.shape[0]): # for every training surface1
for k2 in range(surf.shape[0]): # for every training surface2
if k2 > k1:
for k3 in range(surf.shape[0]):
if k3 > k2:
for k4 in range(surf.shape[0]):
if k4 > k3:
for k5 in range(surf.shape[0]):
if k5 > k4:
for l in range(surf.shape[0]): # for every testing surface
                                            tr_surf = np.concatenate((surf[k1],surf[k2],surf[k3],surf[k4],surf[k5]),axis=0) # all five training surfaces
                                            tr_surfla = np.concatenate((surfla[:,k1],surfla[:,k2],surfla[:,k3],surfla[:,k4],surfla[:,k5]),axis=0)
                                            ts_surf, ts_surfla = surf[l], surfla[:,l]
                                            cross_fit5(i,j,k1,k2,k3,k4,k5,surf.shape[0],l,tr_surf,tr_surfla,ts_surf,ts_surfla,pipe,printit)
def train_5_surface(surf,surfla,n=-1,printit=True):
"""Parallel training -on surface level- of all combinations on 5 surfaces
-> n : number of cores to run in parallel,
input of joblib's Parallel (n=-1 means all available cores)
-> surf, surfla : surface data and labels
*** Cross surface validation, TRAINING with 5 surfaces each time, out of 6 surfaces in total
total= 4 (featuresets) * [comb(6,5)*6] (surface combinations: trained on 5, tested on 1) * 1 (prefeatureset)
    = 4*6*6*1 = 144 different run files.
Note that comb(n,r) = n!/(r!(n-r)!)
"""
if (printit):
print "-------------------------- TRAINING all combinations per 5 surfaces ----------------------------------"
for i in range(len(prefeatid)-1):
_ = [Parallel(n_jobs=n)([delayed(init_steps5) (i,j,surf.shape[0]-1,surf[j,:,i],surfla[:,:,i],printit)
for j in range(surf.shape[0])])]
def bargraph_perf_gen5(maxsurf,printit=True):
"""Perf file for bargraph generation using bargraph tool, for 5 surfaces"""
if (printit):
print "---------------------------- Generating perf files for 5 surfaces ------------------------------------"
prefeats = prefeatnames[prefeatid][:-1]
# prefeatures, subfeatures, trained, tested, (TP,TN,FN,FP)
acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,maxsurf,maxsurf,maxsurf,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all tested surfaces
avg = np.zeros((len(prefeats),len(subfeats),4))
# prefeatures, subfeatures, trained, cross_val_self_accuracy, (TP,TN,FN,FP)
self_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,maxsurf,maxsurf,1,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all self tested surfaces
avgs = np.zeros((len(prefeats),len(subfeats),4))
# features, subfeatures, trained, (tested avg, tested std), (TP,TN,FN,FP)
cross_acc = np.zeros((len(prefeats),len(subfeats),maxsurf,maxsurf,maxsurf,maxsurf,maxsurf,2,4))
# features, subfeatures, (TP,TN,FN,FP) -> avg over all cross tested surfaces
avgc = np.zeros((len(prefeats),len(subfeats),4))
initial_str = "# clustered and stacked graph bogus data\n=stackcluster;TP;TN;FN;FP\n"+\
"colors=med_blue,dark_green,yellow,red\n=nogridy\n=noupperright\nfontsz=5\nlegendx=right\n"+\
"legendy=center\ndatascale=50\nyformat=%g%%\nxlabel=TrainedON-TestedON\nylabel=Metrics\n=table"
respath = filename5(retpath=1)
for i in range(len(prefeats)):
outname = respath+prefeats[i]
outfile = outname+'.perf'
outfile1 = outname+'_selfaccuracy.perf'
outfile2 = outname+'_crossaccuracy.perf'
out = open(outfile,'w+')
out.write(initial_str+"\n")
out1 = open(outfile1,'w+')
out1.write(initial_str+"\n")
out2 = open(outfile2,'w+')
out2.write(initial_str+"\n")
for k1 in range(maxsurf):
for k2 in range(maxsurf):
if k2 > k1:
for k3 in range(maxsurf):
if k3 > k2:
for k4 in range(maxsurf):
if k4 > k3:
for k5 in range(maxsurf):
if k5 > k4:
for l in range(maxsurf):
out.write("multimulti="+str(k1)+str(k2)+str(k3)+str(k4)
+str(k5)+"-"+str(l)+"\n")
for j in range(len(subfeats)):
fileid = filename5(i,j,k1,k2,k3,k4,k5,l)
tmp = np.load(fileid)['cm']
acc[i,j,k1,k2,k3,k4,k5,l,0] = round(tmp[1,1],2) # TP
acc[i,j,k1,k2,k3,k4,k5,l,1] = round(tmp[0,0],2) # TN
acc[i,j,k1,k2,k3,k4,k5,l,2] = 1-round(tmp[1,1],2) # FN
acc[i,j,k1,k2,k3,k4,k5,l,3] = 1-round(tmp[0,0],2) # FP
avg[i,j,:] += acc[i,j,k1,k2,k3,k4,k5,l,:]
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
acc[i,j,k1,k2,k3,k4,k5,l,0],
acc[i,j,k1,k2,k3,k4,k5,l,1],
acc[i,j,k1,k2,k3,k4,k5,l,2],
acc[i,j,k1,k2,k3,k4,k5,l,3]))
                                                    # self accuracy
if l == k1 or l == k2 or l == k3 or l == k4 or l == k5:
if j == 0 and l == k5:
out1.write("multimulti="+str(k1)+str(k2)
+str(k3)+str(k4)+str(k5)+"-"+str(l)+"\n")
self_acc[i,j,k1,k2,k3,k4,k5,0,:] = acc[i,j,k1,k2,k3,k4,k5,l]
avgs[i,j,:] += self_acc[i,j,k1,k2,k3,k4,k5,0,:]
if l == k5:
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
self_acc[i,j,k1,k2,k3,k4,k5,0,0],
self_acc[i,j,k1,k2,k3,k4,k5,0,1],
self_acc[i,j,k1,k2,k3,k4,k5,0,2],
self_acc[i,j,k1,k2,k3,k4,k5,0,3]))
if l != k1 and l != k2 and l != k3 and l!= k4 and l!= k5:
t = range(maxsurf)
t.remove(k1)
t.remove(k2)
t.remove(k3)
t.remove(k4)
t.remove(k5)
if (l == t[-1]):
if j == 0:
out2.write("multimulti="+str(k1)+str(k2)+str(k3)+str(k4)+str(k5)+"\n")
cross_acc[i,j,k1,k2,k3,k4,k5,0,:] = np.mean(acc[i,j,k1,k2,k3,k4,k5,t,:], axis=0)
avgc[i,j,:] += cross_acc[i,j,k1,k2,k3,k4,k5,0,:]
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],
cross_acc[i,j,k1,k2,k3,k4,k5,0,0],
cross_acc[i,j,k1,k2,k3,k4,k5,0,1],
cross_acc[i,j,k1,k2,k3,k4,k5,0,2],
cross_acc[i,j,k1,k2,k3,k4,k5,0,3]))
out.write("multimulti=AVG\n")
out1.write("multimulti=AVG\n")
out2.write("multimulti=AVG\n")
avg /= comb(maxsurf,5)*maxsurf*1.
avgs /= comb(maxsurf,5)*5.
avgc /= comb(maxsurf,5)*1.
for j in range(len(subfeats)):
out.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avg[i,j,0],avg[i,j,1],avg[i,j,2],avg[i,j,3]))
out1.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgs[i,j,0],avgs[i,j,1],avgs[i,j,2],avgs[i,j,3]))
out2.write("%s %.2f %.2f %.2f %.2f\n" % (subfeats[j],avgc[i,j,0],avgc[i,j,1],avgc[i,j,2],avgc[i,j,3]))
out.close()
out1.close()
out2.close()
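# Hypothetical end-to-end driver (not in the original script). It assumes
# `surf`/`surfla` are the per-(subfeatureset, surface, prefeature) feature and
# label arrays produced upstream (e.g. by computeXY_persurf) for 6 surfaces.
def _example_run_5_surface_pipeline(surf, surfla, n_jobs=-1):
    """Sketch only: fit and cache every comb(6,5)*6 model, then emit the
    .perf files consumed by the bargraph tool."""
    train_5_surface(surf, surfla, n=n_jobs, printit=True)
    bargraph_perf_gen5(maxsurf=6, printit=True)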
def make_bargraphs_from_perf(i,maxsurf=6,printit=True):
"""Bargraph generation using bargraph tool, for i surfaces"""
if (printit):
print "---------------------------- Generating bar graphs for "+str(i+1)+" surfaces ------------------------------------"
resfold = respath+str(i+1)+'/'
allperf = glob.glob(resfold+"*.perf")
maxperf = len(allperf)
for k in range(len(allperf)):
j = allperf[k]
tmppdf = j[:-4]+"pdf"
tmppng = j[:-4]+"png"
with open(tmppdf, "w") as f1:
call([tool,"-pdf",j],stdout=f1)
with open(tmppng, "w") as f2:
call([tool,"-png","-non-transparent",j],stdout=f2)
img = mpimg.imread(tmppng)
if k!=0:
plt.subplot(maxsurf,maxperf-1,k+i*(maxperf-1))
plt.imshow(img)
plt.xticks([])
plt.yticks([])
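# Hypothetical driver (not in the original script): render the bar graphs for
# every training-set size that already has .perf files (1 to 5 training
# surfaces) and save the combined figure next to the results, e.g.
#
#     plt.figure(figsize=(20, 12))
#     for i in range(5):                      # i+1 training surfaces per call
#         make_bargraphs_from_perf(i, maxsurf=6, printit=True)
#     plt.savefig(respath + 'all_bargraphs.png', bbox_inches='tight')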
############ PREDICTING ACCURACY FOR ATI SENSOR DATA ##############
def testing_accuracy_simple(surf, surfla, Xsp, Ysp, keepind=[-1], printit=True, ltest=6):
"""Evaluating performance of single classifier
-> surf, Xsp : surface or whole dataset
-> surfla, Ysp : surface label or whole dataset labels
-> keepind : feature indexes to keep (default keep all)
-> ltest : number of testing surfaces
<- Yscn, Yscbn : Accuracy for BOTH and AFFT trained models (per 1 surface) for whole dataset
<- Yf1scn, Yf1scbn : F1score for BOTH and AFFT trained models (per 1 surface) for whole dataset
<- Ycmn, Ycmbn : Confusion matrix for BOTH and AFFT trained models (per 1 surface) for whole dataset
"""
if len(keepind) == 0 or keepind[0] == -1:
keepind = range(len(featnames))
fileid = filename1(0,3,0,5) # all features, 1 trained surface(surf 0)
fileidb = filename1(0,0,0,5) # |FFT| features, 1 trained surface(surf 0)
# fileid5 = filename5(0,3,0,1,2,3,4,5) # all features, 5 trained surfaces(surf 0-4)
# fileid5b = filename5(0,0,0,1,2,3,4,5) # |FFT| features, 5 trained surfaces(surf 0-4)
model = np.load(fileid)['model'][0]
modelb = np.load(fileidb)['model'][0]
# model5 = np.load(fileid5)['model'][0]
# model5b = np.load(fileid5b)['model'][0]
for i in range(ltest):
Yout = model.predict(surf[3, i, 0])
Youtb = modelb.predict(surf[0, i, 0])
# Yout5 = model5.predict(surf[3, i, 0])
# Yout5b = model5b.predict(surf[0, i, 0])
if printit:
# print i, Yout.shape, Youtb.shape#, Yout5.shape, Yout5b.shape
pass
Ysc = model.score(surf[3, i ,0], surfla[:, i, 0])
Yscb = modelb.score(surf[0, i, 0], surfla[:, i, 0])
# Ysc5 = model5.score(surf[3, i, 0], surfla[:, i, 0])
# Ysc5b = model5b.score(surf[0, i ,0], surfla[:, i, 0])
Ycm = confusion_matrix(y_pred=Yout, y_true=surfla[:, i, 0])
Ycm = Ycm.astype('float') / Ycm.sum(axis=1)[:, np.newaxis]
Ycmb = confusion_matrix(y_pred=Youtb, y_true=surfla[:, i, 0])
Ycmb = Ycmb.astype('float') / Ycmb.sum(axis=1)[:, np.newaxis]
Yf1sc = f1_score(y_pred=Yout, y_true=surfla[:, i, 0])
Yf1scb = f1_score(y_pred=Youtb, y_true=surfla[:, i, 0])
# Ycm5 = confusion_matrix(y_pred=Yout5, y_true=surfla[:, i, 0])
# Ycm5b = confusion_matrix(y_pred=Yout5b, y_true=surfla[:, i, 0])
if printit:
print "Accuracy for surface ", i, Ysc, Yscb #, Ysc5, Ysc5b
print "F1score for surface ", i, Yf1sc, Yf1scb
print "TN(stable) and TP(slip) for surface ", i, Ycm[0,0], Ycm[1,1],'|', Ycmb[0,0], Ycmb[1,1]
Youtn = model.predict(Xsp[2][:,keepind])
Youtbn = modelb.predict(Xsp[2][:,-window-2:-window/2-1])
# Yout5n = model5.predict(Xsp[2])
# Yout5bn = model5b.predict(Xsp[2][:,-window-2:-window/2-1])
Yscn = model.score(Xsp[2][:,keepind],Ysp[2])
Yscbn = modelb.score(Xsp[2][:,-window-2:-window/2-1],Ysp[2])
# Ysc5n = model5.score(Xsp[2],Ysp[2])
# Ysc5bn = model5b.score(Xsp[2][:,-window-2:-window/2-1],Ysp[2])
Ycmn = confusion_matrix(y_pred=Youtn, y_true=Ysp[2])
Ycmn = Ycmn.astype('float') / Ycmn.sum(axis=1)[:, np.newaxis]
Ycmbn = confusion_matrix(y_pred=Youtbn, y_true=Ysp[2])
Ycmbn = Ycmbn.astype('float') / Ycmbn.sum(axis=1)[:, np.newaxis]
Yf1scn = f1_score(y_pred=Youtn, y_true=Ysp[2])
Yf1scbn = f1_score(y_pred=Youtbn, y_true=Ysp[2])
# Ycm5n = confusion_matrix(y_pred=Yout5n, y_true=Ysp[2])
# Ycm5bn = confusion_matrix(y_pred=Yout5bn, y_true=Ysp[2])
print "======================================================================================"
print "Accuracy for dataset ", Yscn, Yscbn #, Ysc5n, Ysc5bn
print "F1score for dataset ", Yf1scn, Yf1scbn
print "TN(stable) and TP(slip) for dataset ", Ycmn[0,0], Ycmn[1,1],'|', Ycmbn[0,0], Ycmbn[1,1]
print "======================================================================================"
return Yscn, Yscbn, Yf1scn, Yf1scbn, Ycmn, Ycmbn
############ PREDICTING ACCURACY FOR ATI SENSOR DATA DETAILED ##############
def testing_accuracy(surf, surfla, trsurf=[1, 5], ltest=6, printit=True):
"""Evaluating performance of all classifiers on average
-> surf : surfaces of whole dataset
-> surfla : surface labels of whole dataset
-> trsurf : number of surfaces used for training, that will be evaluated
-> ltest : number of testing surfaces
<- acc : Average accuracy from all trained models
"""
lsurf = len(trsurf)
lsubfs = surf.shape[0]
acc = np.zeros((lsurf,lsubfs,ltest,2))
for r in range(lsurf): # for each number of surfaces used for training
for k in range(lsubfs): # for each subfs
filenames = glob.glob(respath + str(trsurf[r]) + "/fs_" + str(0) + "_subfs_" + str(k) + "_*.npz")
numf = len(filenames)
for i in range(ltest): # for each testing surface
curracc = np.zeros(numf)
for n in range(numf):
model = np.load(filenames[n])['model'][0]
Ysc = model.score(surf[k, i ,0], surfla[:, i, 0])
curracc[n] = Ysc
# if printit:
# print "Surf: ",trsurf[r],"subfs: ",k,"test_surf: ",i,"model: ",n,"Acc: ",Ysc
acc[r,k,i,0] = np.mean(curracc)
acc[r,k,i,1] = np.std(curracc)
if printit:
print "Surf: ",trsurf[r],"subfs: ",k,"test_surf: ",i,"Acc_mean-std: ",acc[r,k,i,0], acc[r,k,i,1]
if printit:
print "Surf: ",trsurf[r],"subfs: ",k,"Acc_mean-std: ",np.mean(acc[r,k,:,0]), np.mean(acc[r,k,:,1])
return acc
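# Hypothetical usage sketch (not in the original script): summarise the
# per-model accuracies returned by testing_accuracy for models trained on 1
# and on 5 surfaces, assuming `surf`/`surfla` come from computeXY_persurf.
#
#     acc = testing_accuracy(surf, surfla, trsurf=[1, 5], ltest=6)
#     print "mean accuracy, 1-surface models :", np.mean(acc[0, :, :, 0])
#     print "mean accuracy, 5-surface models :", np.mean(acc[1, :, :, 0])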
############ VISUALIZING ONLINE TESTING PROCEDURE ##############
def visualize(f, surf, surfla, chosensurf=5, plotpoints=200, save=False, printit=True):
matplotlib.rcParams['text.usetex'] = True
offset = window
inp = f[chosensurf][offset-600:,:3]
lab = f[chosensurf][offset-600:,-1]
INP1 = surf[3,chosensurf,0]
INP2 = surf[0,chosensurf,0]
OUT = surfla[:,chosensurf,0]
answerfreq = 5. # Hz
# plotpoints = 200. # 200 datapoints answer for visual purposes
# plotpoints = INP1.shape[0]*1. # like 1ms answers
# plotpoints = answerfreq*inp.shape[0]/window # like real time answers (200ms or 5Hz)
skipINP = int(round(INP1.shape[0]/plotpoints))
endsetINP = range(INP1.shape[0])[::skipINP]
minlen = len(endsetINP)
mult = (inp.shape[0]-offset)/INP1.shape[0]*1.
tx = np.array(endsetINP[:minlen][:-1])*mult
tfx = range(inp.shape[0])[:int(endsetINP[-1]*mult)]
tfind = (np.array(tfx) + offset).tolist()
if printit:
print skipINP, len(endsetINP)
endsetINP = endsetINP[:minlen][-1]
if printit:
print skipINP, endsetINP
fileid = filename1(0,3,0,5)
fileidb = filename1(0,0,0,5)
model = np.load(fileid)['model'][0]
modelb = np.load(fileidb)['model'][0]
Yout = model.predict(INP1)
Youtb = modelb.predict(INP2)
if printit:
print Yout.shape, Youtb.shape
plt.rc('text', usetex=True)
plt.rc('axes', linewidth=2)
plt.rc('font', weight='bold')
plt.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
ax = plt.figure(figsize=(20,10))
tf = np.linalg.norm(inp[tfind],axis=1)
tl = lab[tfind]
ty = Yout[:endsetINP:skipINP]+0.02
tyb = Youtb[:endsetINP:skipINP]+0.04
tyl = OUT[:endsetINP:skipINP]+0.06
if printit:
print tf.shape, ty.shape, len(tx)
p1, = plt.plot(tfx,tf/max(tf),linewidth=5)
    # plt.hold (a bare attribute reference) was a no-op here; matplotlib keeps existing artists by default
pl, = plt.plot(tfx,tl,linewidth=5,color='green')
p = plt.scatter(tx,ty,color='red',s=30)
pb = plt.scatter(tx,tyb,color='magenta',s=30)
pbl = plt.scatter(tx,tyl,color='brown',s=30)
plt.text(100, 0.15, r'\textbf{Stable}', ha="center", va="center", rotation=0,
size=25)
plt.text(100, 0.85, r'\textbf{Slip}', ha="center", va="center", rotation=0,
size=25)
plt.annotate('', fontsize=10, xy=(100, 0.05), xytext=(100, 0.12),
arrowprops=dict(facecolor='black', shrink=0.05))
plt.annotate('', xy=(100, 0.98), xytext=(100, 0.9),
arrowprops=dict(facecolor='black', shrink=0.05))
plt.xlabel(r't ($1e^{-2} sec$)',fontsize=35)
# plt.yticks([])
plt.legend([p1,pl,p,pb,pbl],[r'$|\textbf{f}|$',r'$\textbf{Flabel}$',r'\textbf{outBOTH}',r'\textbf{outFFT}',r'$\textbf{TRlabel}$'], prop={'size': 35})
plt.tick_params(labelsize=20)
plt.tight_layout()
if save:
savefig(datapath+'validation_ati.pdf', bbox_inches='tight')
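# Hypothetical call (not in the original file): visualise the online testing
# behaviour on the last surface, assuming f/surf/surfla were built by the
# feature-extraction pipeline used in prediction() below.
#
#     visualize(f, surf, surfla, chosensurf=5, plotpoints=200, save=False)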
###### Prediction function for new datasets
def prediction(dataset,keepind=[-1],k=1,n=6,scale=1.0,fdes=0.0,printit=False,plotit=False):
if len(keepind) == 0 or keepind[0] == -1:
keepind = range(len(featnames))
print "Filename for prediction: "+dataset
if dataset[-4:] == '.mat':
atifile = datapath+dataset
atifeatname = dataset[:-4]+'_'+featname+'_'+str(scale)+'_'+str(fdes)+'_'
atifeatfile = featpath+atifeatname+'.npz'
atisurffile = featpath+atifeatname+'_'+str(len(keepind))+'_'+str(k)+'fing_'+str(n)+'surf.npz'
atiXYfile = featpath+atifeatname+'_XY.npz'
atiXYsplitfile = featpath+atifeatname+'_XYsplit.npz'
f,l,fd,member,m1,m2 = data_prep(atifile,scale=[scale],fdes=fdes,k=k,printit=printit)
# print np.max(f[0][:,:-1])
# for i in range(len(f)):
# f[i][:,:-1] = scale * f[i][:,:-1]
# print np.max(f[0][:,:-1])
prefeat = compute_prefeat(f,printit)
features, labels = feature_extraction(prefeat, member, atifeatfile, atifeatname,printit)
new_labels = label_cleaning(prefeat,labels,member,printit=printit)
X,Y,Yn,Xsp,Ysp = computeXY(features,labels,new_labels,m1,m2,atiXYfile,atiXYsplitfile,printit)
surf, surfla = computeXY_persurf(Xsp,Ysp,atisurffile,keepind,n=n,k=k,printit=printit)
############ PREDICTING SCORE FOR ATI SENSOR DATA ROTATIONAL ##############
testing_accuracy_simple(surf, surfla, Xsp, Ysp, keepind, ltest=n)
############ PREDICTING SCORE FOR ATI SENSOR DATA DETAILED ##############
# _ = testing_accuracy(surf, surfla, ltest=6)
surfnosplit, surflanosplit = computeXY_persurf(X,Y,atisurffile,keepind,n=n,k=k,saveload=False,printit=printit)
for chosensurf in range(5):
if plotit:
visualize(f, surfnosplit, surflanosplit, chosensurf, plotpoints=200, printit=printit)
else:
print "Your dataset should be .mat file. You provided instead a file."+dataset[-3:]
| bsd-3-clause |
rousseab/pymatgen | pymatgen/electronic_structure/plotter.py | 1 | 42354 | # coding: utf-8
from __future__ import division, unicode_literals, print_function
"""
This module implements plotter for DOS and band structure.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 1, 2012"
import logging
import math
import itertools
from collections import OrderedDict
import numpy as np
from monty.json import jsanitize
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
logger = logging.getLogger('BSPlotter')
class DosPlotter(object):
"""
Class for plotting DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = DosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompleteDos.get_spd/element/others_dos().
plotter.add_dos_dict({"dos1": dos1, "dos2": dos2})
plotter.add_dos_dict(complete_dos.get_spd_dos())
Args:
zero_at_efermi: Whether to shift all Dos to have zero energy at the
fermi energy. Defaults to True.
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
def __init__(self, zero_at_efermi=True, stack=False, sigma=None):
self.zero_at_efermi = zero_at_efermi
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
Dos object
"""
energies = dos.energies - dos.efermi if self.zero_at_efermi \
else dos.energies
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
efermi = dos.efermi
self._doses[label] = {'energies': energies, 'densities': densities,
'efermi': efermi}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'energies':..,
'densities': {'up':...}, 'efermi':efermi}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
import prettyplotlib as ppl
from prettyplotlib import brewer2mpl
from pymatgen.util.plotting_utils import get_publication_quality_plot
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors
y = None
alldensities = []
allenergies = []
plt = get_publication_quality_plot(12, 8)
# Note that this complicated processing of energies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
energies = dos['energies']
densities = dos['densities']
if not y:
y = {Spin.up: np.zeros(energies.shape),
Spin.down: np.zeros(energies.shape)}
newdens = {}
for spin in [Spin.up, Spin.down]:
if spin in densities:
if self.stack:
y[spin] += densities[spin]
newdens[spin] = y[spin].copy()
else:
newdens[spin] = densities[spin]
allenergies.append(energies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allenergies.reverse()
allpts = []
for i, key in enumerate(keys):
x = []
y = []
for spin in [Spin.up, Spin.down]:
if spin in alldensities[i]:
densities = list(int(spin) * alldensities[i][spin])
energies = list(allenergies[i])
if spin == Spin.down:
energies.reverse()
densities.reverse()
x.extend(energies)
y.extend(densities)
allpts.extend(list(zip(x, y)))
if self.stack:
plt.fill(x, y, color=colors[i % ncolors],
label=str(key))
else:
ppl.plot(x, y, color=colors[i % ncolors],
label=str(key), linewidth=3)
if not self.zero_at_efermi:
ylim = plt.ylim()
ppl.plot([self._doses[key]['efermi'],
self._doses[key]['efermi']], ylim,
color=colors[i % ncolors],
linestyle='--', linewidth=2)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
if self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Energies (eV)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
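# Hypothetical usage sketch (not part of pymatgen itself): plot a smeared DOS
# from a finished VASP run. Module paths and file names are assumptions.
#
#     from pymatgen.io.vasp.outputs import Vasprun
#     dos = Vasprun("vasprun.xml").complete_dos
#     plotter = DosPlotter(sigma=0.05)
#     plotter.add_dos("Total DOS", dos)
#     plotter.add_dos_dict(dos.get_element_dos())
#     plotter.save_plot("dos.eps", img_format="eps", xlim=(-10, 5))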
class BSPlotter(object):
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
def __init__(self, bs):
if not isinstance(bs, BandStructureSymmLine):
raise ValueError(
"BSPlotter only works with BandStructureSymmLine objects. "
"A BandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
# TODO: come with an intelligent way to cut the highest unconverged
# bands
self._nb_bands = self._bs._nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize only plot the uniq values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
                    l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
                        l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self, zero_to_efermi=True):
"""
Get the data nicely formatted for a plot
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from the
eigenvalues and plot.
Returns:
A dict of the following format:
ticks: A dict with the 'distances' at which there is a kpoint (the
x axis) and the labels (None if no label)
energy: A dict storing bands for spin up and spin down data
[{Spin:[band_index][k_point_index]}] as a list (one element
for each branch) of energy for each kpoint. The data is
stored by branch to facilitate the plotting
vbm: A list of tuples (distance,energy) marking the vbms. The
                energies are shifted with respect to the fermi level if the
option has been selected.
cbm: A list of tuples (distance,energy) marking the cbms. The
                energies are shifted with respect to the fermi level if the
option has been selected.
lattice: The reciprocal lattice.
zero_energy: This is the energy used as zero for the plot.
band_gap:A string indicating the band gap and its nature (empty if
it's a metal).
is_metal: True if the band structure is metallic (i.e., there is at
least one band crossing the fermi level).
"""
distance = []
energy = []
if self._bs.is_metal():
zero_energy = self._bs.efermi
else:
zero_energy = self._bs.get_vbm()['energy']
if not zero_to_efermi:
zero_energy = 0.0
for b in self._bs._branches:
if self._bs.is_spin_polarized:
energy.append({str(Spin.up): [], str(Spin.down): []})
else:
energy.append({str(Spin.up): []})
distance.append([self._bs._distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
ticks = self.get_ticks()
for i in range(self._nb_bands):
energy[-1][str(Spin.up)].append(
[self._bs._bands[Spin.up][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
energy[-1][str(Spin.down)].append(
[self._bs._bands[Spin.down][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
vbm = self._bs.get_vbm()
cbm = self._bs.get_cbm()
vbm_plot = []
cbm_plot = []
for index in cbm['kpoint_index']:
cbm_plot.append((self._bs._distance[index],
cbm['energy'] - zero_energy if zero_to_efermi
else cbm['energy']))
for index in vbm['kpoint_index']:
vbm_plot.append((self._bs._distance[index],
vbm['energy'] - zero_energy if zero_to_efermi
else vbm['energy']))
bg = self._bs.get_band_gap()
direct = "Indirect"
if bg['direct']:
direct = "Direct"
return {'ticks': ticks, 'distances': distance, 'energy': energy,
'vbm': vbm_plot, 'cbm': cbm_plot,
'lattice': self._bs._lattice_rec.as_dict(),
'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(),
'band_gap': "{} {} bandgap = {}".format(direct,
bg['transition'],
bg['energy'])
if not self._bs.is_metal() else ""}
def get_plot(self, zero_to_efermi=True, ylim=None, smooth=False,
vbm_cbm_marker=False):
"""
get a matplotlib object for the bandstructure plot.
Blue lines are up spin, red lines are down
spin.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
            ylim: Specify the y-axis (energy) limits; by default None lets
                the code choose. It is vbm-4 and cbm+4 for an insulator,
                efermi-10 and efermi+10 for a metal
smooth: interpolates the bands by a spline cubic
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
from matplotlib import rc
import scipy.interpolate as scint
rc('text', usetex=True)
# main internal config options
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
band_linewidth = 3
data = self.bs_plot_data(zero_to_efermi)
if not smooth:
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))],
'r--', linewidth=band_linewidth)
else:
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))])
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
plt.plot([x * step + data['distances'][d][0]
for x in range(1000)],
[scint.splev(x * step + data['distances'][d][0],
tck, der=0)
for x in range(1000)], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))])
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
plt.plot([x * step + data['distances'][d][0]
for x in range(1000)],
[scint.splev(
x * step + data['distances'][d][0],
tck, der=0)
for x in range(1000)], 'r--',
linewidth=band_linewidth)
self._maketicks(plt)
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{E\ -\ E_f\ (eV)}$' if zero_to_efermi \
else r'$\mathrm{Energy\ (eV)}$'
plt.ylabel(ylabel, fontsize=30)
# Draw Fermi energy, only if not the zero
if not zero_to_efermi:
ef = self._bs.efermi
plt.axhline(ef, linewidth=2, color='k')
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is None:
if self._bs.is_metal():
# Plot A Metal
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs._efermi + e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min,
data['cbm'][0][1] + e_max)
else:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, zero_to_efermi=True, ylim=None, smooth=False):
"""
Show the plot using matplotlib.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
            ylim: Specify the y-axis (energy) limits; by default None lets
                the code choose. It is vbm-4 and cbm+4 for an insulator,
                efermi-10 and efermi+10 for a metal
smooth: interpolates the bands by a spline cubic
"""
plt = self.get_plot(zero_to_efermi, ylim, smooth)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None,
zero_to_efermi=True, smooth=False):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
smooth=smooth)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distance at which ticks should
be set and 'label': a list of label for each of those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs._kpoints[0].label
previous_branch = self._bs._branches[0]['name']
for i, c in enumerate(self._bs._kpoints):
if c.label is not None:
tick_distance.append(self._bs._distance[i])
this_branch = None
for b in self._bs._branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter):
"""
plot two band structure for comparison. One is in red the other in blue
(no difference in spins). The two band structures need to be defined
on the same symmetry lines! and the distance between symmetry lines is
the one of the band structure used to build the BSPlotter
Args:
another band structure object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures
"""
# TODO: add exception if the band structures are not compatible
plt = self.get_plot()
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
band_linewidth = 3
for i in range(other_plotter._nb_bands):
plt.plot(data_orig['distances'],
[e for e in data['energy'][str(Spin.up)][i]],
'r-', linewidth=band_linewidth)
if other_plotter._bs.is_spin_polarized:
plt.plot(data_orig['distances'],
[e for e in data['energy'][str(Spin.down)][i]],
'r-', linewidth=band_linewidth)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = Axes3D(fig)
vec1 = self._bs.lattice.matrix[0]
vec2 = self._bs.lattice.matrix[1]
vec3 = self._bs.lattice.matrix[2]
# make the grid
max_x = -1000
max_y = -1000
max_z = -1000
min_x = 1000
min_y = 1000
min_z = 1000
list_k_points = []
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
for k in [-1, 0, 1]:
list_k_points.append(i * vec1 + j * vec2 + k * vec3)
if list_k_points[-1][0] > max_x:
max_x = list_k_points[-1][0]
if list_k_points[-1][1] > max_y:
max_y = list_k_points[-1][1]
if list_k_points[-1][2] > max_z:
                        max_z = list_k_points[-1][2]
if list_k_points[-1][0] < min_x:
min_x = list_k_points[-1][0]
if list_k_points[-1][1] < min_y:
min_y = list_k_points[-1][1]
if list_k_points[-1][2] < min_z:
                        min_z = list_k_points[-1][2]
vertex = _qvertex_target(list_k_points, 13)
lines = get_lines_voronoi(vertex)
for i in range(len(lines)):
vertex1 = lines[i]['start']
vertex2 = lines[i]['end']
ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
[vertex1[2], vertex2[2]], color='k')
for b in self._bs._branches:
vertex1 = self._bs.kpoints[b['start_index']].cart_coords
vertex2 = self._bs.kpoints[b['end_index']].cart_coords
ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
[vertex1[2], vertex2[2]], color='r', linewidth=3)
for k in self._bs.kpoints:
if k.label:
label = k.label
if k.label.startswith("\\") or k.label.find("_") != -1:
label = "$" + k.label + "$"
off = 0.01
ax.text(k.cart_coords[0] + off, k.cart_coords[1] + off,
k.cart_coords[2] + off, label, color='b', size='25')
ax.scatter([k.cart_coords[0]], [k.cart_coords[1]],
[k.cart_coords[2]], color='b')
# make ticklabels and ticklines invisible
for a in ax.w_xaxis.get_ticklines() + ax.w_xaxis.get_ticklabels():
a.set_visible(False)
for a in ax.w_yaxis.get_ticklines() + ax.w_yaxis.get_ticklabels():
a.set_visible(False)
for a in ax.w_zaxis.get_ticklines() + ax.w_zaxis.get_ticklabels():
a.set_visible(False)
ax.grid(False)
plt.show()
ax.axis("off")
class BSPlotterProjected(BSPlotter):
"""
Class to plot or get data to facilitate the plot of band structure objects
projected along orbitals, elements or sites.
Args:
bs: A BandStructureSymmLine object with projections.
"""
def __init__(self, bs):
if len(bs._projections) == 0:
raise ValueError("try to plot projections"
" on a band structure without any")
super(BSPlotterProjected, self).__init__(bs)
def _get_projections_by_branches(self, dictio):
proj = self._bs.get_projections_on_elts_and_orbitals(dictio)
proj_br = []
print(len(proj[Spin.up]))
print(len(proj[Spin.up][0]))
for c in proj[Spin.up][0]:
print(c)
for b in self._bs._branches:
print(b)
if self._bs.is_spin_polarized:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)]})
print((len(proj_br[-1][str(Spin.up)]), self._nb_bands))
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
proj_br[-1][str(Spin.up)][i].append(
{e: {o: proj[Spin.up][i][j][e][o]
for o in proj[Spin.up][i][j][e]}
for e in proj[Spin.up][i][j]})
if self._bs.is_spin_polarized:
for b in self._bs._branches:
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
proj_br[-1][str(Spin.down)][i].append(
{e: {o: proj[Spin.down][i][j][e][o]
for o in proj[Spin.down][i][j][e]}
for e in proj[Spin.down][i][j]})
return proj_br
def get_projected_plots_dots(self, dictio, zero_to_efermi=True, ylim=None,
vbm_cbm_marker=False):
"""
Method returning a plot composed of subplots along different elements
and orbitals.
Args:
dictio: The element and orbitals you want a projection on. The
format is {Element:[Orbitals]} for instance
{'Cu':['d','s'],'O':['p']} will give projections for Cu on
d and s orbitals and on oxygen p.
Returns:
a pylab object with different subfigures for each projection
The blue and red colors are for spin up and spin down.
The bigger the red or blue dot in the band structure the higher
character for the corresponding element and orbital.
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
band_linewidth = 1.0
fig_number = sum([len(v) for v in dictio.values()])
proj = self._get_projections_by_branches(dictio)
data = self.bs_plot_data(zero_to_efermi)
plt = get_publication_quality_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in dictio:
for o in dictio[el]:
plt.subplot(100 * math.ceil(fig_number / 2) + 20 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))],
'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in
range(len(data['distances'][b]))],
'r--', linewidth=band_linewidth)
for j in range(
len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.down)][i][
j], 'ro',
markersize=
proj[b][str(Spin.down)][i][j][str(el)][
o] * 15.0)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j],
'bo',
markersize=
proj[b][str(Spin.up)][i][j][str(el)][
o] * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs._efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r',
marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g',
marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el) + " " + str(o))
count += 1
return plt
def get_elt_projected_plots(self, zero_to_efermi=True, ylim=None,
vbm_cbm_marker=False):
"""
Method returning a plot composed of subplots along different elements
Returns:
a pylab object with different subfigures for each projection
The blue and red colors are for spin up and spin down
The bigger the red or blue dot in the band structure the higher
character for the corresponding element and orbital
"""
band_linewidth = 1.0
proj = self._get_projections_by_branches({e.symbol: ['s', 'p', 'd']
for e in
self._bs._structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in self._bs._structure.composition.elements:
plt.subplot(220 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in range(len(data['distances'][b]))],
'r--', linewidth=band_linewidth)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.down)][i][j],
'ro',
markersize=sum([proj[b][str(Spin.down)][i][
j][str(el)][o] for o in
proj[b]
[str(Spin.down)][i][j][
str(el)]]) * 15.0)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j], 'bo',
markersize=sum(
[proj[b][str(Spin.up)][i][j][str(el)][o]
for o in proj[b]
[str(Spin.up)][i][j][str(el)]]) * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs._efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el))
count += 1
return plt
def get_elt_projected_plots_color(self, zero_to_efermi=True,
elt_ordered=None):
"""
returns a pylab plot object with one plot where the band structure
line color depends on the character of the band (along different
elements). Each element is associated with red, green or blue
and the corresponding rgb color depending on the character of the band
        is used. The method can only deal with binary and ternary compounds.
        Spin up and spin down are differentiated by a '-' and a '--' line.
Args:
elt_ordered: A list of Element ordered. The first one is red,
second green, last blue
Returns:
a pylab object
"""
band_linewidth = 3.0
if len(self._bs._structure.composition.elements) > 3:
raise ValueError
if elt_ordered is None:
elt_ordered = self._bs._structure.composition.elements
proj = self._get_projections_by_branches(
{e.symbol: ['s', 'p', 'd']
for e in self._bs._structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
spins = [Spin.up]
if self._bs.is_spin_polarized:
spins = [Spin.up, Spin.down]
self._maketicks(plt)
for s in spins:
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
for j in range(len(data['energy'][b][str(s)][i]) - 1):
sum_e = 0.0
for el in elt_ordered:
sum_e = sum_e + \
sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
if sum_e == 0.0:
color = [0.0] * len(elt_ordered)
else:
color = [sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
/ sum_e
for el in elt_ordered]
if len(color) == 2:
color.append(0.0)
color[2] = color[1]
color[1] = 0.0
sign = '-'
if s == Spin.down:
sign = '--'
plt.plot([data['distances'][b][j],
data['distances'][b][j + 1]],
[data['energy'][b][str(s)][i][j],
data['energy'][b][str(s)][i][j + 1]], sign,
color=color, linewidth=band_linewidth)
plt.ylim(data['vbm'][0][1] - 4.0, data['cbm'][0][1] + 2.0)
return plt
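# Hypothetical usage sketch (not part of pymatgen itself): plot a band
# structure from a line-mode VASP run. Module paths and file names are
# assumptions.
#
#     from pymatgen.io.vasp.outputs import Vasprun
#     bs = Vasprun("vasprun.xml").get_band_structure(line_mode=True)
#     plotter = BSPlotter(bs)
#     plotter.save_plot("bands.eps", img_format="eps", ylim=(-4, 4))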
def _qvertex_target(data, index):
"""
Input data should be in the form of a list of a list of floats.
index is the index of the targeted point
Returns the vertices of the voronoi construction around this target point.
"""
from pyhull import qvoronoi
output = qvoronoi("p QV" + str(index), data)
output.pop(0)
output.pop(0)
return [[float(i) for i in row.split()] for row in output]
def get_lines_voronoi(data):
from pyhull import qconvex
output = qconvex("o", data)
nb_points = int(output[1].split(" ")[0])
list_lines = []
list_points = []
for i in range(2, 2 + nb_points):
list_points.append([float(c) for c in output[i].strip().split()])
facets = []
for i in range(2 + nb_points, len(output)):
if output[i] != '':
tmp = output[i].strip().split(" ")
facets.append([int(tmp[j]) for j in range(1, len(tmp))])
for i in range(len(facets)):
for line in itertools.combinations(facets[i], 2):
for j in range(len(facets)):
if i != j and line[0] in facets[j] and line[1] in facets[j]:
# check if the two facets i and j are not coplanar
vector1 = np.array(list_points[facets[j][0]]) \
- np.array(list_points[facets[j][1]])
vector2 = np.array(list_points[facets[j][0]]) \
- np.array(list_points[facets[j][2]])
n1 = np.cross(vector1, vector2)
vector1 = np.array(list_points[facets[i][0]]) \
- np.array(list_points[facets[i][1]])
vector2 = np.array(list_points[facets[i][0]]) \
- np.array(list_points[facets[i][2]])
n2 = np.cross(vector1, vector2)
dot = math.fabs(np.dot(n1, n2) / (np.linalg.norm(n1)
* np.linalg.norm(n2)))
if 1.05 > dot > 0.95:
continue
list_lines.append({'start': list_points[line[0]],
'end': list_points[line[1]]})
break
return list_lines
| mit |
elsdrium/.unix_settings | .ipython/profile_nbserver/ipython_config.py | 1 | 23360 | # Configuration file for ipython.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
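# Illustrative example (not generated by IPython): to customise this profile,
# uncomment and adapt settings documented above, for instance
#
# c.InteractiveShellApp.exec_lines = ['import numpy as np']
# c.InteractiveShellApp.extensions = ['autoreload']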
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# Whether to create profile dir if it doesn't exist
# c.BaseIPythonApplication.auto_create = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.BaseIPythonApplication.copy_config_files = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.BaseIPythonApplication.extra_config_file = u''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.BaseIPythonApplication.ipython_dir = u''
# Whether to overwrite existing config files when copying
# c.BaseIPythonApplication.overwrite = False
# The IPython profile to use.
# c.BaseIPythonApplication.profile = u'default'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell configuration
#------------------------------------------------------------------------------
# An enhanced, interactive shell for Python.
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.InteractiveShell.ast_transformers = []
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.InteractiveShell.autocall = 0
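# A minimal illustration based on the description above (not a shipped
# default): with autocall set to 1, typing ``len [1, 2, 3]`` is rewritten to
# ``len([1, 2, 3])``, while a bare ``len`` is left uncalled; with 2, even the
# bare name would be called.
# c.InteractiveShell.autocall = 1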
# Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
# Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.InteractiveShell.banner1 = 'Python 2.7.6 (default, Jun 22 2015, 17:58:13) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.0.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# The part of the banner to be printed after the profile
# c.InteractiveShell.banner2 = ''
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.InteractiveShell.cache_size = 1000
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.InteractiveShell.color_info = True
# Set the color scheme (NoColor, Neutral, Linux, or LightBG).
# c.InteractiveShell.colors = 'Neutral'
#
# c.InteractiveShell.debug = False
# **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.InteractiveShell.deep_reload = False
# Don't call post-execute functions that have failed in the past.
# c.InteractiveShell.disable_failing_post_execute = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.InteractiveShell.display_page = False
# (Provisional API) enables html representation in mime bundles sent to pagers.
# c.InteractiveShell.enable_html_pager = False
# Total length of command history
# c.InteractiveShell.history_length = 10000
# The number of saved history entries to be loaded into the history buffer at
# startup.
# c.InteractiveShell.history_load_length = 1000
#
# c.InteractiveShell.ipython_dir = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.InteractiveShell.logappend = ''
# The name of the logfile to use.
# c.InteractiveShell.logfile = ''
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.InteractiveShell.logstart = False
#
# c.InteractiveShell.object_info_string_level = 0
# Automatically call the pdb debugger after every exception.
# c.InteractiveShell.pdb = False
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
# Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompts_pad_left = True
#
# c.InteractiveShell.quiet = False
#
# c.InteractiveShell.separate_in = '\n'
#
# c.InteractiveShell.separate_out = ''
#
# c.InteractiveShell.separate_out2 = ''
# Show rewritten input, e.g. for autocall.
# c.InteractiveShell.show_rewritten_input = True
# Enables rich html representation of docstrings. (This requires the docrepr
# module).
# c.InteractiveShell.sphinxify_docstring = False
#
# c.InteractiveShell.wildcards_case_sensitive = True
#
# c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.display_completions = 'multicolumn'
# DEPRECATED
# c.TerminalInteractiveShell.display_completions_in_columns = None
# Shortcut style to use at the prompt. 'vi' or 'emacs'.
# c.TerminalInteractiveShell.editing_mode = 'emacs'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = u'vi'
# Highlight matching brackets.
# c.TerminalInteractiveShell.highlight_matching_brackets = True
# The name of a Pygments style to use for syntax highlighting: manni, igor,
# lovelace, xcode, vim, autumn, vs, rrt, native, perldoc, borland, tango, emacs,
# friendly, monokai, paraiso-dark, colorful, murphy, bw, pastie, algol_nu,
# paraiso-light, trac, default, algol, fruity
# c.TerminalInteractiveShell.highlighting_style = 'legacy'
# Override highlighting format for specific tokens
# c.TerminalInteractiveShell.highlighting_style_overrides = {}
# Enable mouse support in the prompt
# c.TerminalInteractiveShell.mouse_support = False
# Class used to generate Prompt token for prompt_toolkit
# c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
# Use `raw_input` for the REPL, without completion, multiline input, and prompt
# colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known uses are: IPython's own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
# c.TerminalInteractiveShell.simple_prompt = False
# Number of lines at the bottom of the screen to reserve for the completion menu
# c.TerminalInteractiveShell.space_for_menu = 6
# Automatically set the terminal title
# c.TerminalInteractiveShell.term_title = True
#------------------------------------------------------------------------------
# HistoryAccessorBase configuration
#------------------------------------------------------------------------------
# An abstract class for History Accessors
#------------------------------------------------------------------------------
# HistoryAccessor configuration
#------------------------------------------------------------------------------
# Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryAccessor.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryAccessor.enabled = True
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colons at both
# ends, but not the backticks), to avoid creating a history file.
# c.HistoryAccessor.hist_file = u''
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# BaseFormatter configuration
#------------------------------------------------------------------------------
# A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
#    :attr:`print_method`. If it does, that object is passed to that method
#    for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
#    to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
#    and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
#
# c.BaseFormatter.deferred_printers = {}
#
# c.BaseFormatter.enabled = True
#
# c.BaseFormatter.singleton_printers = {}
#
# c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
#
# c.PlainTextFormatter.float_precision = ''
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer configuration
#------------------------------------------------------------------------------
# Activate greedy completion. PENDING DEPRECATION: this is now mostly taken
# care of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# Magics configuration
#------------------------------------------------------------------------------
# Base class for implementing magic functions.
#
# Shell functions which can be reached as %function_name. All magic functions
# should accept a string, which they can parse for their own needs. This can
# make some functions easier to type, eg `%cd ../` vs. `%cd("../")`
#
# Classes providing magic functions need to subclass this class, and they MUST:
#
# - Use the method decorators `@line_magic` and `@cell_magic` to decorate
# individual methods as magic functions, AND
#
# - Use the class decorator `@magics_class` to ensure that the magic
# methods are properly registered at the instance level upon instance
# initialization.
#
# See :mod:`magic_functions` for examples of actual implementation classes.
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
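# For example (hypothetical interpreter name and path), to expose a custom
# ``%%ruby19`` cell magic backed by an interpreter that is not on $PATH:
# c.ScriptMagics.script_magics = ['ruby19']
# c.ScriptMagics.script_paths = {'ruby19': '/opt/rubies/1.9/bin/ruby'}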
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
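# Illustrative workflow (hypothetical variable name): after running
# ``%store results`` in one session, enabling autorestore makes ``results``
# reappear automatically at the next startup.
# c.StoreMagics.autorestore = True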
| mit |
ssheybani/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_replicas_mapreduce.py | 19 | 5506 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
# benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(
['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
# print benchmark_df1['shard_replicas']
# print benchmark_df1
# print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(mapreduce_seconds_kilo, replicas_kilo, mapreduce_seconds_chameleon, replicas_chameleon, mapreduce_seconds_jetstream, replicas_jetstream):
"""formats and creates a line chart
@param1: find_seconds_kilo Array with find_seconds from kilo
@type: numpy array
@param2: replicas_kilo Array with replicas from kilo
@type: numpy array
@param3: find_seconds_chameleon Array with find_seconds from chameleon
@type: numpy array
@param4: replicas_chameleon Array with replicas from chameleon
@type: numpy array
"""
fig = plt.figure()
    #plt.title('Average MapReduce Runtime by Shard Replication Factor')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Degree of Replication Per Set')
# Make the chart
plt.plot(replicas_kilo, mapreduce_seconds_kilo, label='Kilo Cloud')
plt.plot(replicas_chameleon, mapreduce_seconds_chameleon, label='Chameleon Cloud')
plt.plot(replicas_jetstream, mapreduce_seconds_jetstream, label='Jetstream Cloud')
# http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/replica_mapreduce.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
mapreduce_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[8]])
replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
mapreduce_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[8]])
replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
mapreduce_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[8]])
replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(mapreduce_seconds_kilo, replicas_kilo, mapreduce_seconds_chameleon, replicas_chameleon, mapreduce_seconds_jetstream, replicas_jetstream)
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/tests/backend_driver.py | 3 | 15270 | #!/usr/bin/env python
from __future__ import print_function, division
"""
This is used to drive many of the examples across the backends, for
regression testing, and comparing backend efficiency.
You can specify the backends to be tested either via the --backends
switch, which takes a comma-separated list, or as separate arguments,
e.g.
python backend_driver.py agg ps
would test the agg and ps backends. If no arguments are given, a
default list of backends will be tested.
Interspersed with the backend arguments can be switches for the Python
interpreter executing the tests. If entering such arguments causes an
option parsing error with the driver script, separate them from driver
switches with a --.
"""
import os
import time
import sys
import glob
from optparse import OptionParser
import matplotlib.rcsetup as rcsetup
from matplotlib.cbook import Bunch, dedent
all_backends = list(rcsetup.all_backends) # to leave the original list alone
# actual physical directory for each dir
dirs = dict(files=os.path.join('..', 'lines_bars_and_markers'),
shapes=os.path.join('..', 'shapes_and_collections'),
images=os.path.join('..', 'images_contours_and_fields'),
pie=os.path.join('..', 'pie_and_polar_charts'),
text=os.path.join('..', 'text_labels_and_annotations'),
ticks=os.path.join('..', 'ticks_and_spines'),
subplots=os.path.join('..', 'subplots_axes_and_figures'),
specialty=os.path.join('..', 'specialty_plots'),
showcase=os.path.join('..', 'showcase'),
pylab = os.path.join('..', 'pylab_examples'),
api = os.path.join('..', 'api'),
units = os.path.join('..', 'units'),
mplot3d = os.path.join('..', 'mplot3d'))
# files in each dir
files = dict()
files['lines'] = [
'barh_demo.py',
'fill_demo.py',
'fill_demo_features.py',
'line_demo_dash_control.py',
'line_styles_reference.py',
'scatter_with_legend.py'
]
files['shapes'] = [
'path_patch_demo.py',
'scatter_demo.py',
]
files['colors'] = [
'color_cycle_demo.py',
]
files['images'] = [
'imshow_demo.py',
]
files['statistics'] = [
'errorbar_demo.py',
'errorbar_demo_features.py',
'histogram_demo_cumulative.py',
'histogram_demo_features.py',
'histogram_demo_histtypes.py',
'histogram_demo_multihist.py',
]
files['pie'] = [
'pie_demo.py',
'polar_bar_demo.py',
'polar_scatter_demo.py',
]
files['text_labels_and_annotations'] = [
'text_demo_fontdict.py',
'unicode_demo.py',
]
files['ticks_and_spines'] = [
'spines_demo_bounds.py',
'ticklabels_demo_rotation.py',
]
files['subplots_axes_and_figures'] = [
'subplot_demo.py',
]
files['showcase'] = [
'integral_demo.py',
]
files['pylab'] = [
'accented_text.py',
'alignment_test.py',
'annotation_demo.py',
'annotation_demo.py',
'annotation_demo2.py',
'annotation_demo2.py',
'anscombe.py',
'arctest.py',
'arrow_demo.py',
'axes_demo.py',
'axes_props.py',
'axhspan_demo.py',
'axis_equal_demo.py',
'bar_stacked.py',
'barb_demo.py',
'barchart_demo.py',
'barcode_demo.py',
'boxplot_demo.py',
'broken_barh.py',
'clippedline.py',
'cohere_demo.py',
'color_by_yvalue.py',
'color_demo.py',
'colorbar_tick_labelling_demo.py',
'contour_demo.py',
'contour_image.py',
'contour_label_demo.py',
'contourf_demo.py',
'contourf_log.py',
'coords_demo.py',
'coords_report.py',
'csd_demo.py',
'cursor_demo.py',
'custom_cmap.py',
'custom_figure_class.py',
'custom_ticker1.py',
'customize_rc.py',
'dashpointlabel.py',
'date_demo1.py',
'date_demo2.py',
'date_demo_convert.py',
'date_demo_rrule.py',
'date_index_formatter.py',
'dolphin.py',
'ellipse_collection.py',
'ellipse_demo.py',
'ellipse_rotated.py',
'equal_aspect_ratio.py',
'errorbar_limits.py',
'fancyarrow_demo.py',
'fancybox_demo.py',
'fancybox_demo2.py',
'fancytextbox_demo.py',
'figimage_demo.py',
'figlegend_demo.py',
'figure_title.py',
'fill_between_demo.py',
'fill_spiral.py',
'finance_demo.py',
'findobj_demo.py',
'fonts_demo.py',
'fonts_demo_kw.py',
'ganged_plots.py',
'geo_demo.py',
'gradient_bar.py',
'griddata_demo.py',
'hatch_demo.py',
'hexbin_demo.py',
'hexbin_demo2.py',
'hist_colormapped.py',
'vline_hline_demo.py',
'image_clip_path.py',
'image_demo.py',
'image_demo2.py',
'image_interp.py',
'image_masked.py',
'image_nonuniform.py',
'image_origin.py',
'image_slices_viewer.py',
'interp_demo.py',
'invert_axes.py',
'layer_images.py',
'legend_demo2.py',
'legend_demo3.py',
'line_collection.py',
'line_collection2.py',
'log_bar.py',
'log_demo.py',
'log_test.py',
'major_minor_demo1.py',
'major_minor_demo2.py',
'manual_axis.py',
'masked_demo.py',
'mathtext_demo.py',
'mathtext_examples.py',
'matplotlib_icon.py',
'matshow.py',
'mri_demo.py',
'mri_with_eeg.py',
'multi_image.py',
'multiline.py',
'multiple_figs_demo.py',
'nan_test.py',
'newscalarformatter_demo.py',
'pcolor_demo.py',
'pcolor_log.py',
'pcolor_small.py',
'pie_demo2.py',
'plotfile_demo.py',
'polar_demo.py',
'polar_legend.py',
'psd_demo.py',
'psd_demo2.py',
'psd_demo3.py',
'quadmesh_demo.py',
'quiver_demo.py',
'scatter_custom_symbol.py',
'scatter_demo2.py',
'scatter_masked.py',
'scatter_profile.py',
'scatter_star_poly.py',
#'set_and_get.py',
'shared_axis_across_figures.py',
'shared_axis_demo.py',
'simple_plot.py',
'specgram_demo.py',
'spine_placement_demo.py',
'spy_demos.py',
'stem_plot.py',
'step_demo.py',
'stix_fonts_demo.py',
'stock_demo.py',
'subplots_adjust.py',
'symlog_demo.py',
'table_demo.py',
'text_handles.py',
'text_rotation.py',
'text_rotation_relative_to_line.py',
'transoffset.py',
'xcorr_demo.py',
'zorder_demo.py',
]
files['api'] = [
'agg_oo.py',
'barchart_demo.py',
'bbox_intersect.py',
'collections_demo.py',
'colorbar_only.py',
'custom_projection_example.py',
'custom_scale_example.py',
'date_demo.py',
'date_index_formatter.py',
'donut_demo.py',
'font_family_rc.py',
'image_zcoord.py',
'joinstyle.py',
'legend_demo.py',
'line_with_text.py',
'logo2.py',
'mathtext_asarray.py',
'patch_collection.py',
'quad_bezier.py',
'scatter_piecharts.py',
'span_regions.py',
'two_scales.py',
'unicode_minus.py',
'watermark_image.py',
'watermark_text.py',
]
files['units'] = [
'annotate_with_units.py',
#'artist_tests.py', # broken, fixme
'bar_demo2.py',
#'bar_unit_demo.py', # broken, fixme
#'ellipse_with_units.py', # broken, fixme
'radian_demo.py',
'units_sample.py',
#'units_scatter.py', # broken, fixme
]
files['mplot3d'] = [
'2dcollections3d_demo.py',
'bars3d_demo.py',
'contour3d_demo.py',
'contour3d_demo2.py',
'contourf3d_demo.py',
'lines3d_demo.py',
'polys3d_demo.py',
'scatter3d_demo.py',
'surface3d_demo.py',
'surface3d_demo2.py',
'text3d_demo.py',
'wire3d_demo.py',
]
# dict from dir to files we know we don't want to test (eg examples
# not using pyplot, examples requiring user input, animation examples,
# examples that may only work in certain environs (usetex examples?),
# examples that generate multiple figures
excluded = {
'pylab' : ['__init__.py', 'toggle_images.py',],
'units' : ['__init__.py', 'date_support.py',],
}
def report_missing(dir, flist):
'report the py files in dir that are not in flist'
globstr = os.path.join(dir, '*.py')
fnames = glob.glob(globstr)
pyfiles = set([os.path.split(fullpath)[-1] for fullpath in set(fnames)])
exclude = set(excluded.get(dir, []))
flist = set(flist)
missing = list(pyfiles-flist-exclude)
missing.sort()
if missing:
print ('%s files not tested: %s'%(dir, ', '.join(missing)))
def report_all_missing(directories):
for f in directories:
report_missing(dirs[f], files[f])
# tests known to fail on a given backend
failbackend = dict(
svg = ('tex_demo.py', ),
agg = ('hyperlinks.py', ),
pdf = ('hyperlinks.py', ),
ps = ('hyperlinks.py', ),
)
try:
import subprocess
def run(arglist):
try:
ret = subprocess.call(arglist)
except KeyboardInterrupt:
sys.exit()
else:
return ret
except ImportError:
def run(arglist):
os.system(' '.join(arglist))
def drive(backend, directories, python=['python'], switches = []):
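    # For each example script, drive() writes a temporary wrapper that forces
    # the requested backend, skips the example's own show()/savefig()/
    # matplotlib.use() lines, and then either calls show() (interactive
    # backends) or saves the figure into a directory named after the backend.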
exclude = failbackend.get(backend, [])
# Clear the destination directory for the examples
path = backend
if os.path.exists(path):
import glob
for fname in os.listdir(path):
os.unlink(os.path.join(path, fname))
else:
os.mkdir(backend)
failures = []
testcases = [os.path.join(dirs[d], fname)
for d in directories
for fname in files[d]]
for fullpath in testcases:
        print('\tdriving %-40s' % (fullpath), end='')
sys.stdout.flush()
fpath, fname = os.path.split(fullpath)
if fname in exclude:
            print('\tSkipping %s, known to fail on backend: %s' % (fname, backend))
continue
basename, ext = os.path.splitext(fname)
outfile = os.path.join(path, basename)
tmpfile_name = '_tmp_%s.py' % basename
tmpfile = open(tmpfile_name, 'w')
future_imports = 'from __future__ import division, print_function'
for line in open(fullpath):
line_lstrip = line.lstrip()
if line_lstrip.startswith("#"):
tmpfile.write(line)
elif 'unicode_literals' in line:
future_imports = future_imports + ', unicode_literals'
tmpfile.writelines((
future_imports+'\n',
'import sys\n',
'sys.path.append("%s")\n' % fpath.replace('\\', '\\\\'),
'import matplotlib\n',
'matplotlib.use("%s")\n' % backend,
'from pylab import savefig\n',
'import numpy\n',
'numpy.seterr(invalid="ignore")\n',
))
for line in open(fullpath):
line_lstrip = line.lstrip()
if (line_lstrip.startswith('from __future__ import') or
line_lstrip.startswith('matplotlib.use') or
line_lstrip.startswith('savefig') or
line_lstrip.startswith('show')):
continue
tmpfile.write(line)
if backend in rcsetup.interactive_bk:
tmpfile.write('show()')
else:
tmpfile.write('\nsavefig(r"%s", dpi=150)' % outfile)
tmpfile.close()
start_time = time.time()
program = [x % {'name': basename} for x in python]
ret = run(program + [tmpfile_name] + switches)
end_time = time.time()
print ("%s %s" % ((end_time - start_time), ret))
#os.system('%s %s %s' % (python, tmpfile_name, ' '.join(switches)))
os.remove(tmpfile_name)
if ret:
failures.append(fullpath)
return failures
def parse_options():
doc = (__doc__ and __doc__.split('\n\n')) or " "
op = OptionParser(description=doc[0].strip(),
usage='%prog [options] [--] [backends and switches]',
#epilog='\n'.join(doc[1:]) # epilog not supported on my python2.4 machine: JDH
)
op.disable_interspersed_args()
op.set_defaults(dirs='pylab,api,units,mplot3d',
clean=False, coverage=False, valgrind=False)
op.add_option('-d', '--dirs', '--directories', type='string',
dest='dirs', help=dedent('''
Run only the tests in these directories; comma-separated list of
one or more of: pylab (or pylab_examples), api, units, mplot3d'''))
op.add_option('-b', '--backends', type='string', dest='backends',
help=dedent('''
Run tests only for these backends; comma-separated list of
one or more of: agg, ps, svg, pdf, template, cairo,
Default is everything except cairo.'''))
op.add_option('--clean', action='store_true', dest='clean',
help='Remove result directories, run no tests')
op.add_option('-c', '--coverage', action='store_true', dest='coverage',
help='Run in coverage.py')
op.add_option('-v', '--valgrind', action='store_true', dest='valgrind',
help='Run in valgrind')
options, args = op.parse_args()
switches = [x for x in args if x.startswith('--')]
backends = [x.lower() for x in args if not x.startswith('--')]
if options.backends:
backends += [be.lower() for be in options.backends.split(',')]
result = Bunch(
dirs = options.dirs.split(','),
backends = backends or ['agg', 'ps', 'svg', 'pdf', 'template'],
clean = options.clean,
coverage = options.coverage,
valgrind = options.valgrind,
switches = switches)
if 'pylab_examples' in result.dirs:
result.dirs[result.dirs.index('pylab_examples')] = 'pylab'
#print result
return (result)
if __name__ == '__main__':
times = {}
failures = {}
options = parse_options()
if options.clean:
localdirs = [d for d in glob.glob('*') if os.path.isdir(d)]
all_backends_set = set(all_backends)
for d in localdirs:
if d.lower() not in all_backends_set:
continue
print ('removing %s'%d)
for fname in glob.glob(os.path.join(d, '*')):
os.remove(fname)
os.rmdir(d)
for fname in glob.glob('_tmp*.py'):
os.remove(fname)
print ('all clean...')
raise SystemExit
if options.coverage:
python = ['coverage.py', '-x']
elif options.valgrind:
python = ['valgrind', '--tool=memcheck', '--leak-check=yes',
'--log-file=%(name)s', sys.executable]
elif sys.platform == 'win32':
python = [sys.executable]
else:
python = [sys.executable]
report_all_missing(options.dirs)
for backend in options.backends:
print ('testing %s %s' % (backend, ' '.join(options.switches)))
t0 = time.time()
failures[backend] = \
drive(backend, options.dirs, python, options.switches)
t1 = time.time()
times[backend] = (t1-t0)/60.0
# print times
for backend, elapsed in times.items():
print ('Backend %s took %1.2f minutes to complete' % (backend, elapsed))
failed = failures[backend]
if failed:
print (' Failures: %s' % failed)
if 'template' in times:
print ('\ttemplate ratio %1.3f, template residual %1.3f' % (
elapsed/times['template'], elapsed-times['template']))
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/testing/jpl_units/UnitDblConverter.py | 8 | 5600 | #===========================================================================
#
# UnitDblConverter
#
#===========================================================================
"""UnitDblConverter module containing class UnitDblConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
import matplotlib.projections.polar as polar
from matplotlib.cbook import iterable
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblConverter' ]
#===========================================================================
# A special function for use with the matplotlib FuncFormatter class
# for formatting axes with radian units.
# This was copied from matplotlib example code.
def rad_fn(x, pos = None ):
"""Radian function formatter."""
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return str(x)
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n/2,)
else:
return r'$%s\pi/2$' % (n,)
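# A minimal sketch (not part of this module) of how rad_fn is typically
# attached to an axis through matplotlib's FuncFormatter; `ax` is assumed to
# be an existing Axes instance:
#   ax.xaxis.set_major_formatter(ticker.FuncFormatter(rad_fn))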
#===========================================================================
class UnitDblConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for the Monte UnitDbl class.
"""
# default for plotting
defaults = {
"distance" : 'km',
"angle" : 'deg',
"time" : 'sec',
}
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has Epoch data.
= INPUT VARIABLES
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
# Check to see if the value used for units is a string unit value
# or an actual instance of a UnitDbl so that we can use the unit
# value for the default axis label value.
if ( unit ):
if ( isinstance( unit, six.string_types ) ):
label = unit
else:
label = unit.label()
else:
label = None
if ( label == "deg" ) and isinstance( axis.axes, polar.PolarAxes ):
# If we want degrees for a polar plot, use the PolarPlotFormatter
majfmt = polar.PolarAxes.ThetaFormatter()
else:
majfmt = U.UnitDblFormatter( useOffset = False )
return units.AxisInfo( majfmt = majfmt, label = label )
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- value The value or list of values that need to be converted.
      - unit     The units to use for an axis with UnitDbl data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
isNotUnitDbl = True
if ( iterable(value) and not isinstance(value, six.string_types) ):
if ( len(value) == 0 ):
return []
else:
return [ UnitDblConverter.convert( x, unit, axis ) for x in value ]
# We need to check to see if the incoming value is actually a UnitDbl and
# set a flag. If we get an empty list, then just return an empty list.
if ( isinstance(value, U.UnitDbl) ):
isNotUnitDbl = False
# If the incoming value behaves like a number, but is not a UnitDbl,
# then just return it because we don't know how to convert it
# (or it is already converted)
if ( isNotUnitDbl and units.ConversionInterface.is_numlike( value ) ):
return value
# If no units were specified, then get the default units to use.
if ( unit == None ):
unit = UnitDblConverter.default_units( value, axis )
# Convert the incoming UnitDbl value/values to float/floats
if isinstance( axis.axes, polar.PolarAxes ) and (value.type() == "angle"):
# Guarantee that units are radians for polar plots.
return value.convert( "rad" )
return value.convert( unit )
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
Return the default unit for value, or None.
"""
# Determine the default units based on the user preferences set for
# default units when printing a UnitDbl.
if ( iterable(value) and not isinstance(value, six.string_types) ):
return UnitDblConverter.default_units( value[0], axis )
else:
return UnitDblConverter.defaults[ value.type() ]
| mit |
dav-stott/phd-thesis | ais_mtbvi_stats.py | 1 | 9821 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 18 23:33:37 2016
@author: dav
"""
import numpy as np
from osgeo import gdal
from osgeo import osr
import numpy.ma as ma
import os
import matplotlib.pyplot as plt
#import Image
import fiona
from shapely.geometry import shape
from shapely.geometry import asPoint
from scipy import stats
def writeimage(outpath,
outname,
image,
spatial):
data_out = image
print('ROWS,COLS',image.shape)
print('Call to write image')
os.chdir(outpath)
print('OUTPATH',outpath)
print('OUTNAME',outname)
#load the driver for the format of choice
driver = gdal.GetDriverByName("Gtiff")
#create an empty output file
#get the number of bands we'll need:
print (image.shape)
if len(image.shape)>2:
bands = image.shape[2]
else:
bands =1
print('BANDS OUT', bands)
#file name, x columns, y columns, bands, dtype
out = driver.Create(outname, image.shape[1], image.shape[0], bands, gdal.GDT_Float32)
#define the location using coords of top-left corner
# minimum x, e-w pixel size, rotation, maximum y, n-s pixel size, rotation
out.SetGeoTransform(spatial)
srs = osr.SpatialReference()
    #get the coordinate system using the EPSG code
srs.SetWellKnownGeogCS("EPSG:4277")
    #set projection of output file
out.SetProjection(srs.ExportToWkt())
band = 1
if bands == 1:
out.GetRasterBand(band).WriteArray(data_out)
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
#apend the statistics to dataset
out.GetRasterBand(band).GetStatistics(0,1)
print('Saving %s/%s' % (band,bands))
else:
while (band<=bands):
data = data_out[:,:,band-1]
#write values to empty array
out.GetRasterBand(band).WriteArray( data )
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
#apend the statistics to dataset
out.GetRasterBand(band).GetStatistics(0,1)
print('Saving %s/%s' % (band,bands))
band = band+1
out = None
print('Processing of %s complete' % (outname))
return outname
#Class to load the raster image and create an empty raster of the same dimensions
#LoadImage: loads an image using gdal
class LoadImage():
def __init__(self,infile):
# open the dataset
self.image_name = infile[:-4]
self.dataset = gdal.Open(infile) #GA_ReadOnly)
# if there's nothign there print error
#self.stacked = None
if self.dataset is None:
print('BORK: Could not load file: %s' %(infile))
# otherwise do stuff
else:
#get the bit depth of the source image
'''try:
pillow_image = Image.open(infile)
self.bit_depth = pillow_image.bits()
pillow_image.close()
except:
print ('Cant get the bit-depth of the image with pillow')'''
#get the format
self.driver = self.dataset.GetDriver().ShortName
#get the x dimension
self.xsize = self.dataset.RasterXSize
#get the y dimension
self.ysize = self.dataset.RasterYSize
#get the projection
self.proj = self.dataset.GetProjection()
#get the number of bands
bands = self.dataset.RasterCount
print('BANDS:',bands)
#get the geotransform Returns a list object. This is standard GDAL ordering:
#spatial[0] = top left x
#spatial[1] = w-e pixel size
#spatial[2] = rotation (should be 0)
#spatial[3] = top left y
#spatial[4] = rotation (should be 0)
#spatial[5] = n-s pixel size
self.spatial = self.dataset.GetGeoTransform()
#print some stuff to console to show we're paying attention
print('Found raster in %s format. Raster has %s bands' %(self.driver,bands))
print('Projected as %s' %(self.proj))
print('Dimensions: %s x %s' %(self.xsize,self.ysize))
#instantiate a counter
count = 1
#OK. This is the bit that catually loads the bands in in a while loop
# Loop through bands as long as count is equal to or less than total
while (count<=bands):
print('BANDS less than COUNT')
#show that your computer's fans are whining for a reason
print('Loading band: %s of %s' %(count,bands))
#get the band
band = self.dataset.GetRasterBand(count)
# load this as a numpy array
#mask the no data values
data_array = band.ReadAsArray()
data_array = ma.masked_where(data_array == 0, data_array)
data_array = data_array.filled(-999)
data_array = data_array.astype(np.float32, copy=False)
# close the band object
band = None
#this bit stacks the bands into a combined numpy array
#if it's the first band copy the array directly to the combined one
if count == 1:
self.stacked = data_array
#else combine these
else:
self.stacked = np.dstack((self.stacked,data_array))
# increment the counter
count = count+1
#self.coords_matrix = self.coords()
#print self.coords_matrix.shape
#print self.coords_matrix
def mtbvi_stats(image, classes, lwls, rwls):
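    # Sketch of intent (inferred from the code below, not from project docs):
    # for every band i of the MTBVI image stack, split the cell values into
    # class 1 ('arc') and class 2 ('bac') using the mask, run Welch's t-test
    # (equal_var=False), and report the band with the largest absolute t
    # statistic together with its red/NIR wavelength pair.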
out= np.zeros((image.shape[2],2))
cols = lwls.shape[0]
rows = rwls.shape[0]
for i in range(image.shape[2]):
values=image[:,:,i]
arc = values[np.where(classes==1)]
bac = values[np.where(classes==2)]
#print (arc)
#print(bac)
t = stats.ttest_ind(arc,bac, equal_var=False)
out[i,0]=t[0]
out[i,1]=t[1]
mt_grid = np.reshape(out[:,0],(cols,rows))
finite = np.isfinite(out[:,0])
best = np.argmax(np.abs(out[finite,0]))
lwl_otsh = np.repeat(lwls,out.shape[0]/cols)[finite]
rwl_otsh = np.tile(rwls,out.shape[0]/rows)[finite]
best = np.array([best,lwl_otsh[best],rwl_otsh[best],out[best,0],out[best,1]])
'''best = np.argmax(np.abs(out[:,0]))
lwl_otsh = np.repeat(lwls,out.shape[0]/cols)
rwl_otsh = np.tile(rwls,out.shape[0]/rows)
best = np.array([best,lwl_otsh[best],rwl_otsh[best],out[best,0],out[best,1]])'''
print (best)
return mt_grid, best
if __name__ == '__main__':
root_dir = '/home/dav/data/temp/test/mtbvi/subsets'
for dir in os.listdir(root_dir):
d = os.path.join(root_dir,dir)
outdir = os.path.join(d,'results')
classes = LoadImage(os.path.join(d,'grid_output','mask.tif')).stacked
if not os.path.exists(outdir):
os.mkdir(outdir)
image_dir = os.path.join(d,'output')
lwl = np.genfromtxt(os.path.join(image_dir,dir+'_lWL.txt'))
rwl = np.genfromtxt(os.path.join(image_dir,dir+'_rWL.txt'))
corr_dir = '/home/dav/data/temp/test/correlation'
for i in os.listdir(image_dir):
if i[-3:]=='tif':
im = LoadImage(os.path.join(image_dir,i))
image = im.stacked
spatial = im.spatial
stats_ot = mtbvi_stats(image,classes,lwl,rwl)
plt.imshow(np.rot90(stats_ot[0],k=1),
extent=[lwl[0],lwl[-1],rwl[0],rwl[-1]],
interpolation='none',
cmap='gnuplot2')
plt.colorbar()
plt.xlabel('RED $\lambda (nm)$')
plt.ylabel('NIR $\lambda (nm)$')
i_name =dir.split('_')[0]
i_date =dir.split('_')[1]
if i_name == 'ddt1':
tname = 'Didington Transect 1 %s' %(i_date)
if i_name == 'ddch':
tname = 'Didington Clay Field %s' %(i_date)
if i_name == 'hhcc':
tname = 'Harnhill Cherry Copse %s' %(i_date)
if i_name == 'hhqf':
tname = 'Harnhill Quarry Field %s' %(i_date)
plt.title(tname+':\n NDVI variable wavelength: Contrast')
plt.tight_layout()
plt.savefig(os.path.join(outdir,'_'+dir+'_mtbvi.png'))
plt.show()
plt.close()
np.savetxt(os.path.join(outdir,'_'+dir+'_bestmtbvi.txt'),stats_ot[1])
'''writeimage(os.path.join(corr_dir,dir),
str(dir+'_'+str(int(stats_ot[1][1]))+'_'+str(int(stats_ot[1][2]))+'_mtbvi.tif'),
image[:,:,int(stats_ot[1][0])],
spatial)'''
| mit |
pythonvietnam/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
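# plot_embedding is reused below for every method: each call rescales the 2-D
# coordinates to [0, 1] per axis before drawing, so the embeddings remain
# visually comparable regardless of their native scale.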
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
flohorovicic/pygeomod | pygeomod/geogrid.py | 1 | 38875 | '''Module with classes and methods to analyse and process exported geomodel grids
Created on 21/03/2014
@author: Florian Wellmann (some parts originally developed by Erik Schaeffer)
'''
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
print("\n\n\tMatplotlib not installed - plotting functions will not work!\n\n\n")
# import mpl_toolkits
# from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# to convert python variable types to cpp types
import ctypes
# to create array
from numpy.ctypeslib import ndpointer
# to create folder
import os
# read out and change xml file (here only used to read out model boundary information)
import geomodeller_xml_obj as GO
class GeoGrid():
"""Object definition for exported geomodel grids"""
def __init__(self, **kwds):
"""GeoGrid contains methods to load, analyse, and process exported geomodel grids
**Optional Keywords**:
- *grid_filename* = string : filename of exported grid
- *delxyz_filename* = string : file with model discretisation
- *dimensions_filename* = string : file with model dimension (coordinates)
"""
if kwds.has_key('grid_filename'):
self.grid_filename = kwds['grid_filename']
if kwds.has_key('delxyz_filename'):
self.delxyz_filename = kwds['delxyz_filename']
if kwds.has_key('dimensions_filename'):
self.dimensions_filename = kwds['dimensions_filename']
def __add__(self, G_other):
"""Combine grid with another GeoGrid if regions are overlapping"""
# check overlap
print self.ymin, self.ymax
print G_other.ymin, G_other.ymax
if (G_other.ymin < self.ymax and G_other.ymin > self.ymin):
print("Grids overlapping in y-direction between %.0f and %.0f" %
(G_other.ymin, self.ymax))
def load_grid(self):
"""Load exported grid, discretisation and dimensions from file"""
if not hasattr(self, 'grid_filename'):
raise AttributeError("Grid filename is not defined!")
self.grid = np.loadtxt(self.grid_filename,
delimiter = ',',
dtype='int',
unpack=False)
if hasattr(self, 'delxyz_filename'):
self.load_delxyz(self.delxyz_filename)
self.adjust_gridshape()
if hasattr(self, 'dimensions_filename'):
self.load_dimensions(self.dimensions_filename)
def load_delxyz(self, delxyz_filename):
"""Load grid discretisation from file"""
del_lines = open(delxyz_filename, 'r').readlines()
d0 = del_lines[0].split("*")
self.delx = np.array([float(d0[1]) for _ in range(int(d0[0]))])
d1 = del_lines[1].split("*")
self.dely = np.array([float(d1[1]) for _ in range(int(d1[0]))])
d2 = del_lines[2].split(",")[:-1]
self.delz = np.array([float(d) for d in d2])
(self.nx, self.ny, self.nz) = (len(self.delx), len(self.dely), len(self.delz))
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
def set_delxyz(self, delxyz):
"""Set delx, dely, delz arrays explicitly and update additional attributes
**Arguments**:
- *delxyz* = (delx-array, dely-array, delz-array): arrays with cell dimensions
"""
self.delx, self.dely, self.delz = delxyz
(self.nx, self.ny, self.nz) = (len(self.delx), len(self.dely), len(self.delz))
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
def set_basename(self, name):
"""Set basename for grid exports, etc.
**Arguments**:
- *name* = string: basename
"""
self.basename = name
def load_dimensions(self, dimensions_filename):
"""Load project dimensions from file"""
dim = [float(d) for d in open(dimensions_filename, 'r').readlines()[1].split(",")]
(self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = dim
        # (cell centre positions in real-world coordinates are computed in determine_cell_centers)
def define_regular_grid(self, nx, ny, nz):
"""Define a regular grid from defined project boundaries and given discretisations"""
self.nx = nx
self.ny = ny
self.nz = nz
self.delx = np.ones(nx) * (self.xmax - self.xmin) / nx
self.dely = np.ones(ny) * (self.ymax - self.ymin) / ny
self.delz = np.ones(nz) * (self.zmax - self.zmin) / nz
# create (empty) grid object
self.grid = np.ndarray((nx, ny, nz))
# update model extent
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
def define_irregular_grid(self, delx, dely, delz):
"""Set irregular grid according to delimter arrays in each direction"""
self.delx = delx
self.dely = dely
self.delz = delz
self.nx = len(delx)
self.ny = len(dely)
self.nz = len(delz)
# create (empty) grid object
self.grid = np.ndarray((self.nx, self.ny, self.nz))
# update model extent
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
def get_dimensions_from_geomodeller_xml_project(self, xml_filename):
"""Get grid dimensions from Geomodeller project
**Arguments**:
- *xml_filename* = string: filename of Geomodeller XML file
"""
# Note: this implementation is based on the Geomodeller API
# The boundaries could theoretically also be extracted from the XML file
# directly, e.g. using the geomodeller_xml_obj module - but this would
# require an additional module being loaded, so avoid here!
filename_ctypes = ctypes.c_char_p(xml_filename)
# get model boundaries
lib = ctypes.CDLL('./libgeomod.so') #linux
#lib = ctypes.windll.libgeomod #windows
lib.get_model_bounds.restype = ndpointer(dtype=ctypes.c_int, shape=(6,))
boundaries = lib.get_model_bounds(filename_ctypes)
(self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = boundaries
self.extent_x = self.xmax - self.xmin
self.extent_y = self.ymax - self.ymin
self.extent_z = self.zmax - self.zmin
def update_from_geomodeller_project(self, xml_filename):
"""Update grid properties directly from Geomodeller project
**Arguments**:
- *xml_filename* = string: filename of Geomodeller XML file
"""
filename_ctypes = ctypes.c_char_p(xml_filename)
# create cell position list with [x0, y0, z0, ... xn, yn, zn]
cell_position = []
ids = []
# check if cell centers are defined - if not, do so!
if not hasattr(self, 'cell_centers_x'):
self.determine_cell_centers()
for k in range(self.nz):
for j in range(self.ny):
for i in range(self.nx):
cell_position.append(self.cell_centers_x[i])
cell_position.append(self.cell_centers_y[j])
cell_position.append(self.cell_centers_z[k])
ids.append((i,j,k))
# prepare variables for cpp function
coord_ctypes = (ctypes.c_double * len(cell_position))(*cell_position)
coord_len = len(cell_position)
# call cpp function
lib = ctypes.CDLL('./libgeomod.so')
lib.compute_irregular_grid.restype = ndpointer(dtype=ctypes.c_int, shape=(coord_len/3,))
formations_raw = lib.compute_irregular_grid(filename_ctypes, coord_ctypes, coord_len)
# re-sort formations into array
for i in range(len(formations_raw)):
self.grid[ids[i][0],ids[i][1],ids[i][2]] = formations_raw[i]
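    # A typical workflow sketch for filling a new regular grid directly from a
    # GeoModeller project (file name illustrative; requires libgeomod.so in the
    # working directory, as used above):
    #
    #   G = GeoGrid()
    #   G.get_dimensions_from_geomodeller_xml_project("my_project.xml")
    #   G.define_regular_grid(nx=100, ny=100, nz=50)
    #   G.update_from_geomodeller_project("my_project.xml")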
def set_densities(self, densities):
"""Set layer densities
**Arguments**:
- *densities* = dictionary of floats: densities for geology ids
"""
self.densities = densities
def set_sus(self, sus):
"""Set layer susceptibilities
**Arguments**:
        - *sus* = dictionary of floats: magnetic susceptibilities for geology ids
"""
self.sus = sus
def write_noddy_files(self, **kwds):
"""Create Noddy block model files (for grav/mag calculation)
**Optional keywords**:
- *gps_range* = float : set GPS range (default: 1200.)
Method generates the files required to run the forward gravity/ magnetics response
from the block model:
- model.g00 = file with basic model information
- model.g12 = discretised geological (block) model
- base.his = Noddy history file with some basic settings
"""
self.gps_range = kwds.get("gps_range", 1200.)
if not hasattr(self, 'basename'):
self.basename = "geogrid"
f_g12 = open(self.basename + ".g12", 'w')
f_g01 = open(self.basename + ".g00", 'w')
# method = 'numpy' # using numpy should be faster - but it messes up the order... possible to fix?
# if method == 'standard':
# i = 0
# j = 0
# k = 0
# self.block = np.ndarray((self.nx,self.ny,self.nz))
# for line in f.readlines():
# if line == '\n':
# # next z-slice
# k += 1
# # reset x counter
# i = 0
# continue
# l = [int(l1) for l1 in line.strip().split("\t")]
# self.block[i,:,self.nz-k-1] = np.array(l)[::-1]
# i += 1
if not hasattr(self, "unit_ids"):
self.determine_geology_ids()
#=======================================================================
# # create file with base settings (.g00)
#=======================================================================
f_g01.write("VERSION = 7.11\n")
f_g01.write("FILE PREFIX = " + self.basename + "\n")
import time
t = time.localtime() # get current time
f_g01.write("DATE = %d/%d/%d\n" % (t.tm_mday, t.tm_mon, t.tm_year))
f_g01.write("TIME = %d:%d:%d\n" % (t.tm_hour, t.tm_min, t.tm_sec))
f_g01.write("UPPER SW CORNER (X Y Z) = %.1f %.1f %.1f\n" % (self.xmin - self.gps_range,
self.ymin - self.gps_range,
self.zmax))
f_g01.write("LOWER NE CORNER (X Y Z) = %.1f %.1f %.1f\n" % (self.xmax + self.gps_range,
self.ymax + self.gps_range,
self.zmin))
f_g01.write("NUMBER OF LAYERS = %d\n" % self.nz)
for k in range(self.nz):
f_g01.write("\tLAYER %d DIMENSIONS (X Y) = %d %d\n" % (k,
self.nx + 2 * (self.gps_range / self.delx[0]),
self.ny + 2 * (self.gps_range / self.dely[0])))
f_g01.write("NUMBER OF CUBE SIZES = %d\n" % self.nz)
for k in range(self.nz):
f_g01.write("\tCUBE SIZE FOR LAYER %d = %d\n" % (k, self.delx[0]))
f_g01.write("CALCULATION RANGE = %d\n" % (self.gps_range / self.delx[0]))
f_g01.write("""INCLINATION OF EARTH MAG FIELD = -67.00
INTENSITY OF EARTH MAG FIELD = 63000.00
DECLINATION OF VOL. WRT. MAG NORTH = 0.00
DENSITY CALCULATED = Yes
SUSCEPTIBILITY CALCULATED = Yes
REMANENCE CALCULATED = No
ANISOTROPY CALCULATED = No
INDEXED DATA FORMAT = Yes
""")
f_g01.write("NUM ROCK TYPES = %d\n" % len(self.unit_ids))
for i in self.unit_ids:
f_g01.write("ROCK DEFINITION Layer %d = %d\n" % (i, i))
f_g01.write("\tDensity = %f\n" % self.densities[int(i)])
f_g01.write("\tSus = %f\n" % self.sus[int(i)])
#=======================================================================
# Create g12 file
#=======================================================================
# write geology blocks to file
for k in range(self.nz):
# this worked for geophysics, but not for re-import with pynoddy:
# for val in self.grid[:,:,k].ravel(order = 'A'):
# f_g12.write("%d\t" % val)
for i in range(self.nx):
for val in self.grid[i,:,k]:
f_g12.write("%d\t" % val)
f_g12.write("\n")
# f_g12.write(['%d\t' % i for i in self.grid[:,:,k].ravel()])
f_g12.write("\n")
f_g12.close()
f_g01.close()
#=======================================================================
# # create noddy history file for base settings
#=======================================================================
import pynoddy.history
history = self.basename + "_base.his"
nm = pynoddy.history.NoddyHistory()
# add stratigraphy
# create dummy names and layers for base stratigraphy
layer_names = []
layer_thicknesses = []
for i in self.unit_ids:
layer_names.append('Layer %d' % i)
layer_thicknesses.append(500)
strati_options = {'num_layers' : len(self.unit_ids),
'layer_names' : layer_names,
'layer_thickness' : layer_thicknesses}
nm.add_event('stratigraphy', strati_options)
# set grid origin and extent:
nm.set_origin(self.xmin, self.ymin, self.zmin)
nm.set_extent(self.extent_x, self.extent_y, self.extent_z)
nm.write_history(history)
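    # A minimal usage sketch for the Noddy export (ids and property values are
    # illustrative only; requires pynoddy and assumes densities/susceptibilities
    # are defined for every geology id in the grid):
    #
    #   geo_grid.set_basename("my_model")
    #   geo_grid.set_densities({1: 2650., 2: 2700., 3: 3100.})
    #   geo_grid.set_sus({1: 0.001, 2: 0.0015, 3: 0.01})
    #   geo_grid.write_noddy_files(gps_range=1200.)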
def set_dimensions(self, **kwds):
"""Set model dimensions, if no argument provided: xmin = 0, max = sum(delx) and accordingly for y,z
**Optional keywords**:
- *dim* = (xmin, xmax, ymin, ymax, zmin, zmax) : set dimensions explicitly
"""
if kwds.has_key("dim"):
(self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = kwds['dim']
else:
self.xmin, self.ymin, self.zmin = (0., 0., 0.)
self.xmax, self.ymax, self.zmax = (sum(self.delx), sum(self.dely), sum(self.delz))
def determine_cell_centers(self):
"""Determine cell centers for all coordinate directions in "real-world" coordinates"""
if not hasattr(self, 'xmin'):
raise AttributeError("Please define grid dimensions first")
sum_delx = np.cumsum(self.delx)
sum_dely = np.cumsum(self.dely)
sum_delz = np.cumsum(self.delz)
self.cell_centers_x = np.array([sum_delx[i] - self.delx[i] / 2. for i in range(self.nx)]) + self.xmin
self.cell_centers_y = np.array([sum_dely[i] - self.dely[i] / 2. for i in range(self.ny)]) + self.ymin
self.cell_centers_z = np.array([sum_delz[i] - self.delz[i] / 2. for i in range(self.nz)]) + self.zmin
def determine_cell_boundaries(self):
"""Determine cell boundaries for all coordinates in "real-world" coordinates"""
if not hasattr(self, 'xmin'):
raise AttributeError("Please define grid dimensions first")
sum_delx = np.cumsum(self.delx)
sum_dely = np.cumsum(self.dely)
sum_delz = np.cumsum(self.delz)
self.boundaries_x = np.ndarray((self.nx+1))
self.boundaries_x[0] = 0
self.boundaries_x[1:] = sum_delx
self.boundaries_y = np.ndarray((self.ny+1))
self.boundaries_y[0] = 0
self.boundaries_y[1:] = sum_dely
self.boundaries_z = np.ndarray((self.nz+1))
self.boundaries_z[0] = 0
self.boundaries_z[1:] = sum_delz
# create a list with all bounds
self.bounds = [self.boundaries_y[0], self.boundaries_y[-1],
self.boundaries_x[0], self.boundaries_x[-1],
self.boundaries_z[0], self.boundaries_z[-1]]
def adjust_gridshape(self):
"""Reshape numpy array to reflect model dimensions"""
self.grid = np.reshape(self.grid, (self.nz, self.ny, self.nx))
self.grid = np.swapaxes(self.grid, 0, 2)
# self.grid = np.swapaxes(self.grid, 0, 1)
def plot_section(self, direction, cell_pos='center', **kwds):
"""Plot a section through the model in a given coordinate direction
**Arguments**:
- *direction* = 'x', 'y', 'z' : coordinate direction for section position
- *cell_pos* = int/'center','min','max' : cell position, can be given as
value of cell id, or as 'center' (default), 'min', 'max' for simplicity
**Optional Keywords**:
- *cmap* = mpl.colormap : define colormap for plot (default: jet)
- *colorbar* = bool: attach colorbar (default: True)
- *rescale* = bool: rescale color bar to range of visible slice (default: False)
        - *ve* = float : vertical exaggeration (for plots in x,y-direction)
- *figsize* = (x,y) : figsize settings for plot
- *ax* = matplotlib.axis : add plot to this axis (default: new axis)
if axis is defined, the axis is returned and the plot not shown
Note: if ax is passed, colorbar is False per default!
- *savefig* = bool : save figure to file (default: show)
- *fig_filename* = string : filename to save figure
"""
colorbar = kwds.get('colorbar', True)
cmap = kwds.get('cmap', 'jet')
rescale = kwds.get('rescale', False)
ve = kwds.get('ve', 1.)
figsize = kwds.get('figsize', (8,4))
if direction == 'x':
if type(cell_pos) == str:
# decipher cell position
if cell_pos == 'center' or cell_pos == 'centre':
pos = self.nx / 2
elif cell_pos == 'min':
pos = 0
elif cell_pos == 'max':
                    pos = self.nx - 1
else:
pos = cell_pos
grid_slice = self.grid[pos,:,:]
grid_slice = grid_slice.transpose()
aspect = self.extent_z/self.extent_x * ve
elif direction == 'y':
if type(cell_pos) == str:
# decipher cell position
if cell_pos == 'center' or cell_pos == 'centre':
pos = self.ny / 2
elif cell_pos == 'min':
pos = 0
elif cell_pos == 'max':
                    pos = self.ny - 1
else:
pos = cell_pos
grid_slice = self.grid[:,pos,:]
grid_slice = grid_slice.transpose()
aspect = self.extent_z/self.extent_y * ve
elif direction == 'z' :
if type(cell_pos) == str:
# decipher cell position
if cell_pos == 'center' or cell_pos == 'centre':
pos = self.nz / 2
elif cell_pos == 'min':
pos = 0
elif cell_pos == 'max':
                    pos = self.nz - 1
else:
pos = cell_pos
grid_slice = self.grid[:,:,pos].transpose()
aspect = 1.
if not kwds.has_key('ax'):
colorbar = kwds.get('colorbar', True)
# create new axis for plot
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
colorbar = False
ax = kwds['ax']
if not hasattr(self, 'unit_ids'):
self.determine_geology_ids()
if rescale:
vmin = np.min(grid_slice)
vmax = np.max(grid_slice)
else: # use global range for better comparison
vmin = min(self.unit_ids)
vmax = max(self.unit_ids)
im = ax.imshow(grid_slice, interpolation='nearest',
cmap = cmap,
                       origin='lower',
vmin = vmin,
vmax = vmax,
aspect = aspect)
if colorbar:
# divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)
# cax = divider.append_axes("bottom", size="5%", pad=0.2)
cbar1 = fig.colorbar(im, orientation="horizontal")
ticks = np.arange(vmin, vmax+0.1, int(np.log2(vmax-vmin)/1.2), dtype='int')
cbar1.set_ticks(ticks)
# cbar1.set_ticks(self.unit_ids[::int(np.log2(len(self.unit_ids)/2))])
cbar1.set_label("Geology ID")
# cax.xaxis.set_major_formatter(FormatStrFormatter("%d"))
if kwds.has_key("ax"):
# return image and do not show
return im
if kwds.has_key('savefig') and kwds['savefig']:
# save to file
filename = kwds.get("fig_filename", "grid_section_direction_%s_pos_%d.png" %
                                (direction, pos))
plt.savefig(filename)
else:
plt.show()
def export_to_vtk(self, vtk_filename="geo_grid", real_coords = True, **kwds):
"""Export grid to VTK for visualisation
**Arguments**:
- *vtk_filename* = string : vtk filename (obviously...)
- *real_coords* = bool : model extent in "real world" coordinates
**Optional Keywords**:
- *grid* = numpy grid : grid to save to vtk (default: self.grid)
- *var_name* = string : name of variable to plot (default: Geology)
Note: requires pyevtk, available at: https://bitbucket.org/pauloh/pyevtk
"""
grid = kwds.get("grid", self.grid)
var_name = kwds.get("var_name", "Geology")
from evtk.hl import gridToVTK
# define coordinates
x = np.zeros(self.nx + 1)
y = np.zeros(self.ny + 1)
z = np.zeros(self.nz + 1)
x[1:] = np.cumsum(self.delx)
y[1:] = np.cumsum(self.dely)
z[1:] = np.cumsum(self.delz)
# plot in coordinates
if real_coords:
x += self.xmin
y += self.ymin
z += self.zmin
gridToVTK(vtk_filename, x, y, z,
cellData = {var_name: grid})
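    # Example (file names illustrative; requires the pyevtk dependency):
    #
    #   G = GeoGrid(grid_filename="model_grid.csv",
    #               delxyz_filename="model_delxyz.csv",
    #               dimensions_filename="model_dimensions.csv")
    #   G.load_grid()
    #   G.export_to_vtk(vtk_filename="model_grid_vtk", real_coords=True)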
def export_to_csv(self, filename = "geo_grid.csv"):
"""Export grid to x,y,z,value pairs in a csv file
Ordering is x-dominant (first increase in x, then y, then z)
**Arguments**:
- *filename* = string : filename of csv file (default: geo_grid.csv)
"""
f = open(filename, 'w')
        # write cell centre coordinates (x,y,z) and the geology id of each cell
        if not hasattr(self, 'cell_centers_x'):
            self.determine_cell_centers()
        for k in range(self.nz):
            for j in range(self.ny):
                for i in range(self.nx):
                    f.write("%.1f,%.1f,%.1f,%d\n" % (self.cell_centers_x[i], self.cell_centers_y[j],
                                                     self.cell_centers_z[k], self.grid[i, j, k]))
f.close()
def determine_geology_ids(self):
"""Determine all ids assigned to cells in the grid"""
self.unit_ids = np.unique(self.grid)
def get_name_mapping_from_file(self, filename):
"""Get the mapping between unit_ids in the model and real geological names
from a csv file (e.g. the SHEMAT property file)
**Arguments**:
- *filename* = string : filename of csv file with id, name entries
"""
self.unit_name = {}
filelines = open(filename, 'r').readlines()[1:]
for line in filelines:
l = line.split(",")
self.unit_name[int(l[1])] = l[0]
def get_name_mapping_from_dict(self, unit_name_dict):
"""Get the name mapping directly from a dictionary
**Arguments**:
- *unit_name_dict* = dict with "name" : unit_id (int) pairs
"""
self.unit_name = unit_name_dict
def remap_ids(self, mapping_dictionary):
"""Remap geological unit ids to new ids as defined in mapping dictionary
**Arguments**:
- *mapping_dictionary* = dict : {1 : 1, 2 : 3, ...} : e.g.: retain
id 1, but map id 2 to 3 (note: if id not specified, it will be retained)
"""
        # first step: create a single mesh for each id to avoid accidental
# overwriting below (there might be a better solution...)
if not hasattr(self, 'unit_ids'):
self.determine_geology_ids()
geol_grid_ind = {}
for k,v in mapping_dictionary.items():
geol_grid_ind[k] = self.grid == k
print("Remap id %d -> %d" % (k,v))
# now reassign values in actual grid
for k,v in mapping_dictionary.items():
print("Reassign id %d to grid" % v)
self.grid[geol_grid_ind[k]] = v
# update global geology ids
self.determine_geology_ids()
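    # Example: merge unit 4 into unit 2 while keeping all other ids unchanged
    # (ids not listed in the mapping dictionary are retained automatically):
    #
    #   G.remap_ids({4: 2})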
def determine_cell_volumes(self):
"""Determine cell volumes for each cell (e.g. for total formation volume calculation)"""
self.cell_volume = np.ndarray(np.shape(self.grid))
for k,dz in enumerate(self.delz):
for j,dy in enumerate(self.dely):
for i,dx in enumerate(self.delx):
self.cell_volume[i,j,k] = dx * dy * dz
def determine_indicator_grids(self):
"""Determine indicator grids for all geological units"""
self.indicator_grids = {}
if not hasattr(self, 'unit_ids'):
self.determine_geology_ids()
grid_ones = np.ones(np.shape(self.grid))
for unit_id in self.unit_ids:
self.indicator_grids[unit_id] = grid_ones * (self.grid == unit_id)
def determine_id_volumes(self):
"""Determine the total volume of each unit id in the grid
        (for example, for cell discretisation studies, etc.)"""
if not hasattr(self, 'cell_volume'):
self.determine_cell_volumes()
if not hasattr(self, 'indicator_grids'):
self.determine_indicator_grids()
self.id_volumes = {}
for unit_id in self.unit_ids:
self.id_volumes[unit_id] = np.sum(self.indicator_grids[unit_id] * self.cell_volume)
def print_unit_names_volumes(self):
"""Formatted output to STDOUT of unit names (or ids, if names are note
defined) and calculated volumes
"""
        if not hasattr(self, 'id_volumes'):
self.determine_id_volumes()
if hasattr(self, "unit_name"):
# print with real geological names
print("Total volumes of modelled geological units:\n")
for unit_id in self.unit_ids:
print("%26s : %.2f km^3" % (self.unit_name[unit_id],
self.id_volumes[unit_id]/1E9))
else:
# print with unit ids only
print("Total volumes of modelled geological units:\n")
for unit_id in self.unit_ids:
print("%3d : %.2f km^3" % (unit_id,
self.id_volumes[unit_id]/1E9))
def extract_subgrid(self, subrange, **kwds):
"""Extract a subgrid model from existing grid
**Arguments**:
- *subrange* = (x_from, x_to, y_from, y_to, z_from, z_to) : range for submodel in either cell or world coords
**Optional keywords**:
- *range_type* = 'cell', 'world' : define if subrange in cell ids (default) or real-world coordinates
"""
range_type = kwds.get('range_type', 'cell')
if not hasattr(self, 'boundaries_x'):
self.determine_cell_boundaries()
        if range_type == 'world':
            # convert subrange to a mutable list and determine cell ids from world coordinates
            subrange = list(subrange)
subrange[0] = np.argwhere(self.boundaries_x > subrange[0])[0][0]
subrange[1] = np.argwhere(self.boundaries_x < subrange[1])[-1][0]
subrange[2] = np.argwhere(self.boundaries_y > subrange[2])[0][0]
subrange[3] = np.argwhere(self.boundaries_y < subrange[3])[-1][0]
subrange[4] = np.argwhere(self.boundaries_z > subrange[4])[0][0]
subrange[5] = np.argwhere(self.boundaries_z < subrange[5])[-1][0]
# create a copy of the original grid
import copy
subgrid = copy.deepcopy(self)
# extract grid
subgrid.grid = self.grid[subrange[0]:subrange[1],
subrange[2]:subrange[3],
subrange[4]:subrange[5]]
subgrid.nx = subrange[1] - subrange[0]
subgrid.ny = subrange[3] - subrange[2]
subgrid.nz = subrange[5] - subrange[4]
# update extent
subgrid.xmin = self.boundaries_x[subrange[0]]
subgrid.xmax = self.boundaries_x[subrange[1]]
subgrid.ymin = self.boundaries_y[subrange[2]]
subgrid.ymax = self.boundaries_y[subrange[3]]
subgrid.zmin = self.boundaries_z[subrange[4]]
subgrid.zmax = self.boundaries_z[subrange[5]]
subgrid.extent_x = subgrid.xmax - subgrid.xmin
subgrid.extent_y = subgrid.ymax - subgrid.ymin
subgrid.extent_z = subgrid.zmax - subgrid.zmin
# update cell spacings
subgrid.delx = self.delx[subrange[0]:subrange[1]]
subgrid.dely = self.dely[subrange[2]:subrange[3]]
subgrid.delz = self.delz[subrange[4]:subrange[5]]
# now: update other attributes:
subgrid.determine_cell_centers()
subgrid.determine_cell_boundaries()
subgrid.determine_cell_volumes()
subgrid.determine_geology_ids()
# finally: return subgrid
return subgrid
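    # Example (values illustrative): extract a submodel either by cell indices or,
    # with range_type='world', by real-world coordinates:
    #
    #   G_sub = G.extract_subgrid([10, 50, 0, 40, 0, 20])
    #   G_sub = G.extract_subgrid([2000., 6000., 1000., 5000., -2000., 0.],
    #                             range_type='world')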
# ******************************************************************************
# Some additional helper functions
# ******************************************************************************
def combine_grids(G1, G2, direction, merge_type = 'keep_first', **kwds):
"""Combine two grids along one axis
..Note: this implementation assumes (for now) that the overlap is perfectly matching,
i.e. grid cell sizes identical and at equal positions, or that they are perfectly adjacent!
**Arguments**:
- G1, G2 = GeoGrid : grids to be combined
- direction = 'x', 'y', 'z': direction in which grids are combined
- merge_type = method to combine grid:
'keep_first' : keep elements of first grid (default)
'keep_second' : keep elements of second grid
'random' : randomly choose an element to retain
..Note: all other dimensions must be matching perfectly!!
**Optional keywords**:
- *overlap_analysis* = bool : perform a detailed analysis of the overlapping area, including
mismatch. Also returns a second item, a GeoGrid with information on mismatch!
**Returns**:
- *G_comb* = GeoGrid with combined grid
    - *G_overlap* = GeoGrid with analysis of the overlap (if overlap_analysis=True)
"""
overlap_analysis = kwds.get("overlap_analysis", False)
# first step: determine overlap
if direction == 'x':
if G2.xmax > G1.xmax:
overlap_min = G2.xmin
overlap_max = G1.xmax
# identifier alias for grids with higher/ lower values
G_high = G2
G_low = G1
else:
overlap_min = G1.xmin
overlap_max = G2.xmax
# identifier alias for grids with higher/ lower values
G_high = G1
G_low = G2
# check if all other dimensions are perfectly matching
if (G1.ymin != G2.ymin) or (G1.zmin != G2.zmin) or \
(G1.ymax != G2.ymax) or (G1.zmax != G2.zmax):
raise ValueError("Other dimensions (apart from %s) not perfectly matching! Check and try again!" % direction)
elif direction == 'y':
if G2.ymax > G1.ymax:
overlap_min = G2.ymin
overlap_max = G1.ymax
# identifier alias for grids with higher/ lower values
G_high = G2
G_low = G1
else:
overlap_min = G1.ymin
overlap_max = G2.ymax
# identifier alias for grids with higher/ lower values
G_high = G1
G_low = G2
# check if all other dimensions are perfectly matching
if (G1.xmin != G2.xmin) or (G1.zmin != G2.zmin) or \
(G1.xmax != G2.xmax) or (G1.zmax != G2.zmax):
raise ValueError("Other dimensions (apart from %s) not perfectly matching! Check and try again!" % direction)
elif direction == 'z':
if G2.zmax > G1.zmax:
overlap_min = G2.zmin
overlap_max = G1.zmax
# identifier alias for grids with higher/ lower values
G_high = G2
G_low = G1
else:
overlap_min = G1.zmin
overlap_max = G2.zmax
# identifier alias for grids with higher/ lower values
G_high = G1
G_low = G2
# check if all other dimensions are perfectly matching
if (G1.ymin != G2.ymin) or (G1.xmin != G2.xmin) or \
(G1.ymax != G2.ymax) or (G1.xmax != G2.xmax):
raise ValueError("Other dimensions (apart from %s) not perfectly matching! Check and try again!" % direction)
overlap = overlap_max - overlap_min
if overlap == 0:
print("Grids perfectly adjacent")
elif overlap < 0:
raise ValueError("No overlap between grids! Check and try again!")
else:
print("Positive overlap in %s direction of %f meters" % (direction, overlap))
# determine cell centers
G1.determine_cell_centers()
G2.determine_cell_centers()
    # initialise new grid
G_comb = GeoGrid()
    # initialise overlap grid, if analysis performed
if overlap_analysis:
G_overlap = GeoGrid()
if direction == 'x':
pass
elif direction == 'y':
#=======================================================================
# Perform overlap analysis
#=======================================================================
# initialise overlap grid with dimensions of overlap
G_overlap.set_dimensions(dim = (G1.xmin, G1.xmax, overlap_min, overlap_max, G1.zmin, G1.zmax))
G_low_ids = np.where(G_low.cell_centers_y > overlap_min)[0]
G_high_ids = np.where(G_high.cell_centers_y < overlap_max)[0]
delx = G1.delx
dely = G_low.dely[G_low_ids]
delz = G1.delz
G_overlap.set_delxyz((delx, dely, delz))
# check if overlap region is identical
if not (len(G_low_ids) == len(G_high_ids)):
raise ValueError("Overlap length not identical, please check and try again!")
# now: determine overlap mismatch
G_overlap.grid = G_low.grid[:,G_low_ids,:] - G_high.grid[:,G_high_ids,:]
# for some very strange reason, this next step is necessary to enable the VTK
# export with pyevtk - looks like a bug in pyevtk...
G_overlap.grid = G_overlap.grid + np.zeros(G_overlap.grid.shape)
#
#=======================================================================
# Set up combined grid
#=======================================================================
G_comb.set_dimensions(dim = (G1.xmin, G1.xmax, G_low.ymin, G_high.ymax, G1.zmin, G1.zmax))
# combine dely arrays
dely = np.hstack((G_low.dely[:G_low_ids[0]], G_high.dely))
G_comb.set_delxyz((delx, dely, delz))
#=======================================================================
# Now merge grids
#=======================================================================
if merge_type == 'keep_first':
if G1.ymax > G2.ymax:
G_comb.grid = np.concatenate((G2.grid[:,:G_low_ids[0],:], G1.grid), axis=1)
else:
G_comb.grid = np.concatenate((G1.grid, G2.grid[:,:G_low_ids[0],:]), axis=1)
elif merge_type == 'keep_second':
pass
elif merge_type == 'random':
pass
else:
raise ValueError("Merge type %s not recognised! Please check and try again!" % merge_type)
elif direction == 'z':
pass
# Return combined grid and results of overlap analysis, if determined
if overlap_analysis:
return G_comb, G_overlap
else:
return G_comb
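# A usage sketch for combining two overlapping grids along the y-direction (the
# only direction fully implemented above), optionally returning the overlap
# mismatch analysis as a second grid:
#
#   G_comb, G_overlap = combine_grids(G1, G2, 'y', merge_type='keep_first',
#                                     overlap_analysis=True)
#   G_overlap.export_to_vtk(vtk_filename="overlap_mismatch")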
def optimial_cell_increase(starting_cell_width, n_cells, width):
"""Determine an array with optimal cell width for a defined starting cell width,
total number of cells, and total width
    Basically, this function optimises a constant increase factor between adjacent
    cells so that the cell widths sum to the requested total width.
**Arguments**:
- *starting_cell_width* = float : width of starting/ inner cell
- *n_cells* = int : total number of cells
    - *width* = float : total width (sum over all elements in array)
**Returns**:
del_array : numpy.ndarray with cell discretisations
    Note: optimisation with scipy.optimize - better (analytical?) methods might exist but
I can't think of them at the moment
"""
import scipy.optimize
# define some helper functions
def width_sum(inc_factor, inner_cell, n_cells, total_width):
return sum(del_array(inc_factor, inner_cell, n_cells)) - total_width
def del_array(inc_factor, inner_cell, n_cells):
return np.array([inner_cell * inc_factor**i for i in range(n_cells)])
# now the actual optimisation step:
opti_factor = scipy.optimize.fsolve(width_sum, 1.1, (starting_cell_width, n_cells, width))
# return the discretisation array
return del_array(opti_factor, starting_cell_width, n_cells).flatten()
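# Example (values illustrative): 20 cells growing from a 10 m inner cell to span
# a total width of 1000 m; the returned array sums (approximately) to that width:
#
#   dx = optimial_cell_increase(10., 20, 1000.)
#   print(sum(dx))   # ~1000.0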
if __name__ == '__main__':
pass
| mit |
booya-at/paraBEM | examples/plots/panel_src.py | 2 | 1530 | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import parabem
from parabem.pan3d import src_3_0_vsaero
from parabem.utils import check_path
pnt1 = parabem.PanelVector3(-1, -1, 0)
pnt2 = parabem.PanelVector3(1, -1, 0)
pnt3 = parabem.PanelVector3(1, 1, 0)
pnt4 = parabem.PanelVector3(-1, 1, 0)
source = parabem.Panel3([pnt1, pnt2, pnt3, pnt4])
fig = plt.figure()
x = np.arange(-4, 4, 0.01)
y = []
for xi in x:
target1 = parabem.Vector3(xi, 0, 0.0)
target2 = parabem.Vector3(xi, 0, 0.5)
target3 = parabem.Vector3(xi, 0, 1)
val1 = src_3_0_vsaero(target1, source)
val2 = src_3_0_vsaero(target2, source)
val3 = src_3_0_vsaero(target3, source)
y.append([val1, val2, val3])
ax1 = fig.add_subplot(131)
ax1.plot(x, y)
y = []
for xi in x:
target1 = parabem.Vector3(0, xi,0.0)
target2 = parabem.Vector3(0, xi, 0.5)
target3 = parabem.Vector3(0, xi, 1)
val1 = src_3_0_vsaero(target1, source)
val2 = src_3_0_vsaero(target2, source)
val3 = src_3_0_vsaero(target3, source)
y.append([val1, val2, val3])
ax2 = fig.add_subplot(132)
ax2.plot(x, y)
y = []
for xi in x:
target1 = parabem.Vector3(0, 0, xi)
target2 = parabem.Vector3(0.5, 0, xi)
target3 = parabem.Vector3(1, 0, xi)
val1 = src_3_0_vsaero(target1, source)
val2 = src_3_0_vsaero(target2, source)
val3 = src_3_0_vsaero(target3, source)
y.append([val1, val2, val3])
ax3 = fig.add_subplot(133)
ax3.plot(x, y)
plt.savefig(check_path("results/3d/source.png"))
| gpl-3.0 |
hrjn/scikit-learn | sklearn/svm/tests/test_bounds.py | 49 | 2386 | import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.utils.testing import assert_true, raises
from sklearn.utils.testing import assert_raise_message
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
# loss='l2' should raise ValueError
assert_raise_message(ValueError, "loss type not in",
l1_min_c, dense_X, Y1, "l2")
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/numpydoc/tests/test_docscrape.py | 3 | 21833 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
doc_yields_txt = """
Test generator
Yields
------
a : int
The number of apples.
b : int
The number of bananas.
int
The number of unknowns.
"""
doc_yields = NumpyDocString(doc_yields_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N, N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert_equal(len(doc['Returns']), 2)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert_equal(arg, 'list of str')
assert_equal(arg_type, '')
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_yields():
section = doc_yields['Yields']
assert_equal(len(section), 3)
truth = [('a', 'int', 'apples.'),
('b', 'int', 'bananas.'),
('int', '', 'unknowns.')]
for (arg, arg_type, desc), (arg_, arg_type_, end) in zip(section, truth):
assert_equal(arg, arg_)
assert_equal(arg_type, arg_type_)
assert desc[0].startswith('The number of')
assert desc[0].endswith(end)
def test_returnyield():
doc_text = """
Test having returns and yields.
Returns
-------
int
The number of apples.
Yields
------
a : int
The number of apples.
b : int
The number of bananas.
"""
assert_raises(ValueError, NumpyDocString, doc_text)
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
# doc_txt has the order of Notes and See Also sections flipped.
# This should be handled automatically, and so, one thing this test does
# is to make sure that See Also precedes Notes in the output.
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_yield_str():
non_blank_line_by_line_compare(str(doc_yields),
"""Test generator
Yields
------
a : int
The number of apples.
b : int
The number of bananas.
int
The number of unknowns.
.. index:: """)
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
def test_sphinx_yields_str():
sphinx_doc = SphinxDocString(doc_yields_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""Test generator
:Yields:
**a** : int
The number of apples.
**b** : int
The number of bananas.
int
The number of unknowns.
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
def my_func(a, b, **kwargs):
pass
fdoc = FunctionDoc(func=my_func)
assert_equal(fdoc['Signature'], 'my_func(a, b, \*\*kwargs)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name,_,desc = doc5['Warns'][0]
assert_equal(name,'SomeWarning')
assert_equal(desc,['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
class SubDummy(Dummy):
"""
Subclass of Dummy class.
"""
def ham(self, c, d):
"""Cheese\n\nNo cheese.\nOverloaded Dummy.ham"""
pass
def bar(self, a, b):
"""Bar\n\nNo bar"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(SubDummy, config=dict(show_class_members=True,
show_inherited_class_members=False))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'bar' in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' not in str(doc), str(doc)
doc = cls(SubDummy, config=dict(show_class_members=True,
show_inherited_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'bar' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
x : float
Some parameter
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
x : float
Some parameter
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
class Foo:
@property
def x(self):
"""Test attribute"""
return None
doc = SphinxClassDoc(Foo, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
.. autosummary::
:toctree:
x
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
if __name__ == "__main__":
import nose
nose.run()
| gpl-3.0 |
maxlikely/scikit-learn | sklearn/naive_bayes.py | 2 | 14983 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD Style.
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
import warnings
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize, LabelBinarizer
from .utils import array2d, atleast2d_or_csr
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils import check_arrays
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(BaseEstimator, ClassifierMixin):
"""Abstract base class for naive Bayes estimators"""
__metaclass__ = ABCMeta
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class
in the model, where classes are ordered arithmetically.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
Attributes
----------
`class_prior_` : array, shape = [n_classes]
probability of each class.
`theta_` : array, shape = [n_classes, n_features]
mean of each feature per class
`sigma_` : array, shape = [n_classes, n_features]
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns self.
"""
X, y = check_arrays(X, y, sparse_format='dense')
n_samples, n_features = X.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have incompatible shapes")
self.classes_ = unique_y = np.unique(y)
n_classes = unique_y.shape[0]
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
epsilon = 1e-9
for i, y_i in enumerate(unique_y):
self.theta_[i, :] = np.mean(X[y == y_i, :], axis=0)
self.sigma_[i, :] = np.var(X[y == y_i, :], axis=0) + epsilon
self.class_prior_[i] = np.float(np.sum(y == y_i)) / n_samples
return self
def _joint_log_likelihood(self, X):
X = array2d(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
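    # The loop above evaluates, for each class c, the unnormalised posterior
    #   log P(c) + sum_i log N(x_i | theta_[c, i], sigma_[c, i])
    # with one independent Gaussian per feature; predict() takes the argmax over
    # classes and predict_proba() normalises the result via logsumexp.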
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def fit(self, X, y, sample_weight=None, class_prior=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = atleast2d_or_csr(X)
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
n_classes = len(self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
if X.shape[0] != Y.shape[0]:
msg = "X and y have incompatible shapes."
if issparse(X):
msg += "\nNote: Sparse matrices cannot be indexed w/ boolean \
masks (use `indices=True` in CV)."
raise ValueError(msg)
if sample_weight is not None:
Y *= array2d(sample_weight).T
if class_prior is not None:
warnings.warn('class_prior has been made an ``__init__`` parameter'
' and will be removed from fit in version 0.15.',
DeprecationWarning)
else:
class_prior = self.class_prior
if class_prior:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
y_freq = Y.sum(axis=0)
self.class_log_prior_ = np.log(y_freq) - np.log(y_freq.sum())
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
# N_c_i is the count of feature i in all samples of class c.
# N_c is the denominator.
N_c, N_c_i = self._count(X, Y)
self.feature_log_prob_ = np.log(N_c_i) - np.log(N_c.reshape(-1, 1))
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`intercept_`, `class_log_prior_` : array, shape = [n_classes]
Smoothed empirical log probability for each class.
`feature_log_prob_`, `coef_` : array, shape = [n_classes, n_features]
Empirical log probability of features
given a class, P(x_i|y).
(`intercept_` and `coef_` are properties
referring to `class_log_prior_` and
`feature_log_prob_`, respectively.)
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, Y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
N_c_i = safe_sparse_dot(Y.T, X) + self.alpha
N_c = np.sum(N_c_i, axis=1)
return N_c, N_c_i
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
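# A minimal usage sketch for the estimator above, assuming small random count
# data in the spirit of the docstring example; the seed and toy shapes are
# illustrative only, not part of the original module.
def _multinomial_nb_sketch():
    rng = np.random.RandomState(1)
    X = rng.randint(5, size=(6, 100))   # toy word-count matrix
    y = np.array([1, 2, 3, 4, 5, 6])    # one class per sample
    clf = MultinomialNB(alpha=1.0, fit_prior=True).fit(X, y)
    # on its own training data the classifier is expected to recover label 3
    return clf.predict(X[2:3])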
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`class_log_prior_` : array, shape = [n_classes]
Log probability of each class (smoothed).
`feature_log_prob_` : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schütze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234–265.
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41–48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
N_c_i = safe_sparse_dot(Y.T, X) + self.alpha
N_c = Y.sum(axis=0) + self.alpha * Y.shape[1]
return N_c, N_c_i
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
X_neg_prob = (neg_prob.sum(axis=1)
- safe_sparse_dot(X, neg_prob.T))
jll = safe_sparse_dot(X, self.feature_log_prob_.T) + X_neg_prob
return jll + self.class_log_prior_
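        # The computation above relies on the per-class identity (with
        # p_ci = exp(feature_log_prob_[c, i])):
        #     sum_i [ x_i * log(p_ci) + (1 - x_i) * log(1 - p_ci) ]
        #         = sum_i log(1 - p_ci) + sum_i x_i * (log(p_ci) - log(1 - p_ci))
        # so the (1 - X) term never has to be materialised, which keeps the
        # formula cheap when X is sparse.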
| bsd-3-clause |
h2educ/scikit-learn | sklearn/utils/tests/test_validation.py | 79 | 18547 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
    # Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
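# A small companion check in the same spirit as test_ordering above: asking
# for C order on a Fortran-ordered float32 array is assumed to yield a
# C-contiguous copy while preserving the requested dtype.
def test_check_array_order_copy_sketch():
    X = np.asfortranarray(np.ones((4, 3), dtype=np.float32))
    X_checked = check_array(X, dtype=np.float32, order='C')
    assert_true(X_checked.flags['C_CONTIGUOUS'])
    assert_false(X_checked is X)
    assert_equal(X_checked.dtype, np.float32)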
| bsd-3-clause |
gpfreitas/bokeh | bokeh/crossfilter/models.py | 40 | 30635 | from __future__ import absolute_import
import logging
import six
import pandas as pd
import numpy as np
from ..plotting import curdoc
from ..models import ColumnDataSource, GridPlot, Panel, Tabs, Range
from ..models.widgets import Select, MultiSelect, InputWidget
# crossfilter plotting utilities
from .plotting import make_histogram_source, make_histogram, cross, hide_axes
from .plugins import CrossScatterPlugin, CrossBarPlugin, CrossLinePlugin
# bokeh plotting functions
from ..plot_object import PlotObject
from ..properties import Dict, Enum, Instance, List, String, Any, Int
logger = logging.getLogger(__name__)
class DiscreteFacet(object):
"""Pairing of a field and a unique value, representing a subset of the
total data."""
def __init__(self, field, value, label=None):
"""Sets object properties and creates label if not provided.
Args:
field (str): name of the column
value: unique value defined for the column
label (str, optional): string representation of the value
"""
if label is None:
label = str(value)
self.field = field
self.label = label
self._value = value
def __repr__(self):
return "%s:%s"%(self.field, self.label)
def filter(self, df):
"""Filters the provided DataFrame to the subset corresponding to value.
Args:
df (DataFrame): contains a column of ``field``
Returns:
DataFrame: filtered to rows, where column ``field`` has values
equal to ``_value``.
"""
return df[df[self.field] == self._value]
class ContinuousFacet(DiscreteFacet):
"""Represents a range of values for a field in a DataFrame."""
def __init__(self, field, value, bins, label=None):
"""Calls parent ``DiscreteFacet`` and stores bins for later filtering.
Args:
field (str): name of the column
value (str): center of range of values in the column
bins (list[float]): start and inclusive stop value for the bin
label (str, optional): string representation
"""
super(ContinuousFacet, self).__init__(field, value, label=label)
self.bins = bins
def filter(self, df):
"""Filters the provided DataFrame to the subset corresponding to bins.
Args:
df (DataFrame): contains a column of ``field``
Returns:
DataFrame: filtered to rows, where column ``field`` has values
within the bounds of ``bins``.
"""
if self.bins[0] is not None:
df = df[df[self.field] > self.bins[0]]
if self.bins[1] is not None:
df = df[df[self.field] <= self.bins[1]]
return df
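# A small sketch of how the two facet types slice a frame, assuming a toy
# pandas DataFrame with a numeric 'year' column; the values are illustrative
# only, not part of the original module.
def _facet_filter_sketch():
    df = pd.DataFrame({'year': [2000, 2005, 2010], 'value': [1, 2, 3]})
    recent = ContinuousFacet('year', 2007.5, [2005, 2010])  # keeps 2005 < year <= 2010
    exact = DiscreteFacet('year', 2000)                     # keeps year == 2000
    return recent.filter(df), exact.filter(df)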
class CrossFilter(PlotObject):
"""Interactive filtering and faceting application with multiple plot types"""
# identify properties for the data
columns = List(Dict(String, Any))
data = Instance(ColumnDataSource)
filtered_data = Instance(ColumnDataSource)
# list of datasources to use for filtering widgets
filter_sources = Dict(String, Instance(ColumnDataSource))
# list of columns we are filtering
filtering_columns = List(String)
# dict of column name to filtering widgets
filter_widgets = Dict(String, Instance(PlotObject))
# dict which aggregates all the selections from the different filtering
# widgets
filtered_selections = Dict(String, Dict(String, Any))
# list of facet vars
facet_x = List(String, default=[])
facet_y = List(String, default=[])
facet_tab = List(String, default=[])
# the displayed plot object
plot = Instance(PlotObject)
x_range = Instance(Range)
y_range = Instance(Range)
# configuration properties for the plot
plot_type = Enum("line", "scatter", "bar")
plot_map = {'line': CrossLinePlugin,
'scatter': CrossScatterPlugin,
'bar': CrossBarPlugin}
x = String
y = String
agg = String
color = String
title = String
height = Int()
width = Int()
# identify the selector/drop-down properties
plot_selector = Instance(Select)
x_selector = Instance(Select)
y_selector = Instance(Select)
agg_selector = Instance(Select)
def __init__(self, *args, **kwargs):
"""Creates original and filtered ColumnDataSource and handles defaults.
The df and starting configuration are only provided the first time
init is called, within the create method.
Kwargs:
df (DataFrame): the data to use in the crossfilter app
plot_type (str, optional): starting plot type
agg (str, optional): starting aggregation type
"""
if 'df' in kwargs:
self._df = kwargs.pop('df')
# initialize a "pure" and filtered data source based on df
kwargs['data'] = ColumnDataSource(data=self.df)
kwargs['filtered_data'] = ColumnDataSource(data=self.df)
# default plot type
if 'plot_type' not in kwargs:
kwargs['plot_type'] = "scatter"
# default aggregation type
if 'agg' not in kwargs:
kwargs['agg'] = 'sum'
if 'plot_map' in kwargs:
self.plot_map = kwargs.pop('plot_map')
super(CrossFilter, self).__init__(**kwargs)
@classmethod
def create(cls, **kwargs):
"""Performs all one-time construction of bokeh objects.
This classmethod is required due to the way that bokeh handles the
python and javascript components. The initialize method will be
called each additional time the app is updated (including once in
the create method), but the PlotObject infrastructure will find that
the object already exists in any future calls, and will not create a
new object.
Kwargs:
df (DataFrame): the data to use in the crossfilter app
plot_type (str, optional): starting plot type
agg (str, optional): starting aggregation type
"""
obj = cls(**kwargs)
obj.set_metadata()
choices = obj.make_plot_choices()
obj.update_plot_choices(choices)
obj.set_plot()
obj.set_input_selector()
return obj
def set_input_selector(self):
"""Creates and configures each selector (drop-down menu)."""
col_names = [x['name'] for x in self.columns]
col_names.append('None')
self.plot_selector = Select.create(
title="PlotType",
name="plot_type",
value=self.plot_type,
options=["line", "scatter", "bar"],
)
self.x_selector = Select.create(
name="x",
value=self.x,
options=col_names,
)
self.y_selector = Select.create(
name="y",
value=self.y,
options=col_names,
)
self.agg_selector = Select.create(
name='agg',
value=self.agg,
options=['sum', 'mean', 'last', 'count', 'percent'],
)
def update_plot_choices(self, input_dict):
"""Sets object attributes corresponding to input_dict's values.
Args:
input_dict (dict): dict with x, y, and plot_type keys
"""
for k, v in input_dict.items():
if getattr(self, k) is None:
setattr(self, k, v)
def get_plot_class(self):
"""Return the class for the current plot selection."""
return self.plot_map[self.plot_type]
def column_descriptor_dict(self):
"""Creates column stats dict with keys of column names.
Returns:
dict: dict with key per column in data, where values are column stats
"""
column_descriptors = {}
for x in self.columns:
column_descriptors[x['name']] = x
return column_descriptors
@property
def continuous_columns(self):
"""Returns list of column descriptors for the non-Discrete columns.
Returns:
list(dict): list of dicts, containing metadata about columns
"""
return [x for x in self.columns if x['type'] != 'DiscreteColumn']
@property
def discrete_columns(self):
"""Returns list of column descriptors for the Discrete columns.
Returns:
list(dict): list of dicts, containing metadata about columns
"""
return [x for x in self.columns if x['type'] == 'DiscreteColumn']
def make_plot_choices(self):
"""Selects first two continuous columns for x,y during initial setup
Returns:
dict: x, y, and plot_type keys and values for initial setup
"""
# prefer continuous columns to initialize with, otherwise use what we have
if len(self.continuous_columns) > 1:
x, y = [x['name'] for x in self.continuous_columns[:2]]
else:
x, y = [x['name'] for x in self.columns[:2]]
return {'x': x, 'y': y, 'plot_type': 'scatter'}
def set_plot(self):
"""Makes and sets the plot based on the current configuration of app."""
self.update_xy_ranges(source=self.df)
plot = self.make_plot()
self.plot = plot
curdoc()._add_all()
def make_plot(self):
"""Makes the correct plot layout type, based on app's current config.
Returns:
PlotObject: one plot, grid of plots, or tabs of plots/grids of plots
"""
if self.facet_tab:
facets = self.make_facets(dimension='tab')
# generate a list of panels, containing plot/plots for each facet
tabs = [self.make_tab(content=self.create_plot_page(
tab_facet=facet), tab_label=self.facet_title(facet)) for facet
in facets]
return Tabs(tabs=tabs)
else:
return self.create_plot_page()
def create_plot_page(self, tab_facet=None):
"""Generates a single visible page of a plot or plots.
Args:
tab_facet (DiscreteFacet or ContinuousFacet): a facet to filter on
Returns:
PlotObject: a single or grid of plots
"""
# no faceting
if all([len(self.facet_x) == 0,
len(self.facet_y) == 0]):
plot_page = self.make_single_plot(facet=tab_facet)
# x xor y faceting
if all([(len(self.facet_x) != 0) ^ (len(self.facet_y) != 0)]):
plot_page = self.make_1d_facet_plot(facet=tab_facet)
# x and y faceting
if all([len(self.facet_x) != 0,
len(self.facet_y) != 0]):
plot_page = self.make_2d_facet_plot(facet=tab_facet)
if isinstance(plot_page, GridPlot):
self.apply_grid_style(plot_page)
return plot_page
@staticmethod
def make_tab(content, tab_label):
"""Creates a container for the contents of a tab.
Args:
content (PlotObject): the primary content of the tab
tab_label (str): the text to place in the tab
Returns:
Panel: represents a single tab in a group of tabs
"""
return Panel(child=content, title=tab_label)
def make_facets(self, dimension):
"""Creates combination of all facets for the provided dimension
Args:
dimension (str): name of the dimension to create facets for
Returns:
list(list(DiscreteFacet or ContinuousFacet)): list of list of
unique facet combinations
"""
if dimension == 'x':
facets = self.facet_x
elif dimension == 'y':
facets = self.facet_y
else:
facets = self.facet_tab
# create facets for each column
column_descriptor_dict = self.column_descriptor_dict()
all_facets = [[]]
for field in facets:
# create facets from discrete columns
if column_descriptor_dict[field]['type'] == 'DiscreteColumn':
field_facets = [DiscreteFacet(field, val) for val in
np.unique(self.df[field].values)]
# combine any facets as required
all_facets = cross(all_facets, field_facets)
else:
# create quantile based discrete data and pairs of bins
categorical, bins = pd.qcut(self.df[field], 4, retbins=True)
cats = categorical.cat.categories
bins = [[bins[idx], bins[idx + 1]] for idx in
range(len(bins) - 1)]
bins[0][0] = None
# create list of facets
field_facets = [ContinuousFacet(field, value, bin) for
bin, value in zip(bins, cats)]
# combine any facets as required
all_facets = cross(all_facets, field_facets)
return all_facets
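    # Note: ``cross`` (imported from .plotting) is used here as a cartesian
    # product over facet lists; crossing [[]] with [A, B] is expected to give
    # [[A], [B]], and crossing that result with [C, D] gives
    # [[A, C], [A, D], [B, C], [B, D]].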
@staticmethod
def facet_title(facets):
"""Joins list of facets by commas.
Args:
facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
which are a combination of column and unique value within it
Returns:
str: string representation of the combination of facets
"""
title = ",".join([str(x) for x in facets])
return title
def facet_data(self, facets, df=None):
"""Filters data to the rows associated with the given facet.
Args:
facets (list(DiscreteFacet or ContinuousFacet)): list of facets,
which are a combination of column and unique value within it
df (DataFrame, optional): data to be filtered on
Returns:
DataFrame: filtered DataFrame based on provided facets
"""
if df is None:
df = self.filtered_df
for f in facets:
df = f.filter(df)
return df
def make_1d_facet_plot(self, facet=None):
"""Creates the faceted plots when a facet is added to the x axis.
Returns:
GridPlot: a grid of plots, where each plot has subset of data
"""
if self.facet_x:
all_facets = self.make_facets('x')
else:
all_facets = self.make_facets('y')
plots = []
# loop over facets and create single plots for data subset
for facets in all_facets:
title = self.facet_title(facets)
if facet:
facets += facet
df = self.facet_data(facets, self.filtered_df)
plot = self.make_single_plot(
df=df, title=title, plot_height=200, plot_width=200,
tools="pan,wheel_zoom,reset", facet=facets
)
# append single plot to list of plots
plots.append(plot)
# create squarish grid based on number of plots
chunk_size = int(np.ceil(np.sqrt(len(plots))))
# create list of lists of plots, where each list of plots is a row
grid_plots = []
for i in range(0, len(plots), chunk_size):
chunk = plots[i:i + chunk_size]
grid_plots.append(chunk)
self.hide_internal_axes(grid_plots)
# return the grid as the plot
return GridPlot(children=grid_plots, plot_width=200*chunk_size)
def make_2d_facet_plot(self, facet=None):
"""Creates the grid of plots when there are both x and y facets.
Returns:
GridPlot: grid of x and y facet combinations
"""
# ToDo: gracefully handle large combinations of facets
all_facets_x = self.make_facets('x')
all_facets_y = self.make_facets('y')
grid_plots = []
# y faceting down column
for facets_y in all_facets_y:
# x faceting across row
row = []
for facets_x in all_facets_x:
# build the facets and title
facets = facets_x + facets_y
title = self.facet_title(facets)
# must filter by any extra facets provided for facet tab
if facet:
filter_facets = facets + facet
else:
filter_facets = facets
df = self.facet_data(filter_facets, self.filtered_df)
plot = self.make_single_plot(
df=df, title=title, plot_height=200, plot_width=200,
tools="pan,wheel_zoom,reset", facet=facets
)
row.append(plot)
# append the row to the list of rows
grid_plots.append(row)
self.hide_internal_axes(grid_plots)
# return the grid of plots as the plot
return GridPlot(children=grid_plots, plot_width=200*len(all_facets_x))
@staticmethod
def apply_facet_style(plot):
"""Applies facet-specific style for a given plot.
Override this method to modify the look of a customized CrossFilter
for all plugins. Or, apply custom styles in the plugin, since the
plugin will be told if it is currently being faceted.
"""
plot.title_text_font_size = "9pt"
plot.min_border = 0
def apply_single_plot_style(self, plot):
"""Applies styles when we have only one plot.
Override this method to modify the look of a customized CrossFilter
for all plugins.
"""
plot.min_border_left = 60
def apply_grid_style(self, grid_plot):
"""Applies facet-specific style for the grid of faceted plots.
Override this method to modify the look of a customized CrossFilter
for all plugins. Or, apply custom styles in the plugin, since the
plugin will be told if it is currently being faceted.
"""
grid_plot.title_text_font_size = "12pt"
grid_plot.title_text_font_style = "bold"
grid_plot.title = self.title
@staticmethod
def hide_internal_axes(grid_plots):
"""Hides the internal axes for a grid of plots.
Args:
grid_plots (list(list(Figure))): list of rows (list), containing plots
"""
for i, row in enumerate(grid_plots):
is_bottom = i + 1 == len(grid_plots)
for j, plot in enumerate(row):
if j != 0:
if is_bottom:
hide_axes(plot, axes='y')
else:
hide_axes(plot)
elif j == 0 and not is_bottom:
hide_axes(plot, axes='x')
def make_single_plot(self, df=None, title=None,
plot_width=700,
plot_height=680,
tools="pan,wheel_zoom,box_zoom,save,resize,"
"box_select,reset",
facet=None):
"""Creates a plot based on the current app configuration.
Args:
df (DataFrame, optional): data to use for the plot
title (str, optional): plot title
plot_width (float, optional): width of plot in pixels
plot_height (float, optional): height of plot in pixels
tools (str, optional): comma separated string of tool names
Returns:
PlotObject: the generated plot
"""
faceting = False
# df is not provided when we are not faceting
if df is None:
source = self.filtered_data
else:
df = self.facet_data(facets=facet, df=df)
# create column data source with filtered df
source = ColumnDataSource(data=df)
faceting = True
# check for tab faceting and filter if provided
if facet:
df = self.facet_data(facets=facet, df=df)
source = ColumnDataSource(data=df)
# get the helper class for the plot type selected
plot_class = self.get_plot_class()
# initialize the plugin class
plugin = plot_class(source=source,
title_text_font_size="12pt",
                            title_text_font_style="bold",
plot_height=plot_height,
plot_width=plot_width,
tools=tools,
title=title,
x_range=self.x_range,
y_range=self.y_range,
facet=faceting,
crossfilter=self)
# generate plot
plot = plugin.get_plot()
# apply faceting-specific styling if required
if facet:
self.apply_facet_style(plot)
self.title = plugin.title
else:
self.apply_single_plot_style(plot)
self.title = plot.title
return plot
def update_xy_ranges(self, source):
"""Updates common x_range, y_range to use for creating figures.
Args:
source (ColumnDataSource): the source to return correct range for
"""
plt_cls = self.get_plot_class()
x_range, y_range = plt_cls.make_xy_ranges(cf=self)
# store x and y range from the plot class
self.x_range = x_range
self.y_range = y_range
def plot_attribute_change(self, obj, attrname, old, new):
"""Updates app's attribute and plot when view configuration changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
setattr(self, obj.name, new)
self.set_plot()
def facet_change(self, obj, attrname, old, new):
"""Updates plot when any facet configuration changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
self.set_plot()
@property
def df(self):
"""The core data that is used by the app for plotting.
Returns:
DataFrame: the original data structure
"""
if hasattr(self, '_df'):
return self._df
else:
if self.data:
return self.data.to_df()
@property
def filtered_df(self):
"""The subset of the data to use for plotting.
Returns:
            DataFrame: the data after any active filters have been applied
"""
if hasattr(self, '_df'):
return self._df
else:
if self.filtered_data:
return self.filtered_data.to_df()
def update(self, **kwargs):
"""Updates CrossFilter attributes each time the model changes.
The events are setup each time so that we can add event handlers to
the selection/filtering widgets as they are added.
"""
super(CrossFilter, self).update(**kwargs)
self.setup_events()
def setup_events(self):
"""Registers events each time the app changes state."""
# watch the app's filtering_columns attribute to setup filters
self.on_change('filtering_columns', self, 'setup_filter_widgets')
# register any available filter widget
for obj in self.filter_widgets.values():
if isinstance(obj, InputWidget):
obj.on_change('value', self, 'handle_filter_selection')
# watch app column data source attribute for changes
for obj in self.filter_sources.values():
obj.on_change('selected', self, 'handle_filter_selection')
# selector event registration
if self.plot_selector:
self.plot_selector.on_change('value', self, 'plot_attribute_change')
if self.x_selector:
self.x_selector.on_change('value', self, 'plot_attribute_change')
if self.y_selector:
self.y_selector.on_change('value', self, 'plot_attribute_change')
if self.agg_selector:
self.agg_selector.on_change('value', self, 'plot_attribute_change')
# register to watch the app's facet attributes
self.on_change('facet_x', self, 'facet_change')
self.on_change('facet_y', self, 'facet_change')
self.on_change('facet_tab', self, 'facet_change')
def handle_filter_selection(self, obj, attrname, old, new):
"""Filters the data source whenever a filter widget changes.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
df = self.df
# loop over the column metadata
for descriptor in self.columns:
colname = descriptor['name']
# handle discrete selections
if descriptor['type'] == 'DiscreteColumn' and \
colname in self.filter_widgets:
selected = self.filter_widgets[colname].value
if not selected:
continue
if isinstance(selected, six.string_types):
                    df = df[df[colname] == selected]
else:
df = df[np.in1d(df[colname], selected)]
# handle time or continuous selections
elif descriptor['type'] in ('TimeColumn', 'ContinuousColumn') and \
colname in self.filter_widgets:
obj = self.filter_sources[colname]
# hack because we don't have true range selection
if not obj.selected:
continue
# TODO: (bev) This works until CF selections are not made on
# [multi]lines and [multi]patches
min_idx = np.min(obj.selected['1d']['indices'])
max_idx = np.max(obj.selected['1d']['indices'])
min_val = obj.data['centers'][min_idx]
max_val = obj.data['centers'][max_idx]
df = df[(df[colname] >= min_val) & (df[colname] <= max_val)]
# update filtered data and force plot update
for colname in self.data.column_names:
self.filtered_data.data[colname] = df[colname]
self.filtered_data._dirty = True
self.set_plot()
def clear_selections(self, obj, attrname, old, new):
"""Updates filter widgets and sources as they are removed.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
diff = set(old) - set(new)
column_descriptor_dict = self.column_descriptor_dict()
# delete any removed filter widgets
if len(diff) > 0:
for col in diff:
metadata = column_descriptor_dict[col]
if metadata['type'] != 'DiscreteColumn':
del self.filter_sources[col]
del self.filter_widgets[col]
# update the data based on latest changes
if diff:
self.handle_filter_selection(obj, attrname, old, new)
def setup_filter_widgets(self, obj, attrname, old, new):
"""Creates new filter widget each time a new column is added to filters.
Args:
obj (Widget): the object that has an attribute change
attrname (str): name of the attribute
old (type): the previous value of unknown type
new (type): the new value of unknown type
"""
self.clear_selections(obj, attrname, old, new)
# add new widget as required for each column set to filter on
column_descriptor_dict = self.column_descriptor_dict()
for col in self.filtering_columns:
metadata = column_descriptor_dict[col]
            if col not in self.filter_widgets:
# discrete
if metadata['type'] == 'DiscreteColumn':
select = MultiSelect.create(
name=col,
options=self.df[col].unique().tolist())
self.filter_widgets[col] = select
# continuous
else:
source = make_histogram_source(self.df[col])
self.filter_sources[col] = source
hist_plot = make_histogram(self.filter_sources[col],
plot_width=200, plot_height=100,
title_text_font_size='8pt',
tools='box_select'
)
hist_plot.title = col
self.filter_widgets[col] = hist_plot
curdoc()._add_all()
def set_metadata(self):
"""Creates a list of dicts, containing summary info for each column.
The descriptions are stored in the ``columns`` property.
"""
descriptors = []
columns = self.df.columns
for c in columns:
# get description for column from pandas DataFrame
desc = self.df[c].describe()
# DiscreteColumn
if self.df[c].dtype == object:
descriptors.append({
'type': "DiscreteColumn",
'name': c,
'count': desc['count'],
'unique': desc['unique'],
'top': desc['top'],
'freq': desc['freq'],
})
# TimeColumn
elif self.df[c].dtype == np.datetime64:
descriptors.append({
'type': "TimeColumn",
'name': c,
'count': desc['count'],
'unique': desc['unique'],
'first': desc['first'],
'last': desc['last'],
})
# ContinuousColumn
else:
descriptors.append({
'type': "ContinuousColumn",
'name': c,
'count': desc['count'],
'mean': "%.2f"%desc['mean'],
'std': "%.2f"%desc['std'],
'min': "%.2f"%desc['min'],
'max': "%.2f"%desc['max'],
})
self.columns = descriptors
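# A minimal end-to-end sketch, assuming the crossfilter plugins of this Bokeh
# version are importable and a Bokeh document/session is active; the toy
# DataFrame and its column names are assumptions, not part of the original
# module, and whether the plot actually renders depends on that session.
def _crossfilter_usage_sketch():
    df = pd.DataFrame({'mpg': [18.0, 24.0, 31.0],
                       'hp': [130.0, 95.0, 65.0],
                       'origin': ['us', 'eu', 'jp']})
    app = CrossFilter.create(df=df, plot_type='scatter', agg='sum')
    return app.plot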
| bsd-3-clause |
xiamike/stanford-ctc | ctc_fast/swbd-utils/errorAnalysis.py | 2 | 5857 | '''
Error analysis
TODO Make generic/modular and move to nn
'''
import numpy as np
import cPickle as pickle
from editDist import edit_distance as ed
#from progressbar import ProgressBar
from colorama import Fore, Back
def disp_corr(hyp, ref):
'''
Display correspondences between hyp and ref
'''
pass
def disp_errs_by_pos(err_by_pos, out_file):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.plot(err_by_pos)
#plt.show()
plt.savefig(out_file)
def disp_err_corr(hyp_corr, ref_corr):
hyp_str = ''
ref_str = ''
assert len(hyp_corr) == len(ref_corr)
for k in xrange(len(hyp_corr)):
if hyp_corr[k] == '[space]':
hc = ' '
elif hyp_corr[k] == '<ins>':
hc = Back.GREEN + ' ' + Back.RESET
else:
hc = hyp_corr[k]
if ref_corr[k] == '[space]':
rc = ' '
elif ref_corr[k] == '<del>':
rc = Back.RED + ' ' + Back.RESET
else:
rc = ref_corr[k]
if hc != rc and len(hc) == 1 and len(rc) == 1:
hc = Back.BLUE + Fore.BLACK + hc + Fore.RESET + Back.RESET
rc = Back.BLUE + Fore.BLACK + rc + Fore.RESET + Back.RESET
hyp_str += hc
ref_str += rc
print hyp_str
print ref_str
def replace_contractions(utt):
while len(utt) and utt[-1] == '[space]':
utt = utt[:-1]
while len(utt) and utt[0] == '[space]':
utt = utt[1:]
# TODO Replace in training text instead
utt_str = ''.join([c if c != '[space]' else ' ' for c in utt])
'''
utt_str = utt_str.replace('can\'t', 'cannot')
utt_str = utt_str.replace('let\'s', 'let us')
# Possessive vs " is"
utt_str = utt_str.replace('ere\'s', 'ere is')
utt_str = utt_str.replace('that\'s', 'that is')
utt_str = utt_str.replace('he\'s', 'he is')
utt_str = utt_str.replace('it\'s', 'it is')
utt_str = utt_str.replace('how\'s', 'how is')
utt_str = utt_str.replace('what\'s', 'what is')
utt_str = utt_str.replace('when\'s', 'when is')
utt_str = utt_str.replace('why\'s', 'why is')
utt_str = utt_str.replace('\'re', ' are')
utt_str = utt_str.replace('i\'m', 'i am')
utt_str = utt_str.replace('\'ll', ' will')
utt_str = utt_str.replace('\'d', ' would') # had / would ambiguity
utt_str = utt_str.replace('n\'t', ' not')
utt_str = utt_str.replace('\'ve', ' have')
utt_str = utt_str.replace(' uh', '')
utt_str = utt_str.replace(' um', '')
utt_str = utt_str.replace('uh ', '')
utt_str = utt_str.replace('um ', '')
'''
utt = [c if c != ' ' else '[space]' for c in list(utt_str)]
return utt
def compute_and_display_stats(hyps, refs, hypscores, refscores, numphones, subsets, subset=None, display=False):
# Filter by subset
if subset:
print 'USING SUBSET: %s' % subset
filt = subsets == subset
hyps = hyps[filt]
refs = refs[filt]
hypscores = hypscores[filt]
refscores = refscores[filt]
numphones = numphones[filt]
'''
Compute stats
'''
hyp_lens = [len(s) for s in hyps]
ref_lens = [len(s) for s in refs]
max_hyp_len = max([len(hyp) for hyp in hyps])
tot_errs_by_pos = np.zeros(max_hyp_len)
counts_by_pos = np.zeros(max_hyp_len, dtype=np.int32)
tot_dist = tot_eq = tot_ins = tot_dels = tot_subs = 0.0
num_sents_correct = 0
correct_sents_len = 0
#pbar = ProgressBar(maxval=len(hyps)).start()
k = 0
for (hyp, ref, hypscore, refscore) in reversed(zip(hyps, refs, hypscores, refscores)):
#hyp = replace_contractions(hyp)
dist, eq, ins, dels, subs, errs_by_pos, hyp_corr, ref_corr = ed(hyp, ref)
tot_eq += eq
tot_ins += ins
tot_dels += dels
tot_subs += subs
tot_errs_by_pos[0:errs_by_pos.shape[0]] += errs_by_pos
counts_by_pos[0:errs_by_pos.shape[0]] += 1
k += 1
#pbar.update(k)
if dist == 0:
num_sents_correct += 1
correct_sents_len += len(ref)
tot_dist += dist
if display:
disp_err_corr(hyp_corr, ref_corr)
print
'''
Display aggregate stats
'''
print 'avg len hyp: %f' % np.mean(hyp_lens)
print 'avg len ref: %f' % np.mean(ref_lens)
print 'avg num phones: %f' % np.mean(numphones)
print 'avg ref score: %f' % (sum(refscores) / len(refscores))
print 'avg hyp score: %f' % (sum(hypscores) / len(hypscores))
tot_comp_len = float(np.sum([max(h, r) for (h, r) in zip(hyp_lens, ref_lens)]))
print 'frac eq: %f ins: %f del: %f sub: %f' %\
tuple(np.array([tot_eq, tot_ins, tot_dels, tot_subs]) / tot_comp_len)
print 'CER: %f' % (100.0 * tot_dist / np.sum(numphones))
print '%d/%d sents correct' % (num_sents_correct, len(hyps))
print 'avg len of correct sent: %f' % (correct_sents_len / float(num_sents_correct))
disp_errs_by_pos(tot_errs_by_pos / counts_by_pos, 'err_by_pos.%s.png' % ('all' if not subset else subset))
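# A stripped-down sketch of the CER bookkeeping above, assuming ed() returns
# (dist, eq, ins, dels, subs, errs_by_pos, hyp_corr, ref_corr) as unpacked in
# compute_and_display_stats; the helper name is illustrative only.
def cer_sketch(hyps, refs, numphones):
    tot_dist = 0.0
    for hyp, ref in zip(hyps, refs):
        tot_dist += ed(hyp, ref)[0]
    return 100.0 * tot_dist / np.sum(numphones)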
def main(args):
'''
Read in data
'''
# NOTE Make sure synced with order dumped in runDecode.py
fid = open(args.pk_file, 'rb')
hyps = np.array(pickle.load(fid))
refs = np.array(pickle.load(fid))
hypscores = np.array(pickle.load(fid))
refscores = np.array(pickle.load(fid))
numphones = np.array(pickle.load(fid))
subsets = pickle.load(fid)
fid.close()
compute_and_display_stats(hyps, refs, hypscores, refscores, numphones, subsets, subset=None, display=args.display)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('pk_file', default='hyp.pk', help='Pickle file with data')
parser.add_argument('--display', action='store_true')
args = parser.parse_args()
main(args)
| apache-2.0 |
antiface/mne-python | examples/plot_compute_mne_inverse.py | 21 | 1885 | """
================================================
Compute MNE-dSPM inverse solution on evoked data
================================================
Compute dSPM inverse solution on MNE evoked dataset
and stores the solution in stc files for visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
snr = 3.0
lambda2 = 1.0 / snr ** 2
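# with snr = 3.0 this gives lambda2 = 1. / 9 (about 0.11); an assumed SNR of 3
# is the usual rule of thumb for averaged evoked data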
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=None)
# Save result in stc files
stc.save('mne_%s_inverse' % method)
###############################################################################
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# use peak getter to move vizualization to the time point of the peak
vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
brain.set_data_time_index(time_idx)
# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6)
brain.save_image('dSPM_map.png')
| bsd-3-clause |
dtrckd/pymake | pymake/frontend/drivers.py | 1 | 17689 | from numpy import ma
import numpy as np
from pymake import logger
class DatasetDriver(object):
''' Parse dataset file using pandas'''
_comment = '%'
log = logger
# No pandas here....
@classmethod
def parse_tnet(cls, fn, sep=' '):
        ''' Grammar reverse-engineered from fb/emaileu.txt. Is the tnet format official? '''
cls.log.debug('opening file: %s' % fn)
with open(fn) as f:
content = f.read()
lines = list(filter(None, content.split('\n')))
line1_length = lines[0].strip().split(sep)
edges = {}
if len(line1_length) == 2:
# format 'i j' if edges.
data_file_format = 'txt'
for line in lines:
dyad = line.strip().split(sep)
dyad = '.'.join(dyad)
edges[dyad] = edges.get(dyad, 0) + 1
#edges = [l.strip().split(sep) for l in lines]
elif len(line1_length) == 5:
# format '"date" i j weight'.
data_file_format = 'tnet'
for line in lines:
_line = line.strip().split(sep)
dyad = _line[-3:-1]
dyad = '.'.join(dyad)
w = int(_line[-1])
edges[dyad] = edges.get(dyad, 0) + w
#edges = [l.strip().split(sep)[-3:-1] for l in lines]
edges = np.array([ (e.split('.')[0], e.split('.')[1], w+1) for e, w in edges.items()], dtype=int) -1
edges[:, 0:2] -= edges[:, 0:2].min()
N = edges[:, 0:2].max()+1
g = np.zeros((N,N))
g[tuple(edges[:, :2].T)] = edges[:, 2]
data = dict(data=g)
return data
# No pandas here....
@classmethod
def parse_csv(cls, fn, sep=';'):
        ''' Grammar reverse-engineered from manufacturing.csv '''
cls.log.debug('opening file: %s' % fn)
with open(fn, 'r') as f:
content = f.read()
lines = list(filter(None, content.split('\n')))[1:]
edges = {}
for line in lines:
dyad = line.strip().split(sep)[0:2]
dyad = '.'.join(dyad)
edges[dyad] = edges.get(dyad, 0) + 1
#edges = [l.strip().split(sep)[0:2] for l in lines]
#edges = np.array([ (e[0], e[1]) for e in edges], dtype=int) -1
edges = np.array([ (e.split('.')[0], e.split('.')[1], w+1) for e, w in edges.items()], dtype=int) -1
edges[:, 0:2] -= edges[:, 0:2].min()
N = edges[:, 0:2].max()+1
g = np.zeros((N,N))
g[tuple(edges[:, :2].T)] = edges[:, 2]
data = dict(data=g)
return data
@classmethod
def parse_dancer(cls, fn, sep=';'):
""" Parse Network data depending on type/extension """
import pandas as pd
cls.log.debug('opening file: %s' % fn)
data = pd.read_csv(fn, sep=sep, names=['n', 'feat', 'cluster' ], comment=cls._comment)
parameters = data.dropna()
clusters = parameters['cluster'].values.astype(int)
features = np.array([list(map(float, f.split('|'))) for f in parameters['feat'].values])
data = data.ix[data['cluster'].isna()]
data['cluster'] = 1 # <= the weight
data = data.loc[pd.to_numeric(data['n'], errors='coerce').dropna().index].as_matrix().astype(int)
data[:, 0:2] -= data[:, 0:2].min()
N = data[:, 0:2].max()+1
y = np.zeros((N,N))
e_l = data[:,2] > 0
e_ix = data[:, 0:2][e_l]
ix = list(zip(*e_ix))
y[ix] = data[:,2][e_l]
data = dict(data=y, clusters=clusters, features=features)
return data
@classmethod
def parse_dat(cls, fn, sep="\s+"):
""" Parse Network data depending on type/extension """
import pandas as pd
cls.log.debug('opening file: %s' % fn)
def _row_len(fn):
            ''' Determine the length of a csv row, then break out quickly '''
inside = {'vertices':False, 'edges':False }
data = []
for _line in open(fn):
line = _line.strip()
if line.startswith(('ROW LABELS:', '*vertices')) or inside['vertices']:
if not inside['vertices']:
inside['vertices'] = True
continue
if line.startswith('#') or not line.strip():
inside['vertices'] = False # break
elif line.startswith(('DATA','*edges' )):
inside['vertices'] = False # break
inside['edges'] = True
else:
continue
elif line.startswith(('DATA','*edges' )) or inside['edges']:
if not inside['edges']:
inside['edges'] = True # break
continue
if line.startswith('#') or not line.strip() or len(line.split()) < 2 :
inside['edges'] = False
else:
# Parsing assignation
data.append( line.split() )
break
return len(data[0])
# Sender, Reiceiver, Edges
row_len = _row_len(fn)
if row_len == 3:
cols = ['s', 'r', 'weight']
elif row_len == 2:
cols = ['s', 'r']
else:
raise ValueError('I/O error for dataset file: %s' % fn)
data = pd.read_csv(fn, sep=sep, names=cols, comment=cls._comment)
if len(cols) == 2:
data['weight'] = np.ones(data.shape[0])
cols = ['s', 'r', 'weight']
cond = pd.to_numeric(data['s'], errors='coerce').dropna().index & pd.to_numeric(data['r'], errors='coerce').dropna().index
data = data.loc[cond].as_matrix().astype(int)
data[:, 0:2] -= data[:, 0:2].min()
N = data[:, 0:2].max()+1
y = np.zeros((N,N))
e_l = data[:,2] > 0
e_ix = data[:, 0:2][e_l]
ix = list(zip(*e_ix))
y[ix] = data[:,2][e_l]
data = dict(data=y)
return data
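# A minimal sketch of the dyad-counting scheme shared by the parsers above,
# assuming a tiny in-memory edge list; the "w + 1 ... - 1" dance shifts node
# ids to zero-based while leaving the accumulated weights untouched.
def _edge_aggregation_sketch():
    edges = {}
    for v1, v2 in [(1, 2), (1, 2), (3, 1)]:
        dyad = '%d.%d' % (v1, v2)
        edges[dyad] = edges.get(dyad, 0) + 1
    arr = np.array([(e.split('.')[0], e.split('.')[1], w + 1)
                    for e, w in edges.items()], dtype=int) - 1
    arr[:, 0:2] -= arr[:, 0:2].min()
    N = arr[:, 0:2].max() + 1
    g = np.zeros((N, N))
    g[tuple(arr[:, :2].T)] = arr[:, 2]
    return g  # here g[0, 1] == 2 and g[2, 0] == 1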
class OnlineDatasetDriver(object):
    ''' Stream-parse dataset files line by line, yielding one edge (or node record) at a time '''
_comment = '%'
log = logger
@classmethod
def parse_tnet(cls, fn, sep=' '):
        ''' Grammar reverse-engineered from fb/emaileu.txt. Is the tnet format official? '''
cls.log.debug('opening file: %s' % fn)
for line in open(fn):
line = line.strip()
if not line:
continue
line1_length = line.split(sep)
if len(line1_length) == 2:
# format 'i j' if edges.
data_file_format = 'txt'
v1, v2 = line.strip().split(sep)
w = 1
yield int(v1), int(v2), w, None
elif len(line1_length) == 5:
# format '"date" i j weight'.
data_file_format = 'tnet'
_line = line.strip().split(sep)
v1, v2 = _line[-3:-1]
w = int(_line[-1])
if w == 0:
continue
else:
yield int(v1), int(v2), w, None
@classmethod
def parse_csv(cls, fn, sep=';'):
        ''' Grammar reverse-engineered from manufacturing.csv '''
cls.log.debug('opening file: %s' % fn)
cpt = 0
for line in open(fn):
if cpt == 0:
# Ignore first status line
cpt += 1
continue
v1, v2 = line.strip().split(sep)[0:2]
w = 1
yield int(v1), int(v2), w, None
@classmethod
def parse_dancer(cls, fn, sep=';'):
cls.log.debug('opening file: %s' % fn)
inside = {'vertices':False, 'edges':False }
for line in open(fn):
line = line.strip()
if line.startswith('# Vertices') or inside['vertices']:
if not inside['vertices']:
inside['vertices'] = True
continue
if line.startswith('#') or not line.strip() :
inside['vertices'] = False # break
else:
# Parsing assignation
elements = line.strip().split(sep)
index = int(elements[0])
clust = int(elements[-1])
feats = list(map(float, elements[-2].split('|')))
obj = {'cluster': clust, 'features': feats, 'index':index}
yield obj
elif line.startswith('# Edges') or inside['edges']:
if not inside['edges']:
inside['edges'] = True
continue
if line.startswith('#') or not line.strip() :
inside['edges'] = False # break
else:
# Parse an edge
v1, v2 = line.split(sep)
w = 1
yield int(v1), int(v2), w, None
@classmethod
def parse_dat(cls, fn, sep=" "):
""" Parse Network data depending on type/extension """
cls.log.debug('opening file: %s' % fn)
inside = {'vertices':False, 'edges':False }
for line in open(fn):
line = line.strip()
if line.startswith(('ROW LABELS:', '*vertices')) or inside['vertices']:
if not inside['vertices']:
inside['vertices'] = True
continue
if line.startswith('#') or not line.strip():
inside['vertices'] = False # break
elif line.startswith(('DATA','*edges' )):
inside['vertices'] = False # break
inside['edges'] = True
else:
continue
elif line.startswith(('DATA','*edges' )) or inside['edges']:
if not inside['edges']:
inside['edges'] = True # break
continue
if line.startswith('#') or not line.strip() or len(line.split()) < 2 :
inside['edges'] = False
else:
# Parse an edge row
splitline = line.split(sep)
row_size = len(splitline)
if row_size == 2:
# like .txt
v1, v2 = splitline
w = 1
yield int(v1), int(v2), w, None
elif row_size == 3:
v1, v2 = splitline[0:2]
w = int(splitline[2])
if w == 0:
continue
else:
yield int(v1), int(v2), w, None
else:
raise NotImplementedError
class RawDatasetDriver(object):
''' Parse dataset file using python loop (deprecated) '''
_comment = '%'
log = logger
@classmethod
def parse_tnet(cls, fn, sep=' '):
''' Grammar reverse-engineered from fb/emaileu.txt '''
cls.log.debug('opening file: %s' % fn)
with open(fn) as f:
content = f.read()
lines = list(filter(None, content.split('\n')))
line1_length = lines[0].strip().split(sep)
edges = {}
if len(line1_length) == 2:
# format 'i j' if edges.
data_file_format = 'txt'
for line in lines:
dyad = line.strip().split(sep)
dyad = '.'.join(dyad)
edges[dyad] = edges.get(dyad, 0) + 1
#edges = [l.strip().split(sep) for l in lines]
elif len(line1_length) == 5:
# format '"date" i j weight'.
data_file_format = 'tnet'
for line in lines:
_line = line.strip().split(sep)
dyad = _line[-3:-1]
dyad = '.'.join(dyad)
w = int(_line[-1])
edges[dyad] = edges.get(dyad, 0) + w
#edges = [l.strip().split(sep)[-3:-1] for l in lines]
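# The trailing "-1" shifts the (assumed 1-based) node ids to 0-based;
# weights are stored as w+1 beforehand so the same shift leaves them unchanged.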
edges = np.array([ (e.split('.')[0], e.split('.')[1], w+1) for e, w in edges.items()], dtype=int) -1
N = edges.max() +1
#N = max(list(itertools.chain(*edges))) + 1
g = np.zeros((N,N))
g[tuple(edges[:, :2].T)] = edges[:, 2]
data = dict(data=g)
return data
@classmethod
def parse_csv(cls, fn, sep=';'):
''' Grammar reverse-engineered from manufacturing.csv '''
cls.log.debug('opening file: %s' % fn)
with open(fn) as f:
content = f.read()
lines = list(filter(None, content.split('\n')))[1:]
edges = {}
for line in lines:
dyad = line.strip().split(sep)[0:2]
dyad = '.'.join(dyad)
edges[dyad] = edges.get(dyad, 0) + 1
#edges = [l.strip().split(sep)[0:2] for l in lines]
#edges = np.array([ (e[0], e[1]) for e in edges], dtype=int) -1
edges = np.array([ (e.split('.')[0], e.split('.')[1], w+1) for e, w in edges.items()], dtype=int) -1
N = edges.max() +1
#N = max(list(itertools.chain(*edges))) + 1
g = np.zeros((N,N))
g[tuple(edges[:, :2].T)] = edges[:, 2]
data = dict(data=g)
return data
@classmethod
def parse_dancer(cls, fn, sep=';'):
""" Parse Network data depending on type/extension """
cls.log.debug('opening file: %s' % fn)
data = []
inside = {'vertices':False, 'edges':False }
clusters = []
features = []
for line in open(fn):
if line.startswith('# Vertices') or inside['vertices']:
if not inside['vertices']:
inside['vertices'] = True
N = 0
continue
if line.startswith('#') or not line.strip() :
inside['vertices'] = False # break
else:
# Parse a vertex assignment
elements = line.strip().split(sep)
clust = int(elements[-1])
feats = list(map(float, elements[-2].split('|')))
clusters.append(clust)
features.append(feats)
N += 1
elif line.startswith('# Edges') or inside['edges']:
if not inside['edges']:
inside['edges'] = True
continue
if line.startswith('#') or not line.strip() :
inside['edges'] = False # break
else:
# Parse an edge
data.append( line.strip() )
edges = np.array([tuple(row.split(sep)) for row in data]).astype(int)
g = np.zeros((N,N))
g[[e[0] for e in edges], [e[1] for e in edges]] = 1
g[[e[1] for e in edges], [e[0] for e in edges]] = 1
# ?! .T
try:
parameters = parse_file_conf(os.path.join(os.path.dirname(fn), 'parameters'))
parameters['devs'] = list(map(float, parameters['devs'].split(sep)))
except IOError:
parameters = {}
finally:
# @Obsolete !
parameters_ = parameters
clusters = clusters
features = np.array(features)
data = dict(data=g, clusters=clusters, features=features)
return data
@classmethod
def parse_dat(cls, fn, sep=' '):
""" Parse Network data depending on type/extension """
cls.log.debug('opening file: %s' % fn)
data = []
inside = {'vertices':False, 'edges':False }
for _line in open(fn):
line = _line.strip()
if line.startswith(('ROW LABELS:', '*vertices')) or inside['vertices']:
if not inside['vertices']:
inside['vertices'] = True
continue
if line.startswith('#') or not line.strip():
inside['vertices'] = False # break
elif line.startswith(('DATA','*edges' )):
inside['vertices'] = False # break
inside['edges'] = True
else:
# todo if needed
continue
elif line.startswith(('DATA','*edges' )) or inside['edges']:
if not inside['edges']:
inside['edges'] = True # break
continue
if line.startswith('#') or not line.strip() or len(line.split(sep)) < 2 :
inside['edges'] = False
else:
# Parse an edge row
data.append( line.strip() )
row_size = len(data[0].split(sep))
# Aggregate duplicate dyads into integer edge weights.
edges = {}
if row_size == 2:
# like .txt
for line in data:
dyad = line.strip().split(sep)
dyad = '.'.join(dyad)
edges[dyad] = edges.get(dyad, 0) + 1
elif row_size == 3:
for line in data:
_line = line.strip().split(sep)
dyad = _line[0:2]
dyad = '.'.join(dyad)
w = int(_line[-1]) # can be zeros
edges[dyad] = edges.get(dyad, 0) + int(w)
else:
raise NotImplementedError
edges = np.array([ (e.split('.')[0], e.split('.')[1], w+1) for e, w in edges.items()], dtype=int) -1
N = edges.max() +1
g = np.zeros((N,N))
g[tuple(edges[:, :2].T)] = edges[:, 2]
data = dict(data=g)
return data
| gpl-3.0 |
anurag313/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
slipguru/l1l2py | l1l2py/proximal.py | 1 | 13720 | # Author: Salvatore Masecchia <salvatore.masecchia@disi.unige.it>
#
# License: BSD Style.
import warnings
from math import sqrt
import numpy as np
try:
from scipy import linalg as la
except ImportError:
from numpy import linalg as la
from .base import AbstractLinearModel
from .metrics import regression_error
from .cross_val import KFold
##############################################################################
# Algorithm
def l1l2_regularization(data, labels, mu, tau, beta=None, kmax=100000,
tolerance=1e-5, return_iterations=False,
adaptive=False):
r"""Implementation of the Fast Iterative Shrinkage-Thresholding Algorithm
to solve a least squares problem with `l1l2` penalty.
It solves the `l1l2` regularization problem with parameter ``mu`` on the
`l2-norm` and parameter ``tau`` on the `l1-norm`.
Parameters
----------
data : (N, P) ndarray
Data matrix.
labels : (N,) or (N, 1) ndarray
Labels vector.
mu : float
`l2-norm` penalty.
tau : float
`l1-norm` penalty.
beta : (P,) or (P, 1) ndarray, optional (default is `None`)
Starting value for the iterations.
If `None`, then iterations starts from the empty model.
kmax : int, optional (default is `1e5`)
Maximum number of iterations.
tolerance : float, optional (default is `1e-5`)
Convergence tolerance.
return_iterations : bool, optional (default is `False`)
If `True`, returns the number of iterations performed.
The algorithm has a predefined minimum number of iterations
equal to `10`.
adaptive : bool, optional (default is `False`)
If `True`, minimization is performed calculating an adaptive step size
for each iteration.
Returns
-------
beta : (P, 1) ndarray
`l1l2` solution.
k : int, optional
Number of iterations performed.
Examples
--------
>>> X = numpy.array([[0.1, 1.1, 0.3], [0.2, 1.2, 1.6], [0.3, 1.3, -0.6]])
>>> beta = numpy.array([0.1, 0.1, 0.0])
>>> Y = numpy.dot(X, beta)
>>> beta = l1l2py.algorithms.l1l2_regularization(X, Y, 0.1, 0.1)
>>> len(numpy.flatnonzero(beta))
1
"""
n, d = data.shape
# beta starts from 0 and we assume also that the previous value is 0
if beta is None:
beta = np.zeros(d)
else:
beta = beta.ravel()
# Useful quantities
X = data
Y = labels.ravel()
if n > d:
XTY = np.dot(X.T, Y)
# First iteration with standard sigma
sigma = _sigma(data, mu)
if sigma < np.finfo(float).eps: # is zero...
return np.zeros(d), 0
mu_s = mu / sigma
tau_s = tau / (2.0 * sigma)
nsigma = n * sigma
# Starting conditions
auxcoef_ = beta
t = 1.
for k in xrange(kmax):
# Pre-calculated "heavy" computation
if n > d:
precalc = XTY - np.dot(X.T, np.dot(X, auxcoef_))
else:
precalc = np.dot(X.T, Y - np.dot(X, auxcoef_))
# TODO: stopping rule based on r = Y - Xbeta ??
# Soft-Thresholding
value = (precalc / nsigma) + ((1.0 - mu_s) * auxcoef_)
beta_next = np.sign(value) * np.clip(np.abs(value) - tau_s, 0, np.inf)
######## Adaptive step size #######################################
if adaptive:
beta_diff = (auxcoef_ - beta_next)
# Only if there is an increment of the solution
# we can calculate the adaptive step-size
if np.any(beta_diff):
# grad_diff = np.dot(XTn, np.dot(X, beta_diff))
# num = np.dot(beta_diff, grad_diff)
tmp = np.dot(X, beta_diff) # <-- adaptive-step-size drawback
num = np.dot(tmp, tmp) / n
sigma = (num / np.dot(beta_diff, beta_diff))
mu_s = mu / sigma
tau_s = tau / (2.0*sigma)
nsigma = n * sigma
# Soft-Thresholding
value = (precalc / nsigma) + ((1.0 - mu_s) * auxcoef_)
beta_next = value * np.maximum(0, 1 - tau_s/np.abs(value))
######## FISTA ####################################################
beta_diff = (beta_next - beta)
t_next = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t*t))
auxcoef_ = beta_next + ((t - 1.0)/t_next)*beta_diff
# Convergence values
max_diff = np.abs(beta_diff).max()
max_coef = np.abs(beta_next).max()
# Values update
t = t_next
beta = beta_next
# Stopping rule (exit even if beta_next contains only zeros)
if max_coef == 0.0 or (max_diff / max_coef) <= tolerance: break
if return_iterations:
return beta, k+1
return beta
def _sigma(matrix, mu):
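# Step constant used by l1l2_regularization: the spectral norm of the Gram
# matrix (the code builds whichever of X.T*X or X*X.T is smaller; both have
# the same 2-norm), divided by n, plus the l2 penalty mu.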
n, p = matrix.shape
if p > n:
tmp = np.dot(matrix, matrix.T)
else:
tmp = np.dot(matrix.T, matrix)
return (la.norm(tmp, 2)/n) + mu
##############################################################################
# Models
class ElasticNet(AbstractLinearModel):
"""
scikits.learn model is:
(1/2n)*||y - X*b||^2 + alpha*rho*||b||_1 + 0.5*alpha*(1-rho)||b||^2
l1l2py model is:
(1/n)*||y - X*b||^2 + tau*||b||_1 + mu*||b||^2
For now we keep our definition... we still have to decide whether a
different one would be better.
Notes:
- with alpha and rho the default parameters (1.0 and 0.5)
have a meaning: equal balance between l1 and l2 penalties.
We do not have this balancing because the penalty parameters
are unrelated.
For now we choose 0.5 for both (but this value has no meaning
right now)
- We have to introduce the precompute behaviour... see Sofia's mail
about coordinate descent and proximal methods
- In l1l2py max_iter defaults to 100,000 instead of 1,000 and
tol to 1e-5 instead of 1e-4... are these differences
between coordinate descent and proximal methods?
"""
def __init__(self, fit_intercept=True, tau=0.5, mu=0.5,
adaptive_step_size=False, max_iter=10000, tol=1e-4,
precompute=False, normalize=False):
self.tau = tau
self.mu = mu
self.max_iter = max_iter
self.tol = tol
self.adaptive_step_size = adaptive_step_size
self.fit_intercept = fit_intercept
self.precompute = precompute
self.normalize = normalize
self.intercept_ = 0.0
def _fit(self, X, y, warm_start=None):
if warm_start is None:
self.coef_ = np.zeros(X.shape[1])
else:
self.coef_ = np.asanyarray(warm_start)
l1l2_proximal = l1l2_regularization
self.coef_, self.niter_ = l1l2_proximal(X, y,
self.mu, self.tau,
beta=self.coef_,
kmax=self.max_iter,
tolerance=self.tol,
return_iterations=True,
adaptive=self.adaptive_step_size)
if self.niter_ == self.max_iter:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
return self
class Lasso(ElasticNet):
def __init__(self, tau=0.5, fit_intercept=True,
adaptive_step_size=False,
max_iter=10000, tol=1e-4, precompute=False, normalize=False):
super(Lasso, self).__init__(tau=tau, mu=0.0,
fit_intercept=fit_intercept,
adaptive_step_size=adaptive_step_size,
max_iter=max_iter,
tol=tol, precompute=precompute,
normalize=normalize)
##############################################################################
# Paths
#def lasso_path(X, y, eps=1e-3, n_taus=100, taus=None,
# fit_intercept=True, verbose=False, **fit_params):
# return enet_path(X, y, mu=0.0, eps=eps, n_taus=n_taus, taus=taus,
# fit_intercept=fit_intercept, verbose=verbose,
# adaptive_step_size=False, max_iter=10000, tol=1e-4)
#
#def enet_path(X, y, mu=0.5, eps=1e-3, n_taus=100, taus=None,
# fit_intercept=True, verbose=False,
# adaptive_step_size=False, max_iter=10000, tol=1e-4):
# r"""The code is very similar to the scikits one.... mumble mumble"""
#
# X, y, Xmean, ymean = LinearModel._center_data(X, y, fit_intercept)
#
# n_samples = X.shape[0]
# if taus is None:
# tau_max = np.abs(np.dot(X.T, y)).max() * (2.0 / n_samples)
# taus = np.logspace(np.log10(tau_max * eps), np.log10(tau_max),
# num=n_taus)[::-1]
# else:
# taus = np.sort(taus)[::-1] # make sure taus are properly ordered
# coef_ = None # init coef_
# models = []
#
# for tau in taus:
# model = ElasticNet(tau=tau, mu=mu, fit_intercept=False,
# adaptive_step_size=adaptive_step_size,
# max_iter=max_iter, tol=tol)
# model.fit(X, y, coef_init=coef_)
# if fit_intercept:
# model.fit_intercept = True
# model._set_intercept(Xmean, ymean)
# if verbose:
# print model
# coef_ = model.coef_
# models.append(model)
#
# return models
#
################################################################################
## CV Estimators
#
#class ElasticNetCV(LinearModel):
# path = staticmethod(enet_path)
# estimator = ElasticNet
#
# def __init__(self, mu=0.5, eps=1e-3, n_taus=100, taus=None,
# fit_intercept=True, max_iter=10000,
# tol=1e-4, cv=None,
# adaptive_step_size=False,
# loss=None):
# self.mu = mu
# self.eps = eps
# self.n_taus = n_taus
# self.taus = taus
# self.fit_intercept = fit_intercept
# self.max_iter = max_iter
# self.tol = tol
# self.cv = cv
# self.adaptive_step_size = adaptive_step_size
# self.loss = loss
# self.coef_ = None
#
# def fit(self, X, y):
# X = np.asanyarray(X)
# y = np.asanyarray(y)
# n_samples = X.shape[0]
#
# # Path parmeters creation
# path_params = self.__dict__.copy()
# for p in ('cv', 'loss', 'coef_'):
# del path_params[p]
#
# # TODO: optional????
# # Start to compute path on full data
# models = self.path(X, y, **path_params)
#
# # Update the taus list
# taus = [model.tau for model in models]
# n_taus = len(taus)
# path_params.update({'taus': taus, 'n_taus': n_taus})
#
# # init cross-validation generator
# cv = self.cv if self.cv else KFold(len(y), 5)
#
# # init loss function
# loss = self.loss if self.loss else regression_error
#
# # Compute path for all folds and compute MSE to get the best tau
# folds = list(cv)
# loss_taus = np.zeros((len(folds), n_taus))
# for i, (train, test) in enumerate(folds):
# models_train = self.path(X[train], y[train], **path_params)
# for i_tau, model in enumerate(models_train):
# y_ = model.predict(X[test])
# loss_taus[i, i_tau] += loss(y_, y[test])
#
# i_best_tau = np.argmin(np.mean(loss_taus, axis=0))
# model = models[i_best_tau]
#
# self.coef_ = model.coef_
# self.intercept_ = model.intercept_
# self.tau = model.tau
# self.taus = np.asarray(taus)
# self.coef_path_ = np.asarray([model.coef_ for model in models])
# self.loss_path = loss_taus.T
# return self
#
#class LassoCV(ElasticNetCV):
# path = staticmethod(lasso_path)
# estimator = Lasso
#
# def __init__(self, eps=1e-3, n_taus=100, taus=None,
# fit_intercept=True, max_iter=10000,
# tol=1e-4, cv=None,
# adaptive_step_size=False,
# loss=None):
# super(LassoCV, self).__init__(mu=0.0,
# eps=eps,
# n_taus=n_taus, taus=taus,
# fit_intercept=fit_intercept,
# max_iter=max_iter,
# tol=tol, cv=cv,
# adaptive_step_size=adaptive_step_size,
# loss=loss)
##############################################################################
# GLMNet models
#try:
# from sklearn.linear_model import ElasticNet as _GlmElasticNet
# from sklearn.linear_model import Lasso as _GlmLasso
# from sklearn.linear_model import ElasticNetCV as _GlmElasticNetCV
# from sklearn.linear_model import LassoCV as _GlmLassoCV
#
# ## TODO better.....
# class GlmElasticNet(ElasticNet):
# def __init__(self, tau=0.5, mu=0.5, **params):
# alpha = tau + mu
# if tau == mu == 0.0:
# rho = 0.0
# else:
# rho = tau / (tau + mu)
#
# self.tau = tau
# self.mu = mu
# self._estimator = _GlmElasticNet(alpha=alpha, rho=rho, **params)
#
# def fit(self, X, y):
# return self._estimator.fit(X, y)
#
# def __getattr__(self, key):
# return getattr(self._estimator, key)
#
#except:
# pass
| gpl-3.0 |
hammerlab/mhcflurry | mhcflurry/downloads_command.py | 1 | 10774 | '''
Download MHCflurry released datasets and trained models.
Examples
Fetch the default downloads:
$ mhcflurry-downloads fetch
Fetch a specific download:
$ mhcflurry-downloads fetch models_class1_pan
Get the path to a download:
$ mhcflurry-downloads path models_class1_pan
Get the URL of a download:
$ mhcflurry-downloads url models_class1_pan
Summarize available and fetched downloads:
$ mhcflurry-downloads info
'''
from __future__ import (
print_function,
division,
absolute_import,
)
import sys
import argparse
import logging
import os
from pipes import quote
import errno
import tarfile
from shutil import copyfileobj
from tempfile import NamedTemporaryFile
from tqdm import tqdm
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
import posixpath
import pandas
try:
from urllib.request import urlretrieve
from urllib.parse import urlsplit
except ImportError:
from urllib import urlretrieve
from urlparse import urlsplit
from .downloads import (
get_current_release,
get_current_release_downloads,
get_downloads_dir,
get_path,
ENVIRONMENT_VARIABLES)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--quiet",
action="store_true",
default=False,
help="Output less")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
default=False,
help="Output more")
subparsers = parser.add_subparsers(dest="subparser_name")
parser_fetch = subparsers.add_parser('fetch')
parser_fetch.add_argument(
'download_name',
metavar="DOWNLOAD",
nargs="*",
help="Items to download")
parser_fetch.add_argument(
"--keep",
action="store_true",
default=False,
help="Don't delete archives after they are extracted")
parser_fetch.add_argument(
"--release",
default=get_current_release(),
help="Release to download. Default: %(default)s")
parser_fetch.add_argument(
"--already-downloaded-dir",
metavar="DIR",
help="Don't download files, get them from DIR")
parser_info = subparsers.add_parser('info')
parser_path = subparsers.add_parser('path')
parser_path.add_argument(
"download_name",
nargs="?",
default='')
parser_url = subparsers.add_parser('url')
parser_url.add_argument(
"download_name",
nargs="?",
default='')
def run(argv=sys.argv[1:]):
args = parser.parse_args(argv)
if not args.quiet:
logging.basicConfig(level="INFO")
if args.verbose:
logging.basicConfig(level="DEBUG")
command_functions = {
"fetch": fetch_subcommand,
"info": info_subcommand,
"path": path_subcommand,
"url": url_subcommand,
None: lambda args: parser.print_help(),
}
command_functions[args.subparser_name](args)
def mkdir_p(path):
"""
Make directories as needed, similar to mkdir -p in a shell.
From:
http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def yes_no(boolean):
return "YES" if boolean else "NO"
# For progress bar on download. See https://pypi.python.org/pypi/tqdm
class TqdmUpTo(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
def update_to(self, b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n) # will also set self.n = b * bsize
def fetch_subcommand(args):
def qprint(msg):
if not args.quiet:
print(msg)
if not args.release:
raise RuntimeError(
"No release defined. This can happen when you are specifying "
"a custom models directory. Specify --release to indicate "
"the release to download.")
downloads = get_current_release_downloads()
invalid_download_names = set(
item for item in args.download_name if item not in downloads)
if invalid_download_names:
raise ValueError("Unknown download(s): %s. Valid downloads are: %s" % (
', '.join(invalid_download_names), ', '.join(downloads)))
items_to_fetch = set()
for (name, info) in downloads.items():
default = not args.download_name and info['metadata']['default']
if name in args.download_name and info['downloaded']:
print((
"*" * 40 +
"\nThe requested download '%s' has already been downloaded. "
"To re-download this data, first run: \n\t%s\nin a shell "
"and then re-run this command.\n" +
"*" * 40) % (name, 'rm -rf ' + quote(get_path(name))))
if not info['downloaded'] and (name in args.download_name or default):
items_to_fetch.add(name)
mkdir_p(get_downloads_dir())
qprint("Fetching %d/%d downloads from release %s" % (
len(items_to_fetch), len(downloads), args.release))
format_string = "%-40s %-20s %-20s %-20s "
qprint(format_string % (
"DOWNLOAD NAME", "ALREADY DOWNLOADED?", "WILL DOWNLOAD NOW?", "URL"))
for (item, info) in downloads.items():
urls = (
[info['metadata']["url"]]
if "url" in info['metadata']
else info['metadata']["part_urls"])
url_description = urls[0]
if len(urls) > 1:
url_description += " + %d more parts" % (len(urls) - 1)
qprint(format_string % (
item,
yes_no(info['downloaded']),
yes_no(item in items_to_fetch),
url_description))
# TODO: may want to extract into somewhere temporary and then rename to
# avoid making an incomplete extract if the process is killed.
for item in items_to_fetch:
metadata = downloads[item]['metadata']
urls = (
[metadata["url"]] if "url" in metadata else metadata["part_urls"])
temp = NamedTemporaryFile(delete=False, suffix=".tar.bz2")
try:
for (url_num, url) in enumerate(urls):
delete_downloaded = True
if args.already_downloaded_dir:
filename = posixpath.basename(urlsplit(url).path)
downloaded_path = os.path.join(
args.already_downloaded_dir, filename)
delete_downloaded = False
else:
qprint("Downloading [part %d/%d]: %s" % (
url_num + 1, len(urls), url))
(downloaded_path, _) = urlretrieve(
url,
temp.name if len(urls) == 1 else None,
reporthook=TqdmUpTo(
unit='B', unit_scale=True, miniters=1).update_to)
qprint("Downloaded to: %s" % quote(downloaded_path))
if downloaded_path != temp.name:
qprint("Copying to: %s" % temp.name)
with open(downloaded_path, "rb") as fd:
copyfileobj(fd, temp, length=64*1024*1024)
if delete_downloaded:
os.remove(downloaded_path)
temp.close()
tar = tarfile.open(temp.name, 'r:bz2')
names = tar.getnames()
logging.debug("Extracting: %s" % names)
bad_names = [
n for n in names
if n.strip().startswith("/") or n.strip().startswith("..")
]
if bad_names:
raise RuntimeError(
"Archive has suspicious names: %s" % bad_names)
result_dir = get_path(item, test_exists=False)
os.mkdir(result_dir)
for member in tqdm(tar.getmembers(), desc='Extracting'):
tar.extractall(path=result_dir, members=[member])
tar.close()
# Save URLs that were used for this download.
pandas.DataFrame({"url": urls}).to_csv(
os.path.join(result_dir, "DOWNLOAD_INFO.csv"), index=False)
qprint("Extracted %d files to: %s" % (
len(names), quote(result_dir)))
finally:
if not args.keep:
os.remove(temp.name)
def info_subcommand(args):
print("Environment variables")
for variable in ENVIRONMENT_VARIABLES:
value = os.environ.get(variable)
if value:
print(' %-35s = %s' % (variable, quote(value)))
else:
print(" %-35s [unset or empty]" % variable)
print("")
print("Configuration")
def exists_string(path):
return (
"exists" if os.path.exists(path) else "does not exist")
items = [
("current release", get_current_release(), ""),
("downloads dir",
get_downloads_dir(),
"[%s]" % exists_string(get_downloads_dir())),
]
for (key, value, extra) in items:
print(" %-35s = %-20s %s" % (key, quote(value), extra))
print("")
downloads = get_current_release_downloads()
format_string = "%-40s %-12s %-12s %-20s "
print(format_string % ("DOWNLOAD NAME", "DOWNLOADED?", "UP TO DATE?", "URL"))
for (item, info) in downloads.items():
urls = (
[info['metadata']["url"]]
if "url" in info['metadata']
else info['metadata']["part_urls"])
url_description = urls[0]
if len(urls) > 1:
url_description += " + %d more parts" % (len(urls) - 1)
print(format_string % (
item,
yes_no(info['downloaded']),
"" if not info['downloaded'] else (
"UNKNOWN" if info['up_to_date'] is None
else yes_no(info['up_to_date'])
),
url_description))
def path_subcommand(args):
"""
Print the local path to a download
"""
print(get_path(args.download_name))
def url_subcommand(args):
"""
Print the URL(s) for a download
"""
downloads = get_current_release_downloads()
download = downloads[args.download_name]["metadata"]
urls = []
if download.get("url"):
urls.append(download["url"])
if download.get("part_urls"):
urls.extend(download["part_urls"])
print("\n".join(urls))
| apache-2.0 |
thilbern/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 44 | 7663 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
"""Compute score for random uniform cluster labelings"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
"""Check that adjusted scores are almost zero on random labels"""
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
"""Compute the Adjusted Mutual Information and test against known values"""
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
"""Check numerical stability when information is exactly zero"""
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
"""Check relation between v_measure, entropy and mutual information"""
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
q1ang/scikit-learn | sklearn/svm/tests/test_sparse.py | 70 | 12992 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
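# Illustrative sketch (an editorial addition, not part of the original test
# suite): the callable-kernel calling convention exercised above, applied to
# the module-level dense toy data X, Y. The helper name is hypothetical and
# the snippet only demonstrates the pattern; it is not collected as a test.
def _callable_kernel_sketch():
    clf = svm.SVC(kernel=lambda a, b: np.dot(a, b.T), random_state=0)
    clf.fit(X, Y)
    return clf.predict(X)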
| bsd-3-clause |
RPGOne/scikit-learn | examples/neighbors/plot_species_kde.py | 39 | 4039 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
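# Illustrative sketch (an editorial addition, not part of the original
# example): the same KernelDensity configuration applied to a tiny, made-up
# set of latitude/longitude points, showing the expected input convention
# (radians) in isolation. Coordinates and bandwidth are arbitrary values.
def _kde_haversine_sketch():
    pts_deg = np.array([[-10.0, -60.0], [-12.5, -62.0], [-11.0, -61.5]])
    pts_rad = pts_deg * np.pi / 180.  # haversine distances are defined on radians
    kde = KernelDensity(bandwidth=0.05, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(pts_rad)
    # score_samples returns the log-density; exponentiate to recover the density
    return np.exp(kde.score_samples(pts_rad))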
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
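# Illustrative sketch (an editorial addition, not part of the original test
# suite): the simplest GraphLasso fit, mirroring the smoke tests above. The
# data size and alpha are arbitrary illustrative values and the hypothetical
# helper is not collected as a test.
def _graph_lasso_usage_sketch():
    rng = check_random_state(0)
    X = rng.randn(30, 4)
    model = GraphLasso(alpha=0.1).fit(X)
    # the fitted estimator exposes the covariance and its (sparse) inverse
    return model.covariance_, model.precision_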
| bsd-3-clause |
nss350/magPy | tests/testsPhoenixReader.py | 1 | 3600 | import os
import sys
sys.path.append(os.path.join("..", "core"))
sys.path.append(os.path.join("..", "inbuilt"))
sys.path.append(os.path.join("..", "utils"))
import numpy as np
import math
from datetime import datetime, timedelta
import struct
# import readers
from dataReaderPhoenix import DataReaderPhoenix
from dataReaderInternal import DataReaderInternal
# import writers
from dataWriterInternal import DataWriterInternal
# import inbuilt
from projectDefault import *
from projectViewTime import *
# import utils
from utilsProcess import *
from utilsIO import *
# graphing
import matplotlib.pyplot as plt
# def readCoil(coilFile):
# coilPath = os.path.join("..", "..", "Data", "riftVolc", "202", "COIL1547.CLC")
# coilFile = open(coilPath, "rb")
# print struct.unpack("20b", coilFile.read(20))
# print struct.unpack("12s", coilFile.read(12))
# print struct.unpack("12s", coilFile.read(12))
# print struct.unpack("12s", coilFile.read(12))
# print struct.unpack("8s", coilFile.read(8))
# print struct.unpack("12s", coilFile.read(12))
# print struct.unpack("d", coilFile.read(8))
# print struct.unpack("d", coilFile.read(8))
# print struct.unpack("7s", coilFile.read(7))
# print struct.unpack("18s", coilFile.read(18))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("f", coilFile.read(4))
# print struct.unpack("d", coilFile.read(8))
# print struct.unpack("d", coilFile.read(8))
# print struct.unpack("d", coilFile.read(8))
# print struct.unpack("d", coilFile.read(8))
# print struct.unpack("d", coilFile.read(8))
# print struct.unpack("500s", coilFile.read(500))
# coilFile.close()
### test the data reader
dataPath = os.path.join("..", "..", "Data", "riftVolc", "202")
dataReader = DataReaderPhoenix(dataPath)
dataReader.printInfo()
dataReader.printDataFileList()
print(dataReader.getSamplesRatesTS())
print(dataReader.getNumberSamplesTS())
dataReader.printTableFile()
# startTime = "2017-04-07 23:00:00"
# endTime = "2017-04-08 01:00:00"
# data = dataReader.getUnscaledData(startTime, endTime)
# plt.figure()
# for idx, chan in enumerate(data.keys()):
# plt.subplot(dataReader.getNumChannels(), 1, idx+1)
# plt.title(chan)
# plt.plot(data[chan]-np.average(data[chan]))
# plt.show()
### now try and reformat
# outpath = os.path.join("..", "..", "Data", "riftVolc", "202_reformat")
# dataReader.reformat(outpath)
### create a project
# projectPath = (os.path.join("..", "..", "Data", "riftVolcProject"))
# projectMakeDefault(projectPath, "2017-04-07 06:00:00")
# proj = projectLoad(projectPath, "mtProj.prj")
### let's look at some time
# projectViewTime(proj, "2017-04-08 02:00:00", "2017-04-08 04:30:00", freqs=[15], save=True, chans=["Ex", "Ey", "Hx", "Hy", "Hz"])
# projectViewTime(proj, "2017-04-07 09:16:00", "2017-04-07 09:16:16", freqs=[150], save=True, chans=["Ex", "Ey", "Hx", "Hy", "Hz"])
# projectViewTime(proj, "2017-04-07 09:33:00", "2017-04-07 09:33:01", freqs=[2400], save=True, chans=["Ex", "Ey", "Hx", "Hy", "Hz"])
| apache-2.0 |
nrhine1/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 143 | 22295 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
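# Illustrative sketch (an editorial addition, not part of the original test
# suite): the Benjamini-Hochberg selection rule referenced in the comment
# above, applied to a tiny, made-up vector of p-values. SelectFdr implements
# this internally; the hypothetical helper below only restates the rule.
def _benjamini_hochberg_sketch(pvalues=(0.001, 0.008, 0.039, 0.041, 0.6),
                               alpha=0.05):
    sv = np.sort(np.asarray(pvalues))
    n_features = len(sv)
    below = sv <= alpha * np.arange(1, n_features + 1) / float(n_features)
    if not below.any():
        return sv[:0]  # nothing selected
    # reject every p-value up to the largest index satisfying the bound
    last = np.nonzero(below)[0].max()
    return sv[:last + 1]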
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
mojoboss/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
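# Illustrative sketch (an editorial addition, not part of the original
# example): the same "select then classify" idea expressed as a scikit-learn
# Pipeline, which is the usual way to chain the two steps. It reuses the
# module-level X, y defined above and is not executed by the example itself.
def _anova_svm_pipeline_sketch():
    from sklearn.pipeline import make_pipeline
    anova_svm = make_pipeline(SelectPercentile(f_classif, percentile=10),
                              svm.SVC(kernel='linear'))
    anova_svm.fit(X, y)
    return anova_svm.score(X, y)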
| bsd-3-clause |
twosigma/Cook | scheduler/simulator_files/analysis/analysis/__init__.py | 1 | 16724 | import matplotlib
import matplotlib.cm as cmx
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
import pandas
def prepare_df(df):
    """Translates the time columns of a simulation output trace dataframe so
    the minimum time occurring is 0, and adds derived run time and overhead columns
    Parameters:
    -----------
    df : pandas.DataFrame
dataframe of csv output from simulation run.
Expected columns:
[submit_time_ms, start_time_ms, end_time_ms, mesos_start_time_ms]
Returns:
-------
df : pandas.DataFrame
dataframe containing the fields from the output trace file"""
min_time = min(df["submit_time_ms"])
    df["start_time_ms"] = df["start_time_ms"] - min_time
    df["end_time_ms"] = df["end_time_ms"] - min_time
    df["submit_time_ms"] = df["submit_time_ms"] - min_time
df["mesos_start_time_ms"] = df["mesos_start_time_ms"] - min_time
df["run_time_ms"] = df.end_time_ms - df.start_time_ms
df.loc[df['run_time_ms'] < 0, 'end_time_ms'] = df.end_time_ms.max()
df["run_time_ms"] = df.end_time_ms - df.start_time_ms
df["overhead"] = df.start_time_ms - df.submit_time_ms
return df
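# Illustrative sketch (an editorial addition, not part of the original
# module): the minimal columns prepare_df expects, with made-up millisecond
# timestamps, and the derived columns it produces. The hypothetical helper is
# never called by the module itself.
def _prepare_df_sketch():
    raw = pandas.DataFrame([
        {"submit_time_ms": 1000, "start_time_ms": 1500,
         "end_time_ms": 2500, "mesos_start_time_ms": 1400},
        {"submit_time_ms": 1200, "start_time_ms": 1600,
         "end_time_ms": 3000, "mesos_start_time_ms": 1550},
    ])
    out = prepare_df(raw)
    # all times are shifted so the earliest submit time becomes 0, and
    # run_time_ms / overhead are derived from the shifted columns
    return out[["submit_time_ms", "start_time_ms", "run_time_ms", "overhead"]]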
def job_view_stats(df):
"""Produces a dataframe that is focused on the job level.
Each row is a job, the task id is the last task that ran and 'overhead'
is (end_time_ms-submit_time_ms)-run_time_ms.
This measures the added cost from the scheduler and lack of infinite compute.
Parameters:
-----------
df : pandas.DataFrame
dataframe returned from prepare_df
Returns:
--------
df : pandas.DataFrame
dataframe with job level info"""
df = df.copy()
df = df.sort_values("end_time_ms").groupby("job_id").last()
df["submit_to_complete_ms"] = df.end_time_ms - df.submit_time_ms
df["job_overhead"] = df.submit_to_complete_ms - df.run_time_ms
df.reset_index(inplace=True)
return df
def running_tasks_at(df, t):
"""Returns the tasks running at t
Parameters:
-----------
df : pandas.DataFrame
prepare_df outputted dataframe
t : int
time in milliseconds (in translated time)
Returns:
--------
pandas.DataFrame
dataframe with same form as df, but only with tasks that were running at t"""
df = df.copy()
started_before_df = df[df.start_time_ms <= t]
completed_after_df = started_before_df[started_before_df.end_time_ms > t]
return completed_after_df
def point_in_time_analysis(df, t):
"""Returns the 5 dataframes with analysis for a particular point in time
Parameters:
-----------
df : pandas.DataFrame
prepare_df outputted dataframe
t : int
time in milliseconds (in translated time)
Returns:
--------
per_host : pandas.DataFrame
dataframe with a row per host. Has column for mem, cpus and count
of tasks running on the host
per_user : pandas.DataFrame
dataframe with a row per user. Has a column for mem, cpus and count
of tasks running for the user
waiting : pandas.DataFrame
dataframe with a row for tasks that were submitted before t but were
not running at t.
running_tasks_df : pandas.DataFrame
dataframe outputted by running_tasks_at
df : pandas.DataFrame
input dataframe
"""
running_df = running_tasks_at(df, t)
running_df["count"] = 1
per_host = running_df.groupby("hostname").sum()[["mem", "cpus", "count"]].reset_index()
per_user = running_df.groupby("user").sum()[["mem", "cpus", "count"]].reset_index()
waiting = df[(df.submit_time_ms < t) & (df.start_time_ms > t)]
return [per_host, per_user, waiting, running_df, df]
def time_series_events(events):
"""Given a list of event tuples (time, count, mem, cpus),
    produce a time series of the cumulative sum of each of the values at each
time
Parameters:
-----------
events : list of tuples (time, count, mem, cpus)
Returns:
---------
pandas.DataFrame
dataframe time series of usage with keys
'time_ms', 'count', 'cpus', 'mem'
where
1. 'count' is the number of tasks
2. 'mem' is the memory
3. 'cpus' is the cpus
at time 'time_ms'.
"""
ordered_events = sorted(events, key=lambda x: x[0])
time_series = []
count_total = 0
mem_total = 0
cpus_total = 0
for time, count, mem, cpus in ordered_events:
count_total += count
mem_total += mem
cpus_total += cpus
time_series.append({"time_ms" : time, "count" : count_total, "cpus" : cpus_total, "mem": mem_total})
return pandas.DataFrame(time_series)
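# Illustrative sketch (an editorial addition, not part of the original
# module): two made-up events for a single task (start and end) turned into a
# cumulative usage time series by time_series_events.
def _time_series_events_sketch():
    events = [(0, 1, 1024, 1.0),       # task starts: +1 task, +1024 mem, +1 cpu
              (500, -1, -1024, -1.0)]  # task ends: usage drops back to zero
    return time_series_events(events)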
def running_concurrently(df):
"""Given a dataframe of tasks, returns a dataframe where each row
is the utilization at a given time
Parameters:
-----------
df : pandas.DataFrame
dataframe output from prepare_df
Returns:
--------
pandas.DataFrame
dataframe time series of utilization with keys
'time_ms', 'count', 'cpus', 'mem'
where
1. 'count' is the number of tasks running
2. 'mem' is the memory utilized
3. 'cpus' is the cpus utilized
at time 'time_ms'."""
rows = df.to_records()
events = [e for r in rows for e in [(r["start_time_ms"], 1, r["mem"], r["cpus"]),
(r["end_time_ms"], -1, -r["mem"], -r["cpus"])]]
return time_series_events(events)
def waiting_over_time(df):
"""Returns a time series of count, mem and cpus waiting
Parameters:
-----------
df : pandas.DataFrame
dataframe output from prepare_df
Returns:
--------
pandas.DataFrame
dataframe time series of waiting with keys
'time_ms', 'count', 'cpus', 'mem'
where
1. 'count' is the number of tasks waiting
2. 'mem' is the memory waiting
3. 'cpus' is the cpus waiting
at time 'time_ms'."""
rows = df.to_records()
events = [e for r in rows for e in [(r["submit_time_ms"], 1, r["mem"], r["cpus"]),
(r["start_time_ms"], -1, -r["mem"], -r["cpus"])]]
return time_series_events(events)
def mem_tb_hours_run(df):
return (df.mem*df.run_time_ms).sum()/(1000*60*60)/(1024*1024)
def cpu_hours_run(df):
return (df.cpus*df.run_time_ms).sum()/(1000*60*60)
def sample_usage(user_running_over_time, user_waiting_over_time, cycle_time_ms):
"""Samples the usages at fixed time step for both running and waiting
Parameters:
-----------
user_running_over_time : pandas.DataFrame
A dataframe with columns:
['time_ms', 'user', 'mem', 'cpus', 'count']
user_waiting_over_time : pandas.DataFrame
A dataframe with columns:
['time_ms', 'user', 'mem', 'cpus', 'count']
cycle_time_ms : int
time step to sample at
Returns:
--------
    dataframe with a row per user per time_ms with the columns from both
running and waiting of the most recent update for that user time_ms pair
prior to the time_ms. waiting columns will have no suffix, running
columns will have a _running suffix
"""
clock = range(0, user_running_over_time.time_ms.max(), cycle_time_ms)
users = np.unique(user_running_over_time.user.values)
rows = []
for tick in clock:
for user in users:
rows.append({"time_ms" : tick, "user" : user})
df = pandas.DataFrame(rows)
df = pandas.merge_asof(df, user_waiting_over_time,
on="time_ms", by="user",
suffixes=("", "_waiting"))
df = pandas.merge_asof(df, user_running_over_time,
on="time_ms", by="user",
suffixes=("", "_running"))
return df
def add_starvation(df_with_fairness):
    """Returns a data frame with a column `starved_mem_gb`, the amount of memory in GB by which a user is starved.
    Starved is defined as the amount of resources a user wants, below their share, that they aren't getting
Parameters:
-----------
df_with_fairness : pandas.DataFrame
A dataframe of usage (from intervalize usage) annotated with the fair allocation for each user at
each time slice
Returns:
--------
pandas.DataFrame
df_with_fairness annotated with a column `starved_mem_gb`
"""
df = df_with_fairness
df["share_mem"] = 2.5e6
df["starved_mem_gb"] = 0
df.loc[df["mem_running"] < df.share_mem, "starved_mem_gb"] = (df[["mem_fair", "share_mem"]].min(axis=1) - df.mem_running)/1000
df.loc[df["starved_mem_gb"] < 0, "starved_mem_gb"] = 0 # starved is on interval [0,share_mem]
return df
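# Illustrative sketch (an editorial addition, not part of the original
# module): a single made-up time slice run through add_starvation. The first
# user runs below both the fair allocation and the hard-coded share, so a
# positive starved_mem_gb is reported; the second user is not starved.
def _add_starvation_sketch():
    usage = pandas.DataFrame([
        {"time_ms": 0, "user": "alice", "mem_running": 1.0e6, "mem_fair": 2.0e6},
        {"time_ms": 0, "user": "bob", "mem_running": 3.0e6, "mem_fair": 3.0e6},
    ])
    return add_starvation(usage)[["user", "starved_mem_gb"]]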
def prepare_desired_resource_dicts(point_in_time_usage):
    """Given a point in time usage, returns a list of dicts with keys `mem` and `user`, where `mem` is the
    sum of the waiting and running memory for each user
Parameters:
-----------
point_in_time_usage: pandas.DataFrame
A dataframe with keys: [time_ms, user, mem, count, mem_running, count_running]
Returns:
--------
desired_resources: list of dicts
A list of dicts with keys: [user, mem] which provides the user's desired resources
sorted by mem"""
user_desired = point_in_time_usage.copy()
user_desired.mem += user_desired.mem_running
user_desired = user_desired[["mem", "user"]]
user_desired.dropna()
user_desired = user_desired[(user_desired.mem>0)]
resource_records = user_desired.sort_values("mem").to_records()
desired_resources = [{"mem" : mem, "user" : user} for _,mem,user in resource_records]
return desired_resources
def get_fair_allocation(point_in_time_usage):
"""Given a point in time usage, returns the fair allocation for each user with the following assumptions:
1. We only care about memory
2. All user shares are equal
Generalizing this shouldn't be TOO difficult
Parameters:
-----------
point_in_time_usage: pandas.DataFrame
A dataframe with keys: [time_ms, user, mem, count, mem_running, count_running]
Returns:
--------
allocations_df: pandas.DataFrame
        A dataframe with keys: [user, mem] which provides the user's fair allocation given what they requested"""
mem_to_allocate = point_in_time_usage.mem_running.sum()
desired_resources = prepare_desired_resource_dicts(point_in_time_usage)
allocations = {user : 0 for user in point_in_time_usage.user}
mem_allocated_per_active_user = 0
while mem_to_allocate > 1e-6:
min_mem = desired_resources[0]["mem"] - mem_allocated_per_active_user
if mem_to_allocate < min_mem * len(desired_resources):
min_mem = mem_to_allocate/len(desired_resources)
mem_allocated_per_active_user += min_mem
non_pos_count = 0
while non_pos_count < len(desired_resources) and (desired_resources[non_pos_count]["mem"] - mem_allocated_per_active_user - 1e-6) <= 0:
allocations[desired_resources[non_pos_count]["user"]] = mem_allocated_per_active_user
non_pos_count += 1
mem_to_allocate -= min_mem*len(desired_resources)
desired_resources = desired_resources[non_pos_count:]
# Any users remaining at the end still had resources desired resources. Give them the full allocation
for user_mem_desired in desired_resources:
allocations[user_mem_desired["user"]] = mem_allocated_per_active_user
allocation_df = pandas.DataFrame([(user,mem) for user,mem in allocations.items()],
columns=['user','mem'])
allocation_df["time_ms"] = point_in_time_usage.time_ms.values[0] #they will all be the same
return allocation_df
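# Illustrative sketch (an editorial addition, not part of the original
# module): a single time slice with two users sharing 4000 units of running
# memory. Both ask for more than an even split, so each receives 2000 from the
# progressive-filling loop in get_fair_allocation. All numbers are made up.
def _fair_allocation_sketch():
    slice_df = pandas.DataFrame([
        {"time_ms": 0, "user": "alice", "mem": 4000.0, "mem_running": 1000.0},
        {"time_ms": 0, "user": "bob", "mem": 0.0, "mem_running": 3000.0},
    ])
    return get_fair_allocation(slice_df)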
def prepare_usage_df(user_running, user_waiting, cycle_time):
usage_df = sample_usage(user_running, user_waiting, cycle_time)
fair_allocs = usage_df.groupby("time_ms", as_index=False).apply(get_fair_allocation).reset_index(drop=True)
usage_df = pandas.merge(usage_df, fair_allocs, on=["time_ms", "user"], suffixes=("","_fair"))
usage_df = add_starvation(usage_df)
usage_df["fair_minus_running"] = np.abs(usage_df.mem_fair - usage_df.mem_running)
usage_df["fair_ratio"] = usage_df.mem_running/usage_df.mem_fair
usage_df["starved_mem_log10"] = np.log10(usage_df[usage_df.starved_mem_gb > 0].starved_mem_gb)
return usage_df
def score_card(task_df, user_running, user_waiting, cycle_time):
"""Returns a dataframe where each column is a different metric.
    Whether larger or lower values are better is noted where it makes sense.
    Metrics:
    "cpu_hours" : total cpu hours used by tasks, up is good
    "cpu_hours_preemption" : cpu hours for jobs that were preempted, down is good
    "cpu_hours_success" : cpu hours for jobs that were successful, up is good
    "mem_running_over_fair_alloc_median" : measure of fairness. median underallocation over time and users. up is good
    "mem_tb_hours" : total memory hours in tb used by tasks, up is good
    "mem_tb_hours_preemption" : memory hours in tb for jobs that were preempted, down is good
    "mem_tb_hours_success" : memory hours in tb for jobs that were successful, up is good
    "scheduling_latency_mean" : mean scheduling latency for all tasks, down is good
    "scheduling_latency_mean_preemption" : mean scheduling latency for preempted tasks, down is good
    "scheduling_latency_mean_success" : mean scheduling latency for successful tasks, down is good
    "scheduling_latency_p50" : median scheduling latency for all tasks, down is good
    "scheduling_latency_p50_preemption" : median scheduling latency for preempted tasks, down is good
    "scheduling_latency_p50_success" : median scheduling latency for successful tasks, down is good
    "starvation_log10_median": median of log 10 starvation across all users and time. down is good
    "starvation_log10_mean_cycle_median" : median of mean of log 10 starvation across users for each time. down is good
    "tasks_run" : total number of tasks run, up is good
    "tasks_run_preemption" : tasks that were run that failed due to preemption, down is good
"tasks_run_success" : tasks that were run that succeeded, up is good
"""
task_df.reason = task_df.reason.astype(str)
success_task_df = task_df[task_df.status == ":instance.status/success"]
preemption_task_df = task_df[task_df.reason == "Preempted by rebalancer"]
running_task_df = task_df[task_df.status == ":instance.status/running"]
usage_df = prepare_usage_df(user_running, user_waiting, cycle_time)
values = [{"cpu_hours" : cpu_hours_run(task_df),
"cpu_hours_preemption" : cpu_hours_run(preemption_task_df),
"cpu_hours_success" : cpu_hours_run(success_task_df),
"cpu_hours_running" : cpu_hours_run(running_task_df),
"mem_running_over_fair_alloc_median" : usage_df[(usage_df.fair_ratio > 0) & (usage_df.fair_ratio <= 1)].fair_ratio.median(),
"mem_tb_hours" : mem_tb_hours_run(task_df),
"mem_tb_hours_preemption" : mem_tb_hours_run(preemption_task_df),
"mem_tb_hours_success" : mem_tb_hours_run(success_task_df),
"mem_tb_hours_running" : mem_tb_hours_run(running_task_df),
"scheduling_latency_mean" : task_df.overhead.mean(),
"scheduling_latency_mean_preemption" : preemption_task_df.overhead.mean(),
"scheduling_latency_mean_success" : success_task_df.overhead.mean(),
"scheduling_latency_p50" : task_df.overhead.median(),
"scheduling_latency_p50_preemption" : preemption_task_df.overhead.median(),
"scheduling_latency_p50_success" : success_task_df.overhead.median(),
"starvation_log10_median": usage_df.starved_mem_log10.median(),
"starvation_log10_mean_cycle_median" : usage_df.groupby("time_ms").starved_mem_log10.mean().median(),
"tasks_run" : len(task_df),
"tasks_run_preemption" : len(preemption_task_df),
"tasks_run_success" : len(success_task_df),
"total_sim_time" : task_df.end_time_ms.max() - task_df.start_time_ms.min()
}]
return pandas.DataFrame(values)
| apache-2.0 |
alheinecke/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 7 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(
expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(expected_values,
col,
actual_dict[col]))
def _make_test_csv():
f = tempfile.NamedTemporaryFile(
dir=test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
intvalue = np.random.randint(-10, 10)
floatvalue = np.random.rand()
boolvalue = int(np.random.rand() > 0.3)
stringvalue = "S: %.4f" % np.random.rand()
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_csv_sparse():
f = tempfile.NamedTemporaryFile(
dir=test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
# leave columns empty; these will be read as default value (e.g. 0 or NaN)
intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
stringvalue = (("S: %.4f" % np.random.rand()) if np.random.rand() > 0.5 else
"")
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_tfrecord():
f = tempfile.NamedTemporaryFile(dir=test.get_temp_dir(), delete=False)
w = tf_record.TFRecordWriter(f.name)
for i in range(100):
ex = example_pb2.Example()
ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
ex.features.feature["fixed_len_float"].float_list.value.extend(
[float(i), 2 * float(i)])
w.write(ex.SerializeToString())
return f.name
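# Illustrative sketch (an editorial addition, not part of the original test
# module): reading back the records written by _make_test_tfrecord with the
# already-imported tf_record module, purely to show the round trip. The
# hypothetical helper is unused by the test cases below.
def _read_test_tfrecord_sketch(path):
  examples = []
  for record in tf_record.tf_record_iterator(path):
    ex = example_pb2.Example()
    ex.ParseFromString(record)
    examples.append(ex)
  return examples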
class TensorFlowDataFrameTestCase(test.TestCase):
"""Tests for `TensorFlowDataFrame`."""
def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
num_batches, batch_size):
self.assertItemsEqual(
list(pandas_df.columns) + ["index"], tensorflow_df.columns())
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
row_numbers = [
total_row_num % pandas_df.shape[0]
for total_row_num in range(batch_size * batch_num, batch_size * (
batch_num + 1))
]
expected_df = pandas_df.iloc[row_numbers]
_assert_df_equals_dict(expected_df, batch)
def testInitFromPandas(self):
"""Test construction from Pandas DataFrame."""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(
pandas_df, batch_size=10, shuffle=False)
batch = tensorflow_df.run_one_batch()
np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
"Expected index {}; got {}".format(
pandas_df.index.values, batch["index"]))
_assert_df_equals_dict(pandas_df, batch)
def testBatch(self):
"""Tests `batch` method.
`DataFrame.batch()` should iterate through the rows of the
`pandas.DataFrame`, and should "wrap around" when it reaches the last row.
"""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({
"albatross": range(10),
"bluejay": 1,
"cockatoo": range(0, 20, 2),
"penguin": list("abcdefghij")
})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
# Rebatch `df` into the following sizes successively.
batch_sizes = [4, 7]
num_batches = 3
final_batch_size = batch_sizes[-1]
for batch_size in batch_sizes:
tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=final_batch_size)
def testFromNumpy(self):
x = np.eye(20)
tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
for batch in tensorflow_df.run(30):
for ind, val in zip(batch["index"], batch["value"]):
expected_val = np.zeros_like(val)
expected_val[ind] = 1
np.testing.assert_array_equal(expected_val, val)
def testFromCSV(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
enqueue_size = 7
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
pandas_df = pd.read_csv(data_path)
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
enqueue_size=enqueue_size,
batch_size=batch_size,
shuffle=False,
default_values=default_values)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromCSVLimitEpoch(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
actual_num_batches = len(result_batches)
self.assertEqual(expected_num_batches, actual_num_batches)
# TODO(soergel): figure out how to dequeue the final small batch
    expected_rows = 1696  # 212 full batches of 8 rows; num_epochs * 100 would be 1700
actual_rows = sum([len(x["int"]) for x in result_batches])
self.assertEqual(expected_rows, actual_rows)
def testFromCSVWithFeatureSpec(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
data_path = _make_test_csv_sparse()
feature_spec = {
"int": parsing_ops.FixedLenFeature(None, dtypes.int16, np.nan),
"float": parsing_ops.VarLenFeature(dtypes.float16),
"bool": parsing_ops.VarLenFeature(dtypes.bool),
"string": parsing_ops.FixedLenFeature(None, dtypes.string, "")
}
pandas_df = pd.read_csv(data_path, dtype={"string": object})
# Pandas insanely uses NaN for empty cells in a string column.
# And, we can't use Pandas replace() to fix them because nan != nan
s = pandas_df["string"]
for i in range(0, len(s)):
if isinstance(s[i], float) and math.isnan(s[i]):
pandas_df.set_value(i, "string", "")
tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
[data_path],
batch_size=batch_size,
shuffle=False,
feature_spec=feature_spec)
# These columns were sparse; re-densify them for comparison
tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromExamples(self):
num_batches = 77
enqueue_size = 11
batch_size = 13
data_path = _make_test_tfrecord()
features = {
"fixed_len_float":
parsing_ops.FixedLenFeature(
shape=[2], dtype=dtypes.float32, default_value=[0.0, 0.0]),
"var_len_int":
parsing_ops.VarLenFeature(dtype=dtypes.int64)
}
tensorflow_df = df.TensorFlowDataFrame.from_examples(
data_path,
enqueue_size=enqueue_size,
batch_size=batch_size,
features=features,
shuffle=False)
    # `test.tfrecord` contains 100 records with two features: var_len_int and
    # fixed_len_float. Entry n contains `range(n % 3)` and
    # `[float(n), 2 * float(n)]` for var_len_int and fixed_len_float,
    # respectively.
num_records = 100
def _expected_fixed_len_float(n):
return np.array([float(n), 2 * float(n)])
def _expected_var_len_int(n):
return np.arange(n % 3)
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
record_numbers = [
n % num_records
for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
]
for i, j in enumerate(record_numbers):
np.testing.assert_allclose(
_expected_fixed_len_float(j), batch["fixed_len_float"][i])
var_len_int = batch["var_len_int"]
for i, ind in enumerate(var_len_int.indices):
val = var_len_int.values[i]
expected_row = _expected_var_len_int(record_numbers[ind[0]])
expected_value = expected_row[ind[1]]
np.testing.assert_array_equal(expected_value, val)
def testSplitString(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
a, b = tensorflow_df.split("string", 0.7) # no rebatching
total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
a_result_batches = list(a.run(num_epochs=num_epochs))
b_result_batches = list(b.run(num_epochs=num_epochs))
self.assertEqual(expected_num_batches, len(total_result_batches))
self.assertEqual(expected_num_batches, len(a_result_batches))
self.assertEqual(expected_num_batches, len(b_result_batches))
total_rows = sum([len(x["int"]) for x in total_result_batches])
a_total_rows = sum([len(x["int"]) for x in a_result_batches])
b_total_rows = sum([len(x["int"]) for x in b_result_batches])
print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
# TODO(soergel): figure out how to dequeue the final small batch
    expected_total_rows = 1696  # 212 full batches of 8 rows; num_epochs * 100 would be 1700
self.assertEqual(expected_total_rows, total_rows)
self.assertEqual(1087, a_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.7), a_total_rows)
self.assertEqual(609, b_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.3), b_total_rows)
# The strings used for hashing were all unique in the original data, but
# we ran 17 epochs, so each one should appear 17 times. Each copy should
# be hashed into the same partition, so there should be no overlap of the
# keys.
a_strings = set([s for x in a_result_batches for s in x["string"]])
b_strings = set([s for x in b_result_batches for s in x["string"]])
self.assertEqual(frozenset(), a_strings & b_strings)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_morph_data.py | 22 | 2290 | """
==========================================================
Morph source estimates from one subject to another subject
==========================================================
A source estimate from a given subject 'sample' is morphed
to the anatomy of another subject 'fsaverage'. The output
is a source estimate defined on the anatomy of 'fsaverage'.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subject_from = 'sample'
subject_to = 'fsaverage'
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis-meg'
src_fname = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
# Read input stc file
stc_from = mne.read_source_estimate(fname)
# Morph using one method (supplying the vertices in fsaverage's source
# space makes it faster). Note that for any generic subject, you could do:
# vertices_to = mne.grade_to_vertices(subject_to, grade=5)
# But fsaverage's source space was set up so we can just do this:
vertices_to = [np.arange(10242), np.arange(10242)]
stc_to = mne.morph_data(subject_from, subject_to, stc_from, n_jobs=1,
grade=vertices_to, subjects_dir=subjects_dir)
stc_to.save('%s_audvis-meg' % subject_to)
# Morph using another method -- useful if you're going to do a lot of the
# same inter-subject morphing operations; you could save and load morph_mat
morph_mat = mne.compute_morph_matrix(subject_from, subject_to,
stc_from.vertices, vertices_to,
subjects_dir=subjects_dir)
stc_to_2 = mne.morph_data_precomputed(subject_from, subject_to,
stc_from, vertices_to, morph_mat)
stc_to_2.save('%s_audvis-meg_2' % subject_to)
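# Optional sketch (not part of the original example): morph_mat is a scipy
# sparse matrix, so it can be written to disk and re-read later to avoid
# recomputing it on every run. The file name used here is arbitrary.
from scipy.io import mmwrite, mmread
mmwrite('fsaverage_morph_mat.mtx', morph_mat)
morph_mat_loaded = mmread('fsaverage_morph_mat.mtx').tocsr()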
# View source activations
plt.plot(stc_from.times, stc_from.data.mean(axis=0), 'r', label='from')
plt.plot(stc_to.times, stc_to.data.mean(axis=0), 'b', label='to')
plt.plot(stc_to_2.times, stc_to_2.data.mean(axis=0), 'g', label='to_2')
plt.xlabel('time (ms)')
plt.ylabel('Mean Source amplitude')
plt.legend()
plt.show()
| bsd-3-clause |
florian-f/sklearn | examples/plot_hmm_sampling.py | 9 | 2029 | """
==================================
Demonstration of sampling from HMM
==================================
This script shows how to sample points from a Hidden Markov Model (HMM):
we use a 4-component HMM with specified means and covariances.
The plot shows the sequence of observations generated, with the transitions
between them. We can see that, as specified by our transition matrix,
there are no transitions between components 1 and 3.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import hmm
##############################################################
# Prepare parameters for a 4-component HMM
# Initial population probability
start_prob = np.array([0.6, 0.3, 0.1, 0.0])
# The transition matrix; note that there are no transitions possible
# between components 1 and 3
trans_mat = np.array([[0.7, 0.2, 0.0, 0.1],
[0.3, 0.5, 0.2, 0.0],
[0.0, 0.3, 0.5, 0.2],
[0.2, 0.0, 0.2, 0.6]])
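# Quick check (added for illustration): the zero entries linking components 1
# and 3 are what forbid direct transitions between them.
assert trans_mat[0, 2] == 0.0 and trans_mat[2, 0] == 0.0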
# The means of each component
means = np.array([[0.0, 0.0],
[0.0, 11.0],
[9.0, 10.0],
[11.0, -1.0],
])
# The covariance of each component
covars = .5 * np.tile(np.identity(2), (4, 1, 1))
# Build an HMM instance and set parameters
model = hmm.GaussianHMM(4, "full", start_prob, trans_mat,
random_state=42)
# Instead of fitting it from the data, we directly set the estimated
# parameters, the means and covariance of the components
model.means_ = means
model.covars_ = covars
###############################################################
# Generate samples
X, Z = model.sample(500)
# Plot the sampled data
plt.plot(X[:, 0], X[:, 1], "-o", label="observations", ms=6,
mfc="orange", alpha=0.7)
# Indicate the component numbers
for i, m in enumerate(means):
plt.text(m[0], m[1], 'Component %i' % (i + 1),
size=17, horizontalalignment='center',
bbox=dict(alpha=.7, facecolor='w'))
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
rserban/chrono | src/projects/geomechanics/PlotShearCurve.py | 1 | 2344 | import sys
import matplotlib.pyplot as plt
if len(sys.argv) != 2:
print('usage: ' + sys.argv[0] + ' <data file>')
exit(1)
t = []
x = []
fm = []
fc = []
ffm = []
area = []
shear_stress_filtered = []
shear_stress_unfiltered = []
ultimate_shear_stress = []
# t,x,fm,fc,ffm,A,iter
file = open(sys.argv[1])
for line in file.readlines()[2:]:
tok = line.split(',')
t.append(float(tok[0]))
x.append(float(tok[1]))
fm.append(float(tok[2]))
fc.append(float(tok[3]))
ffm.append(float(tok[4]))
area.append(float(tok[5]))
first_area = area[0]
for i in range(len(ffm)):
shear_stress_filtered.append(ffm[i] / first_area)
shear_stress_unfiltered.append(fm[i] / first_area)
for i in range(len(shear_stress_filtered)):
ultimate_shear_stress.append(shear_stress_filtered[-1])
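# The loop above simply repeats the last filtered value; an equivalent
# one-line form (illustration only) is:
ultimate_shear_stress = [shear_stress_filtered[-1]] * len(shear_stress_filtered)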
plt.figure(1)
plt.title(r'Friction = 0.06, r = 0.001m')
plt.plot(x, shear_stress_unfiltered, 'g-', label='Unfiltered Shear Stress')
plt.plot(x, shear_stress_filtered, 'b-', label='Filtered Shear Stress')
plt.plot(x, ultimate_shear_stress, 'k-', label='Ultimate Shear Stress')
plt.xlabel('Shear Displacement (m)')
plt.ylabel('Shear Stress (N/m^2)')
plt.legend()
plt.show()
# plt.subplot(211)
# plt.plot(x, fm, 'b-')
# plt.title('Displacement-Shear Force (Un-filtered) Curve')
# plt.xlabel('Shear Displacement (m)')
# plt.ylabel('Shear Force Filtered (motor) (N)')
#
# plt.subplot(212)
# plt.plot(x, ffm, 'k-')
# plt.title('Displacement-Shear Force (Filtered) Curve')
# plt.xlabel('Shear Displacement (m)')
# plt.ylabel('Shear Force Filtered (motor) (N)')
# plt.subplot(212)
# plt.plot(x, shear_stress_filtered, 'b-', label='Shear Stress')
# plt.plot(x, ultimate_shear_stress, 'r-', label='Ultimate Shear Stress')
# plt.legend()
# plt.title('Displacement-Shear Stess Curve')
# plt.xlabel('Shear Displacement (m)')
# plt.ylabel('Shear Stress (motor) (N/m2)')
# plt.figure(1)
# plt.subplot(311)
# plt.plot(x, fm, 'r-')
# plt.title('Displacement-Shear Force Curve')
# plt.xlabel('Shear Displacement (m)')
# plt.ylabel('Shear Force (motor) (N)')
# plt.subplot(312)
# plt.plot(x, fc, 'g-')
# plt.title('Displacement-Shear Curve')
# plt.xlabel('Shear Displacement (m)')
# plt.ylabel('Shear Force (contact) (N)')
# plt.subplot(313)
# plt.plot(t, x, 'b-')
# plt.title('Time-Displacement Curve')
# plt.xlabel('Time (t)')
# plt.ylabel('Shear Displacement (m)')
# plt.show() | bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.py | 22 | 36536 | """Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
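# Illustrative example (not used by the solver): a custom `loss` callable must
# accept z = f**2 and return an array of shape (3, m) holding rho(z), rho'(z)
# and rho''(z). This sketch mirrors the built-in 'soft_l1' loss defined above
# and could be passed as ``least_squares(fun, x0, loss=_example_custom_soft_l1)``.
def _example_custom_soft_l1(z):
    t = 1 + z
    rho = np.empty((3,) + z.shape)
    rho[0] = 2 * (t**0.5 - 1)  # rho(z)
    rho[1] = t**-0.5           # first derivative
    rho[2] = -0.5 * t**-1.5    # second derivative
    return rho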
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional function of n variables) and
the loss function rho(s) (a scalar function), `least_squares` finds a
local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
        twice as many operations compared to '2-point' (default). The
scheme 'cs' uses complex steps, and while potentially the most
accurate, it is applicable only when `fun` correctly handles
complex inputs and can be analytically continued to the complex
plane. Method 'lm' always uses the '2-point' scheme. If callable,
it is used as ``jac(x, *args, **kwargs)`` and should return a
good approximation (or the exact value) for the Jacobian as an
array_like (np.atleast_2d is applied), a sparse matrix or a
`scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on the `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
influence, but may cause difficulties in optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
grad : ndarray, shape (m,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
Levenberg-Marquadt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
The implementation is based on paper [JJMore]_, it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into bounds
and efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robustly in
    unbounded and bounded problems, thus it is chosen as the default algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
    on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
    becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and leave
    ``x[0]`` unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
options may cause difficulties in optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
| mit |
seismology/mc_kernel | tests/create_pyffproc_seismograms.py | 1 | 3068 |
# coding: utf-8
#
# simple and convoluted script creating instaseis seismograms,
# filtering them with the pyffproc loggabor routine and storing
# the filtered seismogram for comparison with mckernel
#
import instaseis
import numpy as np
from filtering import bandpassfilter
import matplotlib.pyplot as plt
plot = True
# Create STF:
dt = 1.0
t_half = 10.0
t = np.arange(0,100,dt)
stf = 2 * np.exp(-(t/t_half)**2) * t / t_half**2
nsample = len(t)
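# Sanity check (illustrative, not in the original script): the STF above is the
# time derivative of 1 - exp(-(t/t_half)**2), so its integral over the 100 s
# window should be close to 1, i.e. a unit moment.
stf_area = np.trapz(stf, t)  # expected to be ~1.0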
f = open('./stf_pyffproc.dat', 'w')
f.write('%d %f\n'%(nsample, dt))
for y in stf:
f.write('%f\n'%(y))
f.close()
# Open instaseis db
db = instaseis.open_db("/scratch/auerl/wavefield_pyffproc_10s_647km_prem_ani/bwd/")
#db = instaseis.open_db('../wavefield/bwd')
receiver = instaseis.Receiver(latitude=-51.68, longitude=-58.06, network="AB", station="EFI")
source = instaseis.Source(latitude = -13.8200,
longitude = -67.2500,
depth_in_m = 647100,
m_rr = -7.590000e+27 / 1E7,
m_tt = 7.750000e+27 / 1E7,
m_pp = -1.600000e+26 / 1E7,
m_rt = -2.503000e+28 / 1E7,
m_rp = 4.200000e+26 / 1E7,
m_tp = -2.480000e+27 / 1E7,
time_shift=None,
sliprate=stf,
dt=dt)
source.resample_sliprate(db.info.dt, db.info.npts)
# create velocity and displacement seismograms
st = db.get_seismograms(source, receiver, reconvolve_stf=True,
kind="displacement", remove_source_shift=False)
st.filter('lowpass', freq=1./10.0)
tr_disp = st[0]
st = db.get_seismograms(source, receiver, reconvolve_stf=True,
kind="velocity", remove_source_shift=False)
st.filter('lowpass', freq=1./10.0)
tr_velo = st[0]
# bandpass the seismograms
len_orig=np.size(tr_disp.data)
tr_velo_bandpassed = bandpassfilter(tr_velo, 'log-gabor', 1024, 8, 30, 1.4142, 0.5)
tr_disp_bandpassed = bandpassfilter(tr_disp, 'log-gabor', 1024, 8, 30, 1.4142, 0.5)
if plot:
f, axarr = plt.subplots(3, sharex=True,sharey=True)
# plot and export
for i in np.arange(0,4,1):
f = open('seism_ref_raw_EFI_P_0'+str(i+1), 'w')
f.write('%d\n'%len_orig)
for y in tr_disp.data:
f.write('%e\n'%y)
f.close()
f = open('seism_ref_EFI_P_0'+str(i+1), 'w')
f.write('%d\n'%len_orig)
for y in np.arange(0,len_orig):
a=tr_disp_bandpassed[0][y, 0, i]
b=tr_velo_bandpassed[0][y, 0, i]
f.write('%e %e\n' % (a,b))
f.close()
if plot:
c=np.genfromtxt('./Seismograms/seism_EFI_P_0'+str(i+1))
time=c[:,0]
axarr[0].plot(time,tr_velo_bandpassed[0][0:len_orig, 0, i])
axarr[1].plot(time,c[:,2])
axarr[2].plot(time,tr_velo.data)
axarr[0].set_title('Gabor filter results in pyffproc')
axarr[1].set_title('Gabor filter results in mckernel')
axarr[2].set_title('Raw unfiltered seismogram')
if plot:
plt.show()
| gpl-3.0 |
jdorvi/MonteCarlos_SLC | calculate_gap.py | 1 | 1836 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 17 17:57:40 2016
@author: jdorvinen
"""
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# <codecell>
# Model data fit: alpha=1.498, beta=-0.348, gamma=1.275
# Callaghan et al. used: alpha=21.46, beta=1.08, gamma=1.07
a = 12*1.498
b = 12*-0.348
c = 12*1.275
a_c = 21.46
b_c = 1.08
c_c = 1.07
w = 2*np.pi
rnv = np.random.random()
# formulaG below gives the CDF of the gap Gi; equating it to the uniform
# random variable rnv and solving for Gi yields a random gap length.
# formulaG = '1 - np.exp(-(a*w*Gi \
# + b*(np.cos(w*te) - np.cos(w*(te + Gi))) \
# - c*(np.sin(w*te) - np.sin(w*(te + Gi))))/w)'
formulaG = '1 - np.exp(-({0}*w*Gi \
+ {1}*(np.cos(w*te) - np.cos(w*(te + Gi))) \
- {2}*(np.sin(w*te) - np.sin(w*(te + Gi))))/w)'
# Initial estimate of Gi. Obtained from the second order Taylor series
# expansion about Gi=0 of "formulaG"
formulaGi_0 = 'rnv / (a + b*np.sin(w*te[i-1]) + c*np.cos(w*te[i-1]))'
def func(te,Gi,a,b,c):
z = eval(formulaG.format(a,b,c))
return z
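# Illustrative sketch (not in the original script; assumes scipy is available):
# since formulaG is the CDF of the gap Gi, a random gap can be drawn by solving
# func(te, Gi, a, b, c) == rnv for Gi. A bracketing root-finder works because
# func rises from 0 towards 1 as Gi grows; the bracket [0, 50] and the example
# call below are arbitrary choices for demonstration.
from scipy.optimize import brentq
def draw_gap(te_val, rnv_val, a=a, b=b, c=c):
    return brentq(lambda Gi: func(te_val, Gi, a, b, c) - rnv_val, 0.0, 50.0)
example_gap = draw_gap(te_val=0.25, rnv_val=0.37)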
te = np.arange(0,1.01,0.01)
Gi = np.arange(0,1.01,0.01)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = np.meshgrid(te, Gi)
zs = np.array([func(te,Gi,a,b,c) for te,Gi in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
zs2 = np.array([func(te,Gi,a_c,b_c,c_c) for te,Gi in zip(np.ravel(X), np.ravel(Y))])
Z2 = zs2.reshape(X.shape)
#from mayavi import mlab
#s1 = mlab.mesh(X,Y,Z)
#s2 = mlab.mesh(X,Y,Z2)
#mlab.show
ax.plot_surface(X,Y,Z,
cmap = 'viridis_r',
rstride=1,
cstride=10,
alpha=1,
zorder=0,
linewidth=0)
#ax.plot_surface(X,Y,Z2, color='yellow', alpha=1, zorder=1)
ax.set_xlabel('TimeEnd')
ax.set_ylabel('Gi')
ax.set_zlabel('RNV')
plt.show()
| mit |
nvoron23/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formated as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
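    # With weights [1, 2, 1, 2, 1, 2], column 0 (labels [1, 2, 1, 4, 2, 1]) gets
    # weighted counts 1+1+2=4 for class 1, 2+1=3 for class 2 and 2 for class 4,
    # so its prior becomes [4/9, 3/9, 2/9] out of a total weight of 9.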
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
zifeo/nest-simulator | pynest/examples/sinusoidal_poisson_generator.py | 9 | 5522 | # -*- coding: utf-8 -*-
#
# sinusoidal_poisson_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
'''
Sinusoidal Poisson generator example
------------------------------------
This script demonstrates the use of the `sinusoidal_poisson_generator`
and its different parameters and modes. The source code of the model
can be found in models/sinusoidal_poisson_generator.h.
The script is structured into two parts and creates one common figure.
In Part 1, two instances of the `sinusoidal_poisson_generator` are
created with different parameters. Part 2 illustrates the effect of
the ``individual_spike_trains`` switch.
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting.
'''
import nest
nest.ResetKernel() # in case we run the script multiple times from iPython
import matplotlib.pyplot as plt
import numpy as np
'''
Then we create two instances of the `sinusoidal_poisson_generator`
with two different parameter sets using `Create`. Moreover, we create
devices to record firing rates (`multimeter`) and spikes
(`spike_detector`) and connect them to the generators using `Connect`.
'''
nest.SetKernelStatus({'resolution': 0.01})
g = nest.Create('sinusoidal_poisson_generator', n=2, params=[{'rate': 10000.0,
'amplitude': 5000.0,
'frequency': 10.0,
'phase': 0.0},
{'rate': 0.0,
'amplitude': 10000.0,
'frequency': 5.0,
'phase': 90.0}])
m = nest.Create('multimeter', n=2, params={'interval': 0.1, 'withgid': False,
'record_from': ['rate']})
s = nest.Create('spike_detector', n=2, params={'withgid': False})
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
print nest.GetStatus(m)
nest.Simulate(200)
'''
After simulating, the spikes are extracted from the
`spike_detector` using `GetStatus` and plots are created with panels
for the PST and ISI histograms.
'''
colors = ['b', 'g']
for j in range(2):
ev = nest.GetStatus([m[j]])[0]['events']
t = ev['times']
r = ev['rate']
sp = nest.GetStatus([s[j]])[0]['events']['times']
plt.subplot(221)
h, e = np.histogram(sp, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(sp), bins=np.arange(0., 1.005, 0.02),
histtype='step', color=colors[j])
plt.title('ISI histogram')
'''
The kernel is reset and the number of threads set to 4.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})     # show that this works with multiple threads
'''
First, a `sinusoidal_poisson_generator` with
`individual_spike_trains` set to ``True`` is created and connected to
20 parrot neurons whose spikes are recorded by a spike detector. After
simulating, a raster plot of the spikes is created.
'''
g = nest.Create('sinusoidal_poisson_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p, 'all_to_all')
nest.Connect(p, s, 'all_to_all')
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(222)
plt.plot(ev['times'], ev['senders']-min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
'''
The kernel is reset again and the whole procedure is repeated for
a `sinusoidal_poisson_generator` with `individual_spike_trains` set to ``False``. The plot
shows that in this case, all neurons receive the same spike train from
the `sinusoidal_poisson_generator`.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})     # show that this works with multiple threads
g = nest.Create('sinusoidal_poisson_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p, 'all_to_all')
nest.Connect(p, s, 'all_to_all')
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(224)
plt.plot(ev['times'], ev['senders']-min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
| gpl-2.0 |
anurag313/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
harisbal/pandas | pandas/tests/indexes/timedeltas/test_formats.py | 9 | 3573 | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
from pandas import TimedeltaIndex
class TestTimedeltaIndexRendering(object):
@pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
def test_representation(self, method):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = getattr(idx, method)()
assert result == expected
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = ("0 1 days\n"
"dtype: timedelta64[ns]")
exp3 = ("0 1 days\n"
"1 2 days\n"
"dtype: timedelta64[ns]")
exp4 = ("0 1 days\n"
"1 2 days\n"
"2 3 days\n"
"dtype: timedelta64[ns]")
exp5 = ("0 1 days 00:00:01\n"
"1 2 days 00:00:00\n"
"2 3 days 00:00:00\n"
"dtype: timedelta64[ns]")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
assert result == expected
def test_summary(self):
# GH#9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = ("TimedeltaIndex: 0 entries\n"
"Freq: D")
exp2 = ("TimedeltaIndex: 1 entries, 1 days to 1 days\n"
"Freq: D")
exp3 = ("TimedeltaIndex: 2 entries, 1 days to 2 days\n"
"Freq: D")
exp4 = ("TimedeltaIndex: 3 entries, 1 days to 3 days\n"
"Freq: D")
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx._summary()
assert result == expected
| bsd-3-clause |
ab93/Depression-Identification | src/tests/clf_test.py | 1 | 6446 | import unittest
import numpy as np
from sklearn.linear_model import LogisticRegression
from ..models.classifier import MetaClassifier, LateFusionClassifier
from ..feature_extract.read_labels import features
from ..main.classify import get_single_mode_data, get_multi_data
class MetaClassifierTest(unittest.TestCase):
"""
Tests for the models.MetaClassifier class
"""
def _get_dummy_data(self):
x1 = np.array([ np.array([[1,5,7], [1,2,4], [1,8,9]]), # [r1,r2,r3] for p1
np.array([[2,8,6], [2,0,3]]), # [r1,r2] for p2
np.array([[3,7,5], [3,4,3], [3,9,7]]) # [r1,r2,r3] for p3
])
# for non discriminative
x2 = np.array([ np.array([[1,5,7], [1,2,4]]),
np.array([[2,8,6], [2,0,3], [2,5,5]]),
np.array([[3,7,5], [3,4,3], [3,9,7]])
])
y1 = np.array([ np.array([1,1,1]),
np.array([1,1]),
np.array([0,0,0])
])
y2 = np.array([ np.array([0,0]),
np.array([0,0,0]),
np.array([1,1,1])
])
X = [x1,x2]
y = [y1,y2]
return X,y
def _get_classifiers(self):
clf1 = LogisticRegression(n_jobs=-1, class_weight={1:4})
clf2 = LogisticRegression(n_jobs=-1, class_weight={1:4})
return [clf1, clf2]
def test_fit_predict(self):
X_list, y_list = self._get_dummy_data()
clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
meta_clf = MetaClassifier(clfs)
meta_clf.fit(X_list,y_list)
print "\npredict:",meta_clf.predict(X_list)
def test_fit_predict_proba(self):
X_list, y_list = self._get_dummy_data()
clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
meta_clf = MetaClassifier(clfs)
meta_clf.fit(X_list,y_list)
print "\npredict:",meta_clf.predict_proba(X_list)
def test_fit_score(self):
X_list, y_list = self._get_dummy_data()
clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
meta_clf = MetaClassifier(clfs)
meta_clf.fit(X_list,y_list)
y_true = np.array([1,0,0])
print "\nscore:",meta_clf.score(X_list, y_true)
def test_model(self):
X_train, y_train, X_val, y_val = get_single_mode_data()
y_true = map(int,map(np.mean,y_val[0]))
clfs = self._get_classifiers()
meta_clf = MetaClassifier(classifiers=clfs, weights=[0.9, 0.1])
meta_clf.fit(X_train, y_train)
print "\nTesting data..."
preds = meta_clf.predict_proba(X_val, get_all=True)
print "F1-score: ", meta_clf.score(X_val, y_true)
print "Accuracy: ", meta_clf.score(X_val, y_true, scoring='accuracy')
for i in xrange(len(y_true)):
print preds[0][i], preds[1][i], y_true[i]
class LateFusionClassifierTest(unittest.TestCase):
"""
    Tests for the models.LateFusionClassifier class
"""
def _get_dummy_data(self):
x1 = np.array([ np.array([[1,5,7], [1,2,4], [1,8,9]]),
np.array([[2,8,6], [2,0,3]]),
np.array([[3,7,5], [3,4,3], [3,9,7]])
])
x2 = np.array([ np.array([[1,5,7], [1,2,4]]),
np.array([[2,8,6], [2,0,3], [2,5,5]]),
np.array([[3,7,5], [3,4,3], [3,9,7]])
])
y1 = np.array([ np.array([1,1,1]),
np.array([1,1]),
np.array([0,0,0])
])
y2 = np.array([ np.array([0,0]),
np.array([0,0,0]),
np.array([1,1,1])
])
X_acou, y_acou = [x1,x2], [y1,y2]
X_vis, y_vis = [x1,x2], [y1,y2]
X_lin, y_lin = [x1,x2], [y1,y2]
return [X_acou, X_vis, X_lin], [y_acou, y_vis, y_lin]
def _get_fitted_clf(self,Xs,ys):
clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
meta_clf = MetaClassifier(clfs)
meta_clf.fit(Xs,ys)
return meta_clf
def test_fit_predict(self):
Xs, Ys = self._get_dummy_data()
clf1 = self._get_fitted_clf(Xs[0],Ys[0])
clf2 = self._get_fitted_clf(Xs[1],Ys[1])
clf3 = self._get_fitted_clf(Xs[2],Ys[2])
lf_clf = LateFusionClassifier(classifiers=[clf1,clf2,clf3])
lf_clf.fit(Xs,Ys)
print "\npredict:\n", lf_clf.predict(Xs)
print "\npredict_proba:\n",lf_clf.predict_proba(Xs)
def test_scores(self):
Xs, Ys = self._get_dummy_data()
clf1 = self._get_fitted_clf(Xs[0],Ys[0])
clf2 = self._get_fitted_clf(Xs[1],Ys[1])
clf3 = self._get_fitted_clf(Xs[2],Ys[2])
lf_clf = LateFusionClassifier(classifiers=[clf1,clf2,clf3])
lf_clf.fit(Xs,Ys)
y_true = np.array([1,0,0])
print "\npredict:\n", lf_clf.predict(Xs)
print "\nscore:", lf_clf.score(Xs,y_true)
def test_late_fusion_model(self):
# Read the data
Xs_train, ys_train, Xs_val, ys_val = get_multi_data()
clf_A_D = LogisticRegression(C=1, penalty='l2', class_weight={1:4})
clf_A_ND = LogisticRegression(C=0.001, penalty='l1', class_weight={1:4})
clf_V_D = LogisticRegression(C=1.0, penalty='l2', class_weight={1:4})
clf_V_ND = LogisticRegression(C=1.0, penalty='l2', class_weight={1:4})
clf_L_D = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
clf_L_ND = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
clf_A = MetaClassifier(classifiers=[clf_A_D, clf_A_ND])
clf_V = MetaClassifier(classifiers=[clf_V_D, clf_V_ND])
clf_L = MetaClassifier(classifiers=[clf_L_D, clf_L_ND])
lf_clf = LateFusionClassifier(classifiers=[clf_A, clf_V, clf_L], weights=[0.6,0.2,0.1])
lf_clf.fit(Xs_train, ys_train)
print lf_clf.predict(Xs_val)
preds = lf_clf.predict_proba(Xs_val, get_all=True)
y_true = map(int,map(np.mean,ys_val[0][0]))
print lf_clf.score(Xs_val,y_true,scoring='f1')
for i in xrange(len(y_true)):
print preds[0][i], preds[1][i], preds[2][i], y_true[i]
if __name__ == '__main__':
unittest.main()
| mit |
Barmaley-exe/scikit-learn | sklearn/feature_selection/variance_threshold.py | 26 | 2532 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
tommiseppanen/visualizations | tyre-model/old-plots/combined-slip-limited.py | 1 | 1501 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
def coefficient(slipValue, extremumValue, extremumSlip, asymptoteValue, asymptoteSlip):
    value = asymptoteValue
    absoluteSlip = abs(slipValue)
    if absoluteSlip <= extremumSlip:
        value = (extremumValue / extremumSlip) * absoluteSlip
    elif extremumSlip < absoluteSlip < asymptoteSlip:
        value = ((asymptoteValue - extremumValue) / (asymptoteSlip - extremumSlip)) \
            * (absoluteSlip - extremumSlip) + extremumValue
    return value
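# A few worked values for the longitudinal curve used below
# (extremumValue=1, extremumSlip=0.2, asymptoteValue=0.75, asymptoteSlip=0.4):
#   coefficient(0.1, 1, 0.2, 0.75, 0.4) -> 0.5    (linear rise towards the extremum)
#   coefficient(0.3, 1, 0.2, 0.75, 0.4) -> 0.875  (interpolation between extremum and asymptote)
#   coefficient(0.5, 1, 0.2, 0.75, 0.4) -> 0.75   (clamped at the asymptote value)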
def adjustedLateral(longitudinal, lateral, extremumLongitudinalValue):
return lateral*np.sqrt(1-(longitudinal/extremumLongitudinalValue)**2)
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(0, 0.9, 0.01)
Y = np.arange(0, 90, 1)
X, Y = np.meshgrid(X, Y)
longfunc = np.vectorize(lambda t: coefficient(t, 1, 0.2, 0.75, 0.4))
lateralfunc = np.vectorize(lambda t: coefficient(t, 1.0, 20.0, 0.75, 40))
Z = np.sqrt(longfunc(X)**2 + adjustedLateral(longfunc(X), lateralfunc(Y), 1.0)**2)
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_zlim(0.0, 2.0)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
| mit |
PatrickOReilly/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
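# For example, token_freqs("To be or not to be") returns
# {'to': 2, 'be': 2, 'or': 1, 'not': 1}, since tokens() lowercases the text
# and splits it on \w+ word characters.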
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
jondo/shogun | examples/undocumented/python_modular/graphical/multiclass_qda.py | 26 | 3294 | """
Shogun demo
Fernando J. Iglesias Garcia
"""
import numpy as np
import matplotlib as mpl
import pylab
import util
from scipy import linalg
from modshogun import QDA
from modshogun import RealFeatures, MulticlassLabels
# colormap
cmap = mpl.colors.LinearSegmentedColormap('color_classes',
{'red': [(0, 1, 1),
(1, .7, .7)],
'green': [(0, 1, 1),
(1, .7, .7)],
'blue': [(0, 1, 1),
(1, .7, .7)]})
pylab.cm.register_cmap(cmap = cmap)
# Generate data from Gaussian distributions
def gen_data():
np.random.seed(0)
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([-4, 3]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-1, -5]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([3, 4])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def plot_data(qda, X, y, y_pred, ax):
X0, X1, X2 = X[y == 0], X[y == 1], X[y == 2]
# Correctly classified
tp = (y == y_pred)
tp0, tp1, tp2 = tp[y == 0], tp[y == 1], tp[y == 2]
X0_tp, X1_tp, X2_tp = X0[tp0], X1[tp1], X2[tp2]
# Misclassified
X0_fp, X1_fp, X2_fp = X0[tp0 != True], X1[tp1 != True], X2[tp2 != True]
# Class 0 data
pylab.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color = cols[0])
pylab.plot(X0_fp[:, 0], X0_fp[:, 1], 's', color = cols[0])
m0 = qda.get_mean(0)
pylab.plot(m0[0], m0[1], 'o', color = 'black', markersize = 8)
# Class 1 data
pylab.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color = cols[1])
pylab.plot(X1_fp[:, 0], X1_fp[:, 1], 's', color = cols[1])
m1 = qda.get_mean(1)
pylab.plot(m1[0], m1[1], 'o', color = 'black', markersize = 8)
# Class 2 data
pylab.plot(X2_tp[:, 0], X2_tp[:, 1], 'o', color = cols[2])
pylab.plot(X2_fp[:, 0], X2_fp[:, 1], 's', color = cols[2])
m2 = qda.get_mean(2)
pylab.plot(m2[0], m2[1], 'o', color = 'black', markersize = 8)
def plot_cov(plot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0]) # rad
angle = 180 * angle / np.pi # degrees
# Filled gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2*v[0]**0.5, 2*v[1]**0.5, 180 + angle, color = color)
ell.set_clip_box(plot.bbox)
ell.set_alpha(0.5)
plot.add_artist(ell)
def plot_regions(qda):
nx, ny = 500, 500
x_min, x_max = pylab.xlim()
y_min, y_max = pylab.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
dense = RealFeatures(np.array((np.ravel(xx), np.ravel(yy))))
dense_labels = qda.apply(dense).get_labels()
Z = dense_labels.reshape(xx.shape)
pylab.pcolormesh(xx, yy, Z)
pylab.contour(xx, yy, Z, linewidths = 3, colors = 'k')
# Number of classes
M = 3
# Number of samples of each class
N = 300
# Dimension of the data
dim = 2
cols = ['blue', 'green', 'red']
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.title('Quadratic Discriminant Analysis')
X, y = gen_data()
labels = MulticlassLabels(y)
features = RealFeatures(X.T)
qda = QDA(features, labels, 1e-4, True)
qda.train()
ypred = qda.apply().get_labels()
plot_data(qda, X, y, ypred, ax)
for i in range(M):
plot_cov(ax, qda.get_mean(i), qda.get_cov(i), cols[i])
plot_regions(qda)
pylab.connect('key_press_event', util.quit)
pylab.show()
| gpl-3.0 |
brchiu/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 41 | 6589 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (
tf.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
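# Illustrative usage of dataset() -- a sketch only, assuming the graph-mode
# tf.data idioms of the TensorFlow 1.x API this example targets:
#
#   train, test = dataset()
#   train_batches = train.shuffle(1000).repeat().batch(128)
#   features, labels = train_batches.make_one_shot_iterator().get_next()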
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
| apache-2.0 |
rohanp/scikit-learn | sklearn/utils/deprecation.py | 77 | 2417 | import warnings
__all__ = ["deprecated", ]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
| bsd-3-clause |
mikebenfield/scipy | scipy/integrate/quadrature.py | 7 | 28155 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=0), None
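# Example (illustrative): a 5-point rule already integrates a smooth function
# such as cos to roughly 1e-8 accuracy on [0, pi/2]:
#
#   val, _ = fixed_quad(np.cos, 0.0, np.pi / 2, n=5)
#   round(val, 6)  # -> 1.0; the exact integral is sin(pi/2) - sin(0) = 1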
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
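# Example (illustrative): the adaptive version keeps increasing the order of
# the Gaussian rule until the change between successive estimates is within
# tolerance:
#
#   val, err = quadrature(np.cos, 0.0, np.pi / 2)
#   # val is ~1.0 (the exact integral) and err is below the default tolerance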
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : int, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
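        # For a single pair of intervals with widths h0 and h1 the update below is
        #   (h0 + h1)/6 * [(2 - h1/h0)*y0 + ((h0 + h1)**2/(h0*h1))*y1 + (2 - h0/h1)*y2]
        # which reduces to the familiar h/3 * (y0 + 4*y1 + y2) when h0 == h1.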
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
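# Example (illustrative): with an odd number of equally spaced samples,
# Simpson's rule integrates polynomials up to order 3 exactly:
#
#   x = np.linspace(0, 4, 5)
#   simps(x**2, x)  # ~64/3, the exact integral of x**2 over [0, 4]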
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
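# Example (illustrative): romb needs 2**k + 1 equally spaced samples:
#
#   x = np.linspace(0, 1, 2**4 + 1)
#   romb(x**2, dx=1.0 / 2**4)  # ~1/3, the exact integral of x**2 over [0, 1]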
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
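# For example, the N = 2 entry below decodes to Simpson's rule:
#   a = 1 * array([1, 4, 1]) / 3 = [1/3, 4/3, 1/3] and B = -1/90.
#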
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]`
and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average samples spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
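# Quick sanity check of newton_cotes (illustrative, doctest-style comment only;
# it is not executed as part of the module):
#
#     >>> an, B = newton_cotes(3, equal=1)
#     >>> an
#     array([ 0.375,  1.125,  1.125,  0.375])
#     >>> B
#     -0.0375
#
# which is Simpson's 3/8 rule: integrate(f, x_0, x_3) ~= dx*(3*f_0 + 9*f_1
# + 9*f_2 + 3*f_3)/8 with error coefficient -3/80.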
| bsd-3-clause |
james4424/nest-simulator | examples/nest/plot_tsodyks_depr_fac.py | 17 | 1135 | # -*- coding: utf-8 -*-
#
# plot_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
vm = load('voltmeter-4-0.dat')
figure(1)
clf()
plot(vm[:, 0], vm[:, 1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
| gpl-2.0 |
trnewman/VT-USRP-daughterboard-drivers | gr-utils/src/python/gr_plot_const.py | 5 | 9871 | #!/usr/bin/env python
#
# Copyright 2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
from matplotlib.font_manager import fontManager, FontProperties
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class draw_constellation:
def __init__(self, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename), weight="heavy", size=16)
self.text_file_pos = figtext(0.10, 0.90, "File Position: ", weight="heavy", size=16)
self.text_block = figtext(0.40, 0.90, ("Block Size: %d" % self.block_length),
weight="heavy", size=16)
self.text_sr = figtext(0.60, 0.90, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=16)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
connect('button_press_event', self.mouse_button_callback)
show()
def get_data(self):
self.text_file_pos.set_text("File Position: %d" % (self.hfile.tell()//self.sizeof_data))
iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
#print "Read in %d items" % len(iq)
if(len(iq) == 0):
print "End of File"
else:
self.reals = [r.real for r in iq]
self.imags = [i.imag for i in iq]
self.time = [i*(1/self.sample_rate) for i in range(len(self.reals))]
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
self.get_data()
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.4, 0.6])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_iq = self.sp_iq.plot(self.time, self.reals, 'bo-', self.time, self.imags, 'ro-')
# Subplot for constellation plot
self.sp_const = self.fig.add_subplot(2,2,1, position=[0.575, 0.2, 0.4, 0.6])
self.sp_const.set_title(("Constellation"), fontsize=self.title_font_size, fontweight="bold")
self.sp_const.set_xlabel("Inphase", fontsize=self.label_font_size, fontweight="bold")
self.sp_const.set_ylabel("Qaudrature", fontsize=self.label_font_size, fontweight="bold")
self.plot_const = self.sp_const.plot(self.reals, self.imags, 'bo')
# Add plots to mark current location of point between time and constellation plots
self.indx = 0
self.plot_iq += self.sp_iq.plot([self.time[self.indx],], [self.reals[self.indx],], 'mo', ms=8)
self.plot_iq += self.sp_iq.plot([self.time[self.indx],], [self.imags[self.indx],], 'mo', ms=8)
self.plot_const += self.sp_const.plot([self.reals[self.indx],], [self.imags[self.indx],], 'mo', ms=12)
# Adjust axis
self.sp_iq.axis([min(self.time), max(self.time),
1.5*min([min(self.reals), min(self.imags)]),
1.5*max([max(self.reals), max(self.imags)])])
self.sp_const.axis([-2, 2, -2, 2])
draw()
def update_plots(self):
self.plot_iq[0].set_data([self.time, self.reals])
self.plot_iq[1].set_data([self.time, self.imags])
self.sp_iq.axis([min(self.time), max(self.time),
1.5*min([min(self.reals), min(self.imags)]),
1.5*max([max(self.reals), max(self.imags)])])
self.plot_const[0].set_data([self.reals, self.imags])
self.sp_const.axis([-2, 2, -2, 2])
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim.all() != curxlim.all()):
self.xlim = newxlim
r = self.reals[int(ceil(self.xlim[0])) : int(ceil(self.xlim[1]))]
i = self.imags[int(ceil(self.xlim[0])) : int(ceil(self.xlim[1]))]
self.plot_const[0].set_data(r, i)
self.sp_const.axis([-2, 2, -2, 2])
self.manager.canvas.draw()
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
trace_forward_valid_keys = [">",]
trace_backward_valid_keys = ["<",]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
elif(find(event.key, trace_forward_valid_keys)):
self.indx = min(self.indx+1, len(self.time)-1)
self.set_trace(self.indx)
elif(find(event.key, trace_backward_valid_keys)):
self.indx = max(0, self.indx-1)
self.set_trace(self.indx)
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
def mouse_button_callback(self, event):
x, y = event.xdata, event.ydata
if x is not None and y is not None:
if(event.inaxes == self.sp_iq):
self.indx = searchsorted(self.time, [x])
self.set_trace(self.indx)
def set_trace(self, indx):
self.plot_iq[2].set_data(self.time[indx], self.reals[indx])
self.plot_iq[3].set_data(self.time[indx], self.imags[indx])
self.plot_const[1].set_data(self.reals[indx], self.imags[indx])
draw()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time and the constellation plot (I vs. Q). You can set the block size to specify how many points to read in at a time and the start position in the file. By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = draw_constellation(filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
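# Example invocation (hypothetical capture file name, added for illustration):
#
#   ./gr_plot_const.py -B 1000 -s 0 -R 250e3 usrp_samples.32fc
#
# reads 1000 complex samples at a time starting from the beginning of
# usrp_samples.32fc, plots I and Q against a 250 kHz time base, and shows the
# corresponding constellation; the on-screen </> buttons or the arrow keys
# page through the file block by block.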
| gpl-3.0 |
Myasuka/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
mlyundin/Machine-Learning | ex2/ex2_reg.py | 1 | 1663 | import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
from common_functions import load_data, add_zero_feature, lr_accuracy, cf_lr_reg as cost_function, gf_lr_reg as grad_function
def map_feature(X1, X2, degree=6):
return add_zero_feature(np.hstack([X1**(i-j)*X2**j for i in range(1, degree+1) for j in range(i+1)]))
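# Illustrative note (not part of the original script): for degree=2 the columns
# produced are [1, x1, x2, x1^2, x1*x2, x2^2]; with the default degree=6 the
# mapping yields 28 polynomial features, including the leading bias column
# added by add_zero_feature (presumably a column of ones for the intercept).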
if __name__ == '__main__':
X, y = load_data('ex2data2.txt')
x1, x2 = X.T
f_y = y.ravel()
plt.plot(x1[f_y == 0], x2[f_y == 0], 'yo')
plt.plot(x1[f_y == 1], x2[f_y == 1], 'bx')
plt.show()
X = map_feature(X[:, 0:1], X[:, 1:])
m, n = X.shape
initial_theta = np.ones((n, 1))
lambda_coef = 0.1
theta = minimize(cost_function, initial_theta, method='BFGS', jac=grad_function, options={'disp': False},
args=(X, y, lambda_coef)).x
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
X1, X2 = np.meshgrid(u, v)
X1, X2 = X1.reshape(-1, 1), X2.reshape(-1, 1)
temp_X = map_feature(X1, X2)
z = np.dot(temp_X, theta).reshape(len(u), len(v))
plt.plot(x1[f_y == 0], x2[f_y == 0], 'yo')
plt.plot(x1[f_y == 1], x2[f_y == 1], 'bx')
CS = plt.contour(u, v, z)
plt.clabel(CS, inline=1, fontsize=10)
plt.show()
for lambda_coef in (0.03, 0.3, 0.1, 1, 3, 10):
initial_theta = np.ones((n, 1))
theta = minimize(cost_function, initial_theta, method='BFGS', jac=grad_function, options={'disp': False},
args=(X, y, lambda_coef)).x
print 'lambda = {}, Train Accuracy: {}'.format(lambda_coef, lr_accuracy(X, y, theta)) | mit |
johnny555/2d3g | utils.py | 1 | 7610 | __author__ = 'jvial'
import pandas as pd
import numpy as np
lith = pd.read_csv('corrected_lithology.csv')
geo = pd.read_csv('Complete_Geophysics.csv')
# Read in all ATV data once.
atv_dictionary = {}
print('Read in lith and geo')
def get_label(bore_id, depth, rtype=False):
"""
Function to get the label, will return either a string, nan or None.
I bore_id is unknown it will raise an error.
If we are at a labelled stratigraphy it will return a string.
If we are at an unlabbelled stratigraphy it will return NaN
if we are outside the bounds it will return None.
:param bore_id: A string containing the bore id
:param depth: a float for the depth
:return:
"""
holeid = pd.unique(lith.HOLEID)
if bore_id not in holeid.tolist():
raise Exception('BoreId {} not in corrected lith logs'.format(bore_id))
bore = lith.query('HOLEID == @bore_id and GEOLFROM < @depth and GEOLTO >= @depth')
if bore.shape[0] >= 1:
if rtype:
seam = bore.iloc[0, 4] # Rock type
else:
seam = bore.iloc[0, 5] # The lith_seam is at location 5
else:
seam = None
return seam
cols = ['ADEN', 'AUCS', 'AVOL', 'AXLE', 'AXLN', 'AZID', 'AZIF', 'AZP1', 'BBRG',
'BISI', 'BRAD', 'BRDU', 'BRG1', 'BRG2', 'BRG3', 'BRG4', 'CADE', 'CALD',
'CODE', 'CORF', 'DECR', 'DENB', 'DENL', 'DEPO', 'DIPF', 'FE1', 'FE1C',
'FE1U', 'FE2', 'FMAG', 'GRDE', 'GRNP', 'HVOL', 'LSDU', 'LSN', 'MC2A',
'MC2F', 'MC2U', 'MC4F', 'MC6F', 'MCUF', 'MDTC', 'MSAL', 'P1F', 'P2F',
'P3F', 'PCH1', 'RAD1', 'RAD2', 'RAD3', 'RAD4', 'RPOR', 'SPOR', 'SSN',
'TDEP', 'TDIF', 'TEMP', 'TILD', 'UCS', 'UCSM', 'VDEN', 'VL2A', 'VL2F',
'VL4F', 'VL6F', 'VLUF', 'UCSD', 'TILF', 'GRFE', 'DTCA',
'DTCB', 'DTCC', 'DTCD', 'DTCE', 'DTCF']
atv_cols = ['ATV_AMP[0]',
'ATV_AMP[1]',
'ATV_AMP[2]',
'ATV_AMP[3]',
'ATV_AMP[4]',
'ATV_AMP[5]',
'ATV_AMP[6]',
'ATV_AMP[7]',
'ATV_AMP[8]',
'ATV_AMP[9]',
'ATV_AMP[10]',
'ATV_AMP[11]',
'ATV_AMP[12]',
'ATV_AMP[13]',
'ATV_AMP[14]',
'ATV_AMP[15]',
'ATV_AMP[16]',
'ATV_AMP[17]',
'ATV_AMP[18]',
'ATV_AMP[19]',
'ATV_AMP[20]',
'ATV_AMP[21]',
'ATV_AMP[22]',
'ATV_AMP[23]',
'ATV_AMP[24]',
'ATV_AMP[25]',
'ATV_AMP[26]',
'ATV_AMP[27]',
'ATV_AMP[28]',
'ATV_AMP[29]',
'ATV_AMP[30]',
'ATV_AMP[31]',
'ATV_AMP[32]',
'ATV_AMP[33]',
'ATV_AMP[34]',
'ATV_AMP[35]',
'ATV_AMP[36]',
'ATV_AMP[37]',
'ATV_AMP[38]',
'ATV_AMP[39]',
'ATV_AMP[40]',
'ATV_AMP[41]',
'ATV_AMP[42]',
'ATV_AMP[43]',
'ATV_AMP[44]',
'ATV_AMP[45]',
'ATV_AMP[46]',
'ATV_AMP[47]',
'ATV_AMP[48]',
'ATV_AMP[49]',
'ATV_AMP[50]',
'ATV_AMP[51]',
'ATV_AMP[52]',
'ATV_AMP[53]',
'ATV_AMP[54]',
'ATV_AMP[55]',
'ATV_AMP[56]',
'ATV_AMP[57]',
'ATV_AMP[58]',
'ATV_AMP[59]',
'ATV_AMP[60]',
'ATV_AMP[61]',
'ATV_AMP[62]',
'ATV_AMP[63]',
'ATV_AMP[64]',
'ATV_AMP[65]',
'ATV_AMP[66]',
'ATV_AMP[67]',
'ATV_AMP[68]',
'ATV_AMP[69]',
'ATV_AMP[70]',
'ATV_AMP[71]',
'ATV_AMP[72]',
'ATV_AMP[73]',
'ATV_AMP[74]',
'ATV_AMP[75]',
'ATV_AMP[76]',
'ATV_AMP[77]',
'ATV_AMP[78]',
'ATV_AMP[79]',
'ATV_AMP[80]',
'ATV_AMP[81]',
'ATV_AMP[82]',
'ATV_AMP[83]',
'ATV_AMP[84]',
'ATV_AMP[85]',
'ATV_AMP[86]',
'ATV_AMP[87]',
'ATV_AMP[88]',
'ATV_AMP[89]',
'ATV_AMP[90]',
'ATV_AMP[91]',
'ATV_AMP[92]',
'ATV_AMP[93]',
'ATV_AMP[94]',
'ATV_AMP[95]',
'ATV_AMP[96]',
'ATV_AMP[97]',
'ATV_AMP[98]',
'ATV_AMP[99]',
'ATV_AMP[100]',
'ATV_AMP[101]',
'ATV_AMP[102]',
'ATV_AMP[103]',
'ATV_AMP[104]',
'ATV_AMP[105]',
'ATV_AMP[106]',
'ATV_AMP[107]',
'ATV_AMP[108]',
'ATV_AMP[109]',
'ATV_AMP[110]',
'ATV_AMP[111]',
'ATV_AMP[112]',
'ATV_AMP[113]',
'ATV_AMP[114]',
'ATV_AMP[115]',
'ATV_AMP[116]',
'ATV_AMP[117]',
'ATV_AMP[118]',
'ATV_AMP[119]',
'ATV_AMP[120]',
'ATV_AMP[121]',
'ATV_AMP[122]',
'ATV_AMP[123]',
'ATV_AMP[124]',
'ATV_AMP[125]',
'ATV_AMP[126]',
'ATV_AMP[127]',
'ATV_AMP[128]',
'ATV_AMP[129]',
'ATV_AMP[130]',
'ATV_AMP[131]',
'ATV_AMP[132]',
'ATV_AMP[133]',
'ATV_AMP[134]',
'ATV_AMP[135]',
'ATV_AMP[136]',
'ATV_AMP[137]',
'ATV_AMP[138]',
'ATV_AMP[139]',
'ATV_AMP[140]',
'ATV_AMP[141]',
'ATV_AMP[142]',
'ATV_AMP[143]']
def get_windows(boreid, centre_point, window_size, bin_width):
"""
Function to get data related to the windows around a point.
Note that the first run with a new bore id will need to load
the data from xls (SLOOOOW!) subsequent runs will use a cached
form of this data.
:param bore_id: String of the bore id.
:param centre_point: depth of the centre oint
:param window_size: window size in meters.
:param bin_width: bin width in meters
:return: will return a pandas data frame containing data.
"""
bore = geo.query('HOLEID == @boreid').sort('DEPTH')
if atv_dictionary.get(boreid, None) is None:
print('Need to read the acoustic scanner file')
atv = pd.read_excel('Acoustic Scanner/ATV_Data_{}.xlsx'.format(boreid))
print('done')
atv_dictionary[boreid] = atv
else:
atv = atv_dictionary[boreid]
bottom = centre_point - window_size/2.
top = centre_point + window_size/2.
bore = bore.query('DEPTH > @bottom and DEPTH <= @top').sort('DEPTH')
atv = atv.rename(columns={'MD': 'DEPTH'})
atv = atv.query('DEPTH > @bottom and DEPTH <= @top').sort('DEPTH')
def bin_number(depth):
return np.floor(depth/bin_width)*bin_width
geo_df = bore.set_index('DEPTH')[cols].groupby(bin_number, axis=0).mean()
atv_df = atv.set_index('DEPTH').groupby(bin_number).mean()
result = pd.concat([geo_df, atv_df], axis=1)
return result
def get_data(boreid, centre_point, window_size, bin_width):
result = get_windows(boreid, centre_point, window_size, bin_width)
result = result.reset_index().rename(columns={'index':'DEPTH'})
result['LABELS'] = result.DEPTH.apply(lambda x: get_label(boreid, x))
result['LABELS_ROCK_TYPE'] = result.DEPTH.apply(lambda x: get_label(boreid, x, rtype=True))
return result
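# Minimal usage sketch (hypothetical bore id and depth, added for illustration;
# the real values depend on the spreadsheets and CSVs available on disk):
#
#   df = get_data('DD0123', centre_point=150.0, window_size=2.0, bin_width=0.1)
#   df[['DEPTH', 'LABELS', 'LABELS_ROCK_TYPE']].head()
#
# returns one row per 0.1 m bin inside the 2 m window centred at 150 m, holding
# the bin-averaged geophysics/ATV channels plus the corrected lithology labels.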
| bsd-2-clause |
varantz/airflow | airflow/contrib/plugins/metastore_browser/main.py | 42 | 5126 | from datetime import datetime
import json
from flask import Blueprint, request
from flask.ext.admin import BaseView, expose
import pandas as pd
from airflow.hooks import HiveMetastoreHook, MySqlHook, PrestoHook, HiveCliHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
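# Illustrative note (not part of the original module): dropping this file into
# the $AIRFLOW_HOME/plugins/ directory is enough for Airflow's plugin manager
# to register MetastoreBrowserPlugin; the view then appears in the web UI under
# the "Plugins" menu as "Hive Metadata Browser", backed by the metastore,
# MySQL, Presto and Hive CLI connection ids configured at the top of the file.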
| apache-2.0 |
dingocuster/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
kobejean/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 16 | 13781 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method.
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import urllib
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
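# For example (illustrative trace, not part of the original file): the CSV line
# "red,255,0,0" parses to
#   rgb    -> [1.0, 0.0, 0.0]            (RGB scaled into [0, 1])
#   chars  -> a [3, 256] one-hot matrix  (one row per character of "red")
#   length -> 3                          (true sequence length for the RNN)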
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not tf.gfile.Exists(work_directory):
tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = maybe_download(os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
# 3. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
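# Each element yielded by the dataset above is therefore a padded batch
#   labels : [batch_size, 3]              float32 RGB targets
#   chars  : [batch_size, max_len, 256]   one-hot characters, zero-padded
#   length : [batch_size]                 true (unpadded) sequence lengths
# (illustrative shape note added for clarity; it follows from padded_shapes).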
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
self.cells = tf.contrib.checkpoint.List(
[tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
self.relu = layers.Dense(
label_dimension, activation=tf.nn.relu, name="relu")
def call(self, inputs, training=False):
"""Implements the RNN logic and prediction generation.
Args:
inputs: A tuple (chars, sequence_length), where chars is a batch of
one-hot encoded color names represented as a Tensor with dimensions
[batch_size, time_steps, 256] and sequence_length holds the length
of each character sequence (color name) as a Tensor with dimension
[batch_size].
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
(chars, sequence_length) = inputs
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
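# Illustrative note (not part of the original class): with the default flags
# below, RNNColorbot(rnn_cell_sizes=[256, 128], label_dimension=3,
# keep_prob=0.5) builds a two-layer LSTM whose final hidden state is mapped to
# an (R, G, B) prediction by the ReLU-activated dense layer defined above.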
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.square(predictions - labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model((chars, sequence_length), training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |