# ===== leylabmpi/pyTecanFluent :: pyTecanFluent/Pool.py (18814 bytes, license: mit) =====
from __future__ import print_function
# import
## batteries
import os
import sys
import argparse
import functools
from itertools import product
## 3rd party
import numpy as np
import pandas as pd
## package
from pyTecanFluent import Utils
from pyTecanFluent import Fluent
from pyTecanFluent import Labware
# functions
def get_desc():
desc = 'Create robot commands for pooling samples'
return desc
def parse_args(test_args=None, subparsers=None):
# desc
desc = get_desc()
epi = """DESCRIPTION:
Create a worklist and labware file for the TECAN Fluent robot for pooling
samples (eg., pooling PCR reaction replicates).
The main input is >=1 Excel or tab-delimited file containing the following columns:
1) A column of sample names (samples with the same name will be pooled)
2) A column designating whether to include or skip the samples
(include = 'Success/Pass/Include'; skip = 'Fail/Skip')
3) A column designating the sample labware name
4) A column designating the sample labware type (eg., '96 Well Eppendorf TwinTec PCR')
5) A column designating the sample position (well)
6) [optional] A column designating the volume of each sample (overrides --volume)
*) Note: you can designate the column names in the parameters
Mapping file:
If a mapping file is provided (same names as in the pooling file),
then the mapping file will be trimmed to just those pooled, and
the final pooled locations will be added to the mapping table.
The added columns have the prefix: "TECAN_postPool_*"
Notes:
* Sample positions within plates are numbered column-wise.
* All volumes are in ul.
"""
if subparsers:
parser = subparsers.add_parser('pool', description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
else:
parser = argparse.ArgumentParser(description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
# args
## I/O
groupIO = parser.add_argument_group('I/O')
groupIO.add_argument('samplefiles', metavar='SampleFile', type=str, nargs='+',
help='An excel or tab-delim file of samples to pool')
groupIO.add_argument('--prefix', type=str, default='TECAN_pool',
help='Output file name prefix (default: %(default)s)')
groupIO.add_argument('--mapfile', type=str,
help='A QIIME-formatted mapping file')
## file format
filef = parser.add_argument_group('Sample file details')
### sample file
filef.add_argument('--sample-format', type=str, default=None,
help='File format (excel or tab). If not provided, the format is determined from the file extension')
filef.add_argument('--sample-header', action='store_false', default=True,
help='Header in the sample file? (default: %(default)s)')
filef.add_argument('--sample-rows', type=str, default='all',
help='Which rows (not including header) of the sample file to use ("all"=all rows; "1-48"=rows 1-48) (default: %(default)s)')
filef.add_argument('--sample-col', type=str, default='Sample',
help='Name of column containing the samples (default: %(default)s)')
filef.add_argument('--include-col', type=str, default='Call',
help='Name of column designating sample include/skip (default: %(default)s)')
filef.add_argument('--sample-labware-name', type=str, default='labware_name',
help='Name of column designating the sample labware name (default: %(default)s)')
filef.add_argument('--sample-labware-type', type=str, default='labware_type',
help='Name of column designating the sample labware type (default: %(default)s)')
filef.add_argument('--position-col', type=str, default='Well',
help='Name of column designating sample location in the plate (default: %(default)s)')
filef.add_argument('--volume-col', type=str, default='None',
help='Name of column designating volumes per sample. Use "None" to skip (default: %(default)s)')
### mapping file
filef.add_argument('--map-format', type=str, default=None,
help='File format (excel or tab). If not provided, the format is determined from the file extension')
filef.add_argument('--map-header', action='store_false', default=True,
help='Header in the mapping file? (default: %(default)s)')
## pooling
pooling = parser.add_argument_group('Pooling')
pooling.add_argument('--volume', type=float, default=30.0,
help='Per-sample volume to pool (default: %(default)s)')
pooling.add_argument('--liq-cls', type=str, default='Water Free Single No-cLLD',
help='Liquid class for pooling (default: %(default)s)')
# pooling.add_argument('--new-tips', action='store_true', default=False,
# help='Use new tips between sample replicates? (default: %(default)s)')
## destination plate
dest = parser.add_argument_group('Destination labware')
dest.add_argument('--dest-name', type=str, default='Pooled DNA plate',
help='Destination labware name (default: %(default)s)')
dest.add_argument('--dest-type', type=str, default='96 Well Eppendorf TwinTec PCR',
help='Destination labware type (default: %(default)s)')
dest.add_argument('--dest-start', type=int, default=1,
help='Starting position (well) on the destination labware (default: %(default)s)')
# parse & return
if test_args:
args = parser.parse_args(test_args)
return args
return parser
def main(args=None):
# Input
if args is None:
args = parse_args()
check_args(args)
# Import
## sample file(s); then joining
df_samps = []
for f in args.samplefiles:
df_samp = sample2df(f,
sample_col=args.sample_col,
include_col=args.include_col,
labware_name_col=args.sample_labware_name,
labware_type_col=args.sample_labware_type,
position_col=args.position_col,
volume_col=args.volume_col,
file_format=args.sample_format,
row_select=args.sample_rows,
header=args.sample_header)
df_samps.append(df_samp)
df_samp = pd.concat(df_samps)
## mapping file
df_map = map2df(args.mapfile,
file_format=args.map_format,
header=args.map_header)
# filtering sample file to just pass
df_samp = df_samp.loc[df_samp[args.include_col].isin(['success', 'pass', 'include'])]
# adding destination
df_samp = add_dest(df_samp,
dest_labware=args.dest_name,
sample_col=args.sample_col,
position_col=args.position_col,
labware_name_col=args.sample_labware_name,
dest_type=args.dest_type,
dest_start=args.dest_start)
df_samp = check_rack_labels(df_samp)
# gwl construction
TipTypes = ['FCA, 1000ul SBS', 'FCA, 200ul SBS',
'FCA, 50ul SBS', 'FCA, 10ul SBS']
gwl = Fluent.gwl(TipTypes)
# Reordering src if plate type is 384-well
df_samp = Utils.reorder_384well(df_samp, gwl,
labware_name_col=args.sample_labware_name,
labware_type_col=args.sample_labware_type,
position_col=args.position_col)
# samples
pool_samples(df_samp,
gwl,
sample_col=args.sample_col,
labware_name_col=args.sample_labware_name,
labware_type_col=args.sample_labware_type,
position_col=args.position_col,
volume_col=args.volume_col,
dest_labware_name=args.dest_name,
dest_labware_type=args.dest_type,
volume=args.volume,
liq_cls=args.liq_cls)
#new_tips=args.new_tips)
## writing out worklist (gwl) file
gwl_file = args.prefix + '.gwl'
gwl.write(gwl_file)
# making labware table
lw = Labware.labware()
lw.add_gwl(gwl)
lw_df = lw.table()
lw_file = args.prefix + '_labware.txt'
lw_df.to_csv(lw_file, sep='\t', index=False)
# Writing out updated mapping table
if df_map is not None:
df_map = filter_map(df_map, df_samp, args.sample_col)
map_file = args.prefix + '_map.txt'
df_map.round(1).to_csv(map_file, sep='\t', index=False)
else:
df_samp = filter_samp(df_samp, args.sample_col)
samp_file = args.prefix + '_samples.txt'
df_samp.round(1).to_csv(samp_file, sep='\t', index=False)
# status
Utils.file_written(gwl_file)
Utils.file_written(lw_file)
if df_map is not None:
Utils.file_written(map_file)
else:
Utils.file_written(samp_file)
# end
if df_map is not None:
return (gwl_file, lw_file, map_file)
else:
return (gwl_file, lw_file, samp_file)
def check_args(args):
"""Checking user input
"""
# input table column IDs
args.rows = Utils.make_range(args.sample_rows, set_zero_index=True)
# destination labware name
args.dest_name = Utils.rm_special_chars(args.dest_name)
# dilution
assert args.volume >= 0.0, '--volume must be >= 0'
def sample2df(samplefile, sample_col, include_col,
labware_name_col, labware_type_col, position_col, volume_col,
row_select=None, file_format=None, header=True):
"""Loading a sample file as a pandas dataframe
"""
if header==True:
header=0
else:
header=None
# format
if file_format is None:
if samplefile.endswith('.csv'):
file_format = 'csv'
elif samplefile.endswith('.txt'):
file_format = 'tab'
elif samplefile.endswith('.xls') or samplefile.endswith('.xlsx'):
file_format = 'excel'
else:
file_format = file_format.lower()
# load via pandas IO
if file_format == 'csv':
df = pd.read_csv(samplefile, sep=',', header=header)
elif file_format == 'tab':
df = pd.read_csv(samplefile, sep='\t', header=header)
elif file_format == 'excel':
xls = pd.ExcelFile(samplefile)
df = pd.read_excel(xls, header=header)
else:
raise ValueError('Sample file is not in a usable format')
# checking dataframe format
## columns
req_cols = [sample_col, include_col, labware_name_col, labware_type_col, position_col]
if volume_col.lower() != 'none':
req_cols += [volume_col]
msg = 'Column "{}" not found in sample table'
for req_col in req_cols:
if req_col not in df.columns.values:
raise ValueError(msg.format(req_col))
## include col
f = lambda x: x.lower()
df.loc[:,include_col] = df.loc[:,include_col].apply(f)
msg = '"{}" value not allowed in include column in sample file'
df.loc[:,include_col].apply(check_include_column)
## converting wells to positions
lw_utils = Labware.utils()
f = lambda row: lw_utils.well2position(row[position_col],
RackType=row[labware_type_col])
df[position_col] = df.apply(f, axis=1)
# selecting relevant columns
df = df.loc[:,req_cols]
# making sure labware names are "TECAN worklist friendly"
df = Utils.rm_special_chars(df, labware_name_col)
# return
return df
def check_include_column(x):
psbl_vals = ('success', 'include', 'pass', 'fail', 'skip')
msg = '"{}" value not allowed in include column in sample file'
assert x in psbl_vals, msg.format(x)
def check_rack_labels(df_samp):
"""Removing '.' for rack labels (causes execution failures)
"""
cols = ['labware_name', 'TECAN_dest_labware_name']
for x in cols:
df_samp[x] = [y.replace('.', '_') for y in df_samp[x].tolist()]
return df_samp
def map2df(mapfile, file_format=None, header=True):
"""Loading a mapping file as a pandas dataframe
"""
if mapfile is None:
return None
if header==True:
header=0
else:
header=None
# format
if file_format is None:
if mapfile.endswith('.csv'):
file_format = 'csv'
elif mapfile.endswith('.txt'):
file_format = 'tab'
elif mapfile.endswith('.xls') or mapfile.endswith('xlsx'):
file_format = 'excel'
else:
file_format = file_format.lower()
# load via pandas IO
if file_format == 'csv':
df = pd.read_csv(mapfile, sep=',', header=header)
elif file_format == 'tab':
df = pd.read_csv(mapfile, sep='\t', header=header)
elif file_format == 'excel':
xls = pd.ExcelFile(mapfile)
df = pd.read_excel(xls, header=header)
else:
raise ValueError('Mapping file is not in a usable format')
# checking for required columns
req_cols = ['#SampleID']
msg = 'Column "{}" not found in mapping file'
for req_col in req_cols:
if req_col not in df.columns.values:
raise ValueError(msg.format(req_col))
# ret
return df
def add_dest(df, dest_labware, sample_col, position_col, labware_name_col,
dest_type='96 Well Eppendorf TwinTec PCR', dest_start=1):
"""Setting destination locations for samples & primers.
Add destination location columns:
[dest_labware_name, dest_labware_type, dest_location]
"""
df.reset_index(inplace=True)
assert isinstance(dest_start, int)
# number of wells in destination plate type
lw_utils = Labware.utils()
n_dest_wells = lw_utils.get_wells(dest_type)
if n_dest_wells is None:
msg = 'RackType has no "wells" value: "{}"'
raise ValueError(msg.format(dest_type))
# number of samples in final pool
n_samples = len(df['Sample'].unique())
# init destination df
df['TECAN_dest_labware_name'] = ''
df['TECAN_dest_labware_type'] = ''
df['TECAN_dest_target_position'] = np.nan
# iterating through df and adding dest
dest_pos_idx = {}
cur_pos = dest_start
dest_plate_cnt = 1
for i in range(df.shape[0]):
# sample destination position
cur_sample = df.loc[i,'Sample']
try:
# destination location for that sample
dest_pos_tmp = dest_pos_idx[cur_sample]
except KeyError:
x = ['{0}[{1:0>3}]'.format(dest_labware, dest_plate_cnt), cur_pos]
dest_pos_idx[cur_sample] = x
cur_pos += 1
# destination labware name
## dest row
df.at[i,'TECAN_dest_labware_name'] = dest_pos_idx[cur_sample][0]
df.at[i,'TECAN_dest_labware_type'] = dest_type
df.at[i,'TECAN_dest_target_position'] = dest_pos_idx[cur_sample][1]
# next plate
if cur_pos > n_dest_wells:
cur_pos = 1
dest_plate_cnt += 1
#df.to_csv(sys.stdout, sep='\t'); sys.exit()
return df
def pool_samples(df, gwl, sample_col, labware_name_col,
labware_type_col, position_col, volume_col,
dest_labware_name, dest_labware_type,
volume, liq_cls='Water Free Single No-cLLD'):
"""Writing gwl commands for pooling sample replicates
"""
gwl.add(Fluent.Comment('Sample pooling'))
# for each Sample, generate asp/dispense commands
## optional: keep tips among sample replicates
for sample in df[sample_col].unique():
df_sub = df.loc[df[sample_col] == sample]
df_sub.index = range(df_sub.shape[0])
for i in range(df_sub.shape[0]):
# aspiration
asp = Fluent.Aspirate()
asp.RackLabel = df_sub.loc[i, labware_name_col]
asp.RackType = df_sub.loc[i, labware_type_col]
asp.Position = df_sub.loc[i, position_col]
if volume_col.lower() == 'none':
asp.Volume = volume
else:
asp.Volume = df_sub.loc[i, volume_col]
if asp.Volume <= 0:
msg = 'WARNING: skipping sample because volume <= 0\n'
sys.stderr.write(msg)
continue
asp.LiquidClass = liq_cls
gwl.add(asp)
# dispensing
disp = Fluent.Dispense()
disp.RackLabel = df_sub.loc[i,'TECAN_dest_labware_name']
disp.RackType = df_sub.loc[i,'TECAN_dest_labware_type']
disp.Position = df_sub.loc[i,'TECAN_dest_target_position']
disp.Volume = asp.Volume
disp.LiquidClass = liq_cls
gwl.add(disp)
# tip to waste (each replicate)
gwl.add(Fluent.Waste())
# tip to waste (between samples)
#if new_tips == False:
# gwl.add(Fluent.Waste())
def filter_samp(df_samp, sample_col):
"""Filtering df_samp to just distination position
"""
cols = [sample_col, 'TECAN_dest_labware_name',
'TECAN_dest_labware_type', 'TECAN_dest_target_position']
df_samp = df_samp[cols].drop_duplicates()
x = 'TECAN_dest_target_position'
df_samp[x] = df_samp[x].astype(int)
df_samp.columns = [sample_col,
'TECAN_postPool_labware_name',
'TECAN_postPool_labware_type',
'TECAN_postPool_target_position']
sort_vals = ['TECAN_postPool_labware_name',
'TECAN_postPool_target_position']
df_samp.sort_values(sort_vals, inplace=True)
return(df_samp)
def filter_map(df_map, df_samp, sample_col):
"""Filtering df_sample to just destination position,
then adding values to df_map
"""
# formatting sample table
df_samp = filter_samp(df_samp, sample_col)
# formatting mapping table
df_map = df_map.drop_duplicates(subset='#SampleID')
# joining
df_map = pd.merge(df_map, df_samp, how='inner',
left_on=['#SampleID'], right_on=[sample_col])
# sorting by destination position
df_map.sort_values('TECAN_postPool_target_position', inplace=True)
return df_map
# main
if __name__ == '__main__':
pass

# ===== data-refinery/data_refinery :: workers/data_refinery_workers/processors/smasher.py (42027 bytes, license: bsd-3-clause) =====
import boto3
import csv
import os
import rpy2
import rpy2.robjects as ro
import shutil
import simplejson as json
import string
import warnings
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from django.conf import settings
from django.utils import timezone
from pathlib import Path
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as rlang
from rpy2.robjects.packages import importr
from sklearn import preprocessing
from typing import Dict
import numpy as np
import pandas as pd
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
OriginalFile,
Pipeline,
SampleResultAssociation,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils
RESULTS_BUCKET = get_env_variable("S3_RESULTS_BUCKET_NAME", "refinebio-results-bucket")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
BODY_HTML = Path('data_refinery_workers/processors/smasher_email.min.html').read_text().replace('\n', '')
BODY_ERROR_HTML = Path('data_refinery_workers/processors/smasher_email_error.min.html').read_text().replace('\n', '')
logger = get_and_configure_logger(__name__)
def _prepare_files(job_context: Dict) -> Dict:
"""
Fetches and prepares the files to smash.
"""
all_sample_files = []
job_context['input_files'] = {}
# `key` can either be the species name or experiment accession.
for key, samples in job_context["samples"].items():
smashable_files = []
for sample in samples:
smashable_file = sample.get_most_recent_smashable_result_file()
if smashable_file is not None:
smashable_files = smashable_files + [smashable_file]
smashable_files = list(set(smashable_files))
job_context['input_files'][key] = smashable_files
all_sample_files = all_sample_files + smashable_files
# Filter out empty results. They shouldn't get here, but it's possible, so we filter just in case.
all_sample_files = [sf for sf in all_sample_files if sf is not None]
if all_sample_files == []:
error_message = "Couldn't get any files to smash for Smash job!!"
logger.error(error_message,
dataset_id=job_context['dataset'].id,
samples=job_context["samples"])
# Delay failing this pipeline until the failure notify has been sent
job_context['dataset'].failure_reason = error_message
job_context['dataset'].success = False
job_context['dataset'].save()
job_context['job'].success = False
job_context["job"].failure_reason = "Couldn't get any files to smash for Smash job - empty all_sample_files"
return job_context
job_context["work_dir"] = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk) + "/"
# Ensure we have a fresh smash directory
shutil.rmtree(job_context["work_dir"], ignore_errors=True)
os.makedirs(job_context["work_dir"])
job_context["output_dir"] = job_context["work_dir"] + "output/"
os.makedirs(job_context["output_dir"])
return job_context
def _add_annotation_column(annotation_columns, column_name):
"""Add annotation column names in place.
Any column_name that starts with "refinebio_" will be skipped.
"""
if not column_name.startswith("refinebio_"):
annotation_columns.add(column_name)
def _get_tsv_columns(samples_metadata):
"""Returns an array of strings that will be written as a TSV file's
header. The columns are based on fields found in samples_metadata.
Some nested annotation fields are taken out as separate columns
because they are more important than the others.
"""
refinebio_columns = set()
annotation_columns = set()
for sample_metadata in samples_metadata.values():
for meta_key, meta_value in sample_metadata.items():
if meta_key != 'refinebio_annotations':
refinebio_columns.add(meta_key)
continue
# Decompose sample_metadata["annotations"], which is an array of annotations!
for annotation in meta_value:
for annotation_key, annotation_value in annotation.items():
# For ArrayExpress samples, take out the fields
# nested in "characteristic" as separate columns.
if (sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS"
and annotation_key == "characteristic"):
for pair_dict in annotation_value:
if 'category' in pair_dict and 'value' in pair_dict:
_add_annotation_column(annotation_columns, pair_dict['category'])
# For ArrayExpress samples, also take out the fields
# nested in "variable" as separate columns.
elif (sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS"
and annotation_key == "variable"):
for pair_dict in annotation_value:
if 'name' in pair_dict and 'value' in pair_dict:
_add_annotation_column(annotation_columns, pair_dict['name'])
# For ArrayExpress samples, skip "source" field
elif (sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS"
and annotation_key == "source"):
continue
# For GEO samples, take out the fields nested in
# "characteristics_ch1" as separate columns.
elif (sample_metadata.get('refinebio_source_database', '') == "GEO"
and annotation_key == "characteristics_ch1"): # array of strings
for pair_str in annotation_value:
if ':' in pair_str:
tokens = pair_str.split(':', 1)
_add_annotation_column(annotation_columns, tokens[0])
# Saves all other annotation fields in separate columns
else:
_add_annotation_column(annotation_columns, annotation_key)
# Return sorted columns, in which "refinebio_accession_code" is always the first,
# followed by the other refinebio columns (in alphabetic order), and
# annotation columns (in alphabetic order) at the end.
refinebio_columns.discard('refinebio_accession_code')
return ['refinebio_accession_code'] + sorted(refinebio_columns) + sorted(annotation_columns)
def _add_annotation_value(row_data, col_name, col_value, sample_accession_code):
"""Adds a new `col_name` key whose value is `col_value` to row_data.
If col_name already exists in row_data with different value, print
out a warning message.
"""
# Generate a warning message if annotation field name starts with
# "refinebio_". This should rarely (if ever) happen.
if col_name.startswith("refinebio_"):
logger.warning(
"Annotation value skipped",
annotation_field=col_name,
annotation_value=col_value,
sample_accession_code=sample_accession_code
)
elif col_name not in row_data:
row_data[col_name] = col_value
# Generate a warning message in case of conflicts of annotation values.
# (Requested by Dr. Jackie Taroni)
elif row_data[col_name] != col_value:
logger.warning(
"Conflict of values found in column %s: %s vs. %s" % (
col_name, row_data[col_name], col_value),
sample_accession_code=sample_accession_code
)
def _get_tsv_row_data(sample_metadata):
"""Returns field values based on input sample_metadata.
Some annotation fields are treated specially because they are more
important. See `_get_tsv_columns` function above for details.
"""
sample_accession_code = sample_metadata.get('refinebio_accession_code', '')
row_data = dict()
for meta_key, meta_value in sample_metadata.items():
# If the field is a refinebio-specific field, simply copy it.
if meta_key != 'refinebio_annotations':
row_data[meta_key] = meta_value
continue
# Decompose sample_metadata["refinebio_annotations"], which is
# an array of annotations.
for annotation in meta_value:
for annotation_key, annotation_value in annotation.items():
# "characteristic" in ArrayExpress annotation
if (sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS"
and annotation_key == "characteristic"):
for pair_dict in annotation_value:
if 'category' in pair_dict and 'value' in pair_dict:
col_name, col_value = pair_dict['category'], pair_dict['value']
_add_annotation_value(row_data, col_name, col_value,
sample_accession_code)
# "variable" in ArrayExpress annotation
elif (sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS"
and annotation_key == "variable"):
for pair_dict in annotation_value:
if 'name' in pair_dict and 'value' in pair_dict:
col_name, col_value = pair_dict['name'], pair_dict['value']
_add_annotation_value(row_data, col_name, col_value,
sample_accession_code)
# Skip "source" field ArrayExpress sample's annotation
elif (sample_metadata.get('refinebio_source_database', '') == "ARRAY_EXPRESS"
and annotation_key == "source"):
continue
# "characteristics_ch1" in GEO annotation
elif (sample_metadata.get('refinebio_source_database', '') == "GEO"
and annotation_key == "characteristics_ch1"): # array of strings
for pair_str in annotation_value:
if ':' in pair_str:
col_name, col_value = pair_str.split(':', 1)
col_value = col_value.strip()
_add_annotation_value(row_data, col_name, col_value,
sample_accession_code)
# If annotation_value includes only a 'name' key, extract its value directly:
elif (isinstance(annotation_value, dict)
and len(annotation_value) == 1 and 'name' in annotation_value):
_add_annotation_value(row_data, annotation_key, annotation_value['name'],
sample_accession_code)
# If annotation_value is a single-element array, extract the element directly:
elif isinstance(annotation_value, list) and len(annotation_value) == 1:
_add_annotation_value(row_data, annotation_key, annotation_value[0],
sample_accession_code)
# Otherwise save all annotation fields in separate columns
else:
_add_annotation_value(row_data, annotation_key, annotation_value,
sample_accession_code)
return row_data
def _write_tsv_json(job_context, metadata, smash_path):
"""Writes tsv files on disk.
If the dataset is aggregated by species, also write species-level
JSON file.
"""
# Uniform TSV header per dataset
columns = _get_tsv_columns(metadata['samples'])
if job_context["dataset"].aggregate_by == "EXPERIMENT":
for experiment_title, experiment_data in metadata['experiments'].items():
experiment_dir = smash_path + experiment_title + '/'
os.makedirs(experiment_dir, exist_ok=True)
with open(experiment_dir + 'metadata_' + experiment_title + '.tsv', 'w') as tsv_file:
dw = csv.DictWriter(tsv_file, columns, delimiter='\t')
dw.writeheader()
for sample_title, sample_metadata in metadata['samples'].items():
if sample_title in experiment_data['sample_titles']:
row_data = _get_tsv_row_data(sample_metadata)
dw.writerow(row_data)
elif job_context["dataset"].aggregate_by == "SPECIES":
for species in job_context['input_files'].keys():
species_dir = smash_path + species + '/'
os.makedirs(species_dir, exist_ok=True)
samples_in_species = []
with open(species_dir + "metadata_" + species + '.tsv', 'w') as tsv_file:
dw = csv.DictWriter(tsv_file, columns, delimiter='\t')
dw.writeheader()
for sample_metadata in metadata['samples'].values():
if sample_metadata.get('refinebio_organism', '') == species:
row_data = _get_tsv_row_data(sample_metadata)
dw.writerow(row_data)
samples_in_species.append(sample_metadata)
# Writes a json file for current species:
if len(samples_in_species):
species_metadata = {
'species': species,
'samples': samples_in_species
}
with open(species_dir + "metadata_" + species + '.json', 'w') as json_file:
json.dump(species_metadata, json_file, indent=4, sort_keys=True)
else:
all_dir = smash_path + "ALL/"
os.makedirs(all_dir, exist_ok=True)
with open(all_dir + 'metadata_ALL.tsv', 'w') as tsv_file:
dw = csv.DictWriter(tsv_file, columns, delimiter='\t')
dw.writeheader()
for sample_metadata in metadata['samples'].values():
row_data = _get_tsv_row_data(sample_metadata)
dw.writerow(row_data)
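# A minimal, hypothetical sketch of the core "smash" steps listed in the
# _smash() docstring below (toy frames and MinMax scaling assumed; the real
# function adds quantile normalization, filtering and error handling):
#
#   import pandas as pd
#   from sklearn import preprocessing
#   a = pd.DataFrame({'S1': [1.0, 2.0]}, index=['GENE1', 'GENE2'])
#   b = pd.DataFrame({'S2': [3.0, 4.0]}, index=['GENE1', 'GENE2'])
#   merged = a.merge(b, left_index=True, right_index=True)  # inner join on genes
#   transposed = merged.transpose()                         # samples x genes
#   scaler = preprocessing.MinMaxScaler(copy=True)
#   scaled = pd.DataFrame(scaler.fit_transform(transposed),
#                         index=transposed.index, columns=transposed.columns)
#   final = scaled.transpose()                              # genes x samples again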
def _smash(job_context: Dict) -> Dict:
"""
Smash all of the samples together!
Steps:
Combine common genes (pandas merge)
Transpose such that genes are columns (features)
Scale features with sci-kit learn
Transpose again such that samples are columns and genes are rows
"""
# We have already failed - return now so we can send our fail email.
if job_context['dataset'].failure_reason not in ['', None]:
return job_context
try:
# Prepare the output directory
smash_path = job_context["output_dir"]
scalers = {
'MINMAX': preprocessing.MinMaxScaler,
'STANDARD': preprocessing.StandardScaler,
'ROBUST': preprocessing.RobustScaler,
}
unsmashable_files = []
num_samples = 0
# Smash all of the sample sets
logger.debug("About to smash!",
input_files=job_context['input_files'],
dataset_data=job_context['dataset'].data,
)
# Once again, `key` is either a species name or an experiment accession
for key, input_files in job_context['input_files'].items():
# Merge all the frames into one
all_frames = []
for computed_file in input_files:
# Download the file to a job-specific location so it
# won't disappear while we're using it.
computed_file_path = job_context["work_dir"] + computed_file.filename
computed_file_path = computed_file.get_synced_file_path(path=computed_file_path)
# Bail appropriately if this isn't a real file.
if not computed_file_path or not os.path.exists(computed_file_path):
unsmashable_files.append(computed_file_path)
logger.error("Smasher received non-existent file path.",
computed_file_path=computed_file_path,
computed_file=computed_file,
dataset=job_context['dataset'],
)
continue
try:
data = pd.read_csv(computed_file_path, sep='\t', header=0, index_col=0, error_bad_lines=False)
# Strip any funky whitespace
data.columns = data.columns.str.strip()
data = data.dropna(axis='columns', how='all')
# Make sure the index type is correct
data.index = data.index.map(str)
if len(data.columns) > 2:
# Most of the time, >1 is actually bad, but we also need to support
# two-channel samples. I think ultimately those should be given some kind of
# special consideration.
logger.info("Found a frame with more than 2 columns - this shouldn't happen!",
computed_file_path=computed_file_path,
computed_file_id=computed_file.id
)
continue
# via https://github.com/AlexsLemonade/refinebio/issues/330:
# aggregating by experiment -> return untransformed output from tximport
# aggregating by species -> log2(x + 1) tximport output
if job_context['dataset'].aggregate_by == 'SPECIES' \
and computed_file_path.endswith("lengthScaledTPM.tsv"):
data = data + 1
data = np.log2(data)
# Detect if this data hasn't been log2 scaled yet.
# Ideally done in the NO-OPPER, but sanity check here.
if (not computed_file_path.endswith("lengthScaledTPM.tsv")) and (data.max() > 100).any():
logger.info("Detected non-log2 microarray data.", file=computed_file)
data = np.log2(data)
# Ensure that we don't have any dangling Brainarray-generated probe symbols.
# BA likes to leave '_at', signifying probe identifiers,
# on their converted, non-probe identifiers. It makes no sense.
# So, we chop them off and don't worry about it.
data.index = data.index.str.replace('_at', '')
# Remove any lingering Affymetrix control probes ("AFFX-")
data = data[~data.index.str.contains('AFFX-')]
# If there are any _versioned_ gene identifiers, remove that
# version information. We're using the latest brainarray for everything anyway.
# Jackie says this is okay.
# She also says that in the future, we may only want to do this
# for cross-technology smashes.
# This regex needs to be able to handle EGIDs in the form:
# ENSGXXXXYYYZZZZ.6
# and
# fgenesh2_kg.7__3016__AT5G35080.1 (via http://plants.ensembl.org/Arabidopsis_lyrata/Gene/Summary?g=fgenesh2_kg.7__3016__AT5G35080.1;r=7:17949732-17952000;t=fgenesh2_kg.7__3016__AT5G35080.1;db=core)
data.index = data.index.str.replace(r"(\.[^.]*)$", '')
# Squish duplicated rows together.
# XXX/TODO: Is mean the appropriate method here?
# We can make this an option in future.
# Discussion here: https://github.com/AlexsLemonade/refinebio/issues/186#issuecomment-395516419
data = data.groupby(data.index, sort=False).mean()
# Explicitly title this dataframe
try:
# Unfortunately, we can't use this as `title` can cause a collision
# data.columns = [computed_file.samples.all()[0].title]
# So we use this, which also helps us support the case of missing SampleComputedFileAssociation
data.columns = [computed_file.samples.all()[0].accession_code]
except ValueError:
# This sample might have multiple channels, or something else.
# Don't mess with it.
pass
except Exception as e:
# Okay, somebody probably forgot to create a SampleComputedFileAssociation
data.columns = [computed_file.filename]
all_frames.append(data)
num_samples = num_samples + 1
except Exception as e:
unsmashable_files.append(computed_file_path)
logger.exception("Unable to smash file",
file=computed_file_path,
dataset_id=job_context['dataset'].id,
)
finally:
# Delete before archiving the work dir
os.remove(computed_file_path)
job_context['all_frames'] = all_frames
if len(all_frames) < 1:
logger.warning("Was told to smash a frame with no frames!",
key=key,
input_files=str(input_files)
)
continue
# Merge all of the frames we've gathered into a single big frame, skipping duplicates.
# TODO: If the very first frame is the wrong platform, are we boned?
merged = all_frames[0]
i = 1
old_len_merged = len(merged)
new_len_merged = len(merged)
merged_backup = merged
while i < len(all_frames):
frame = all_frames[i]
i = i + 1
# I'm not sure where these are sneaking in from, but we don't want them.
# Related: https://github.com/AlexsLemonade/refinebio/issues/390
breaker = False
for column in frame.columns:
if column in merged.columns:
breaker = True
if breaker:
logger.warning("Column repeated for smash job!",
input_files=str(input_files),
dataset_id=job_context["dataset"].id,
processor_job_id=job_context["job"].id,
column=column,
frame=frame
)
continue
# This is the inner join, the main "Smash" operation
merged = merged.merge(frame, left_index=True, right_index=True)
new_len_merged = len(merged)
if new_len_merged < old_len_merged:
logger.warning("Dropped rows while smashing!",
dataset_id=job_context["dataset"].id,
old_len_merged=old_len_merged,
new_len_merged=new_len_merged
)
if new_len_merged == 0:
logger.warning("Skipping a bad merge frame!",
dataset_id=job_context["dataset"].id,
old_len_merged=old_len_merged,
new_len_merged=new_len_merged,
bad_frame_number=i,
)
merged = merged_backup
new_len_merged = len(merged)
try:
unsmashable_files.append(frame.columns[0])
except Exception:
# Something is really, really wrong with this frame.
pass
old_len_merged = len(merged)
merged_backup = merged
job_context['original_merged'] = merged
# Quantile Normalization
if job_context['dataset'].quantile_normalize:
try:
# Prepare our QN target file
organism = computed_file.samples.first().organism
qn_target = utils.get_most_recent_qn_target_for_organism(organism)
if not qn_target:
logger.error("Could not find QN target for Organism!",
organism=organism,
dataset_id=job_context['dataset'].id,
dataset_data=job_context['dataset'].data,
processor_job_id=job_context["job"].id,
)
else:
qn_target_path = qn_target.sync_from_s3()
qn_target_frame = pd.read_csv(qn_target_path, sep='\t', header=None,
index_col=None, error_bad_lines=False)
# Prepare our RPy2 bridge
pandas2ri.activate()
preprocessCore = importr('preprocessCore')
as_numeric = rlang("as.numeric")
data_matrix = rlang('data.matrix')
# Convert the smashed frames to an R numeric Matrix
# and the target Dataframe into an R numeric Vector
target_vector = as_numeric(qn_target_frame[0])
merged_matrix = data_matrix(merged)
# Perform the Actual QN
reso = preprocessCore.normalize_quantiles_use_target(
x=merged_matrix,
target=target_vector,
copy=True
)
# Verify this QN, related: https://github.com/AlexsLemonade/refinebio/issues/599#issuecomment-422132009
set_seed = rlang("set.seed")
combn = rlang("combn")
ncol = rlang("ncol")
ks_test = rlang("ks.test")
which = rlang("which")
set_seed(123)
n = ncol(reso)[0]
m = 2
if n < m:
raise Exception("Found fewer columns than required for QN combinatorial - bad smash?")
combos = combn(ncol(reso), 2)
# Convert to NP, Shuffle, Return to R
ar = np.array(combos)
np.random.shuffle(np.transpose(ar))
nr, nc = ar.shape
combos = ro.r.matrix(ar, nrow=nr, ncol=nc)
# adapted from
# https://stackoverflow.com/questions/9661469/r-t-test-over-all-columns
# apply KS test to randomly selected pairs of columns (samples)
for i in range(1, min(ncol(combos)[0], 100)):
value1 = combos.rx(1, i)[0]
value2 = combos.rx(2, i)[0]
test_a = reso.rx(True, value1)
test_b = reso.rx(True, value2)
# RNA-seq has a lot of zeroes in it, which
# breaks the ks_test. Therefore we want to
# filter them out. To do this we drop the
# lowest half of the values. If there's
# still zeroes in there, then that's
# probably too many zeroes so it's okay to
# fail.
median_a = np.median(test_a)
median_b = np.median(test_b)
# `which` returns indices which are
# 1-indexed. Python accesses lists with
# zero-indexes, even if that list is
# actually an R vector. Therefore subtract
# 1 to account for the difference.
test_a = [test_a[i-1] for i in which(test_a > median_a)]
test_b = [test_b[i-1] for i in which(test_b > median_b)]
# The python list comprehension gives us a
# python list, but ks_test wants an R
# vector so let's go back.
test_a = as_numeric(test_a)
test_b = as_numeric(test_b)
ks_res = ks_test(test_a, test_b)
statistic = ks_res.rx('statistic')[0][0]
pvalue = ks_res.rx('p.value')[0][0]
# We're unsure of how stringent to be about
# the pvalue just yet, so we're extra lax
# rather than failing tons of tests. This may need tuning.
if statistic > 0.001 or pvalue < 0.8:
raise Exception("Failed Kolmogorov Smirnov test! Stat: " +
str(statistic) + ", PVal: " + str(pvalue))
# And finally convert back to Pandas
ar = np.array(reso)
new_merged = pd.DataFrame(ar, columns=merged.columns, index=merged.index)
job_context['merged_no_qn'] = merged
job_context['merged_qn'] = new_merged
merged = new_merged
except Exception as e:
logger.exception("Problem occured during quantile normalization",
dataset_id=job_context['dataset'].id,
dataset_data=job_context['dataset'].data,
processor_job_id=job_context["job"].id,
)
job_context['dataset'].success = False
job_context['job'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].save()
# Delay failing this pipeline until the failure notify has been sent
job_context['job'].success = False
job_context['failure_reason'] = str(e)
return job_context
# End QN
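# For reference, a hypothetical numpy-only sketch of quantile normalization to
# a target distribution (this is NOT the preprocessCore implementation used
# above; it ignores ties and assumes len(target) equals the number of rows):
#
#   import numpy as np
#   def qn_to_target(mat, target):
#       target_sorted = np.sort(np.asarray(target).ravel())
#       out = np.empty_like(mat, dtype=float)
#       for j in range(mat.shape[1]):
#           ranks = np.argsort(np.argsort(mat[:, j]))  # 0-based rank per gene
#           out[:, j] = target_sorted[ranks]           # substitute target quantiles
#       return out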
# Transpose before scaling
# Do this even if we don't want to scale in case transpose
# modifies the data in any way. (Which it shouldn't but
# we're paranoid.)
transposed = merged.transpose()
# Scaler
if job_context['dataset'].scale_by != "NONE":
scale_function = scalers[job_context['dataset'].scale_by]
scaler = scale_function(copy=True)
scaler.fit(transposed)
scaled = pd.DataFrame( scaler.transform(transposed),
index=transposed.index,
columns=transposed.columns
)
# Untranspose
untransposed = scaled.transpose()
else:
# Wheeeeeeeeeee
untransposed = transposed.transpose()
# This is just for quality assurance in tests.
job_context['final_frame'] = untransposed
# Write to temp file with dataset UUID in filename.
subdir = ''
if job_context['dataset'].aggregate_by in ["SPECIES", "EXPERIMENT"]:
subdir = key
elif job_context['dataset'].aggregate_by == "ALL":
subdir = "ALL"
outfile_dir = smash_path + key + "/"
os.makedirs(outfile_dir, exist_ok=True)
outfile = outfile_dir + key + ".tsv"
untransposed.to_csv(outfile, sep='\t', encoding='utf-8')
# Copy LICENSE.txt and README.md files
shutil.copy("README_DATASET.md", smash_path + "README.md")
shutil.copy("LICENSE_DATASET.txt", smash_path + "LICENSE.TXT")
# Create metadata file.
metadata = {}
metadata['num_samples'] = num_samples
metadata['num_experiments'] = job_context["experiments"].count()
metadata['aggregate_by'] = job_context["dataset"].aggregate_by
metadata['scale_by'] = job_context["dataset"].scale_by
# https://github.com/AlexsLemonade/refinebio/pull/421#discussion_r203799646
metadata['non_aggregated_files'] = unsmashable_files
samples = {}
for sample in job_context["dataset"].get_samples():
samples[sample.title] = sample.to_metadata_dict()
metadata['samples'] = samples
experiments = {}
for experiment in job_context["dataset"].get_experiments():
exp_dict = experiment.to_metadata_dict()
exp_dict['sample_titles'] = [v for v in experiment.samples.all().values_list('title', flat=True)]
experiments[experiment.accession_code] = exp_dict
metadata['experiments'] = experiments
# Write samples metadata to TSV
_write_tsv_json(job_context, metadata, smash_path)
metadata['files'] = os.listdir(smash_path)
# Metadata to JSON
metadata['created_at'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
with open(smash_path + 'aggregated_metadata.json', 'w') as metadata_file:
json.dump(metadata, metadata_file, indent=4, sort_keys=True)
# Finally, compress all files into a zip
final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
shutil.make_archive(final_zip_base, 'zip', smash_path)
job_context["output_file"] = final_zip_base + ".zip"
except Exception as e:
logger.exception("Could not smash dataset.",
dataset_id=job_context['dataset'].id,
processor_job_id=job_context['job_id'],
input_files=job_context['input_files'])
job_context['dataset'].success = False
job_context['job'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].save()
# Delay failing this pipeline until the failure notify has been sent
job_context['job'].success = False
job_context['failure_reason'] = str(e)
return job_context
job_context['metadata'] = metadata
job_context['unsmashable_files'] = unsmashable_files
job_context['dataset'].success = True
job_context['dataset'].save()
logger.debug("Created smash output!",
archive_location=job_context["output_file"])
return job_context
def _upload(job_context: Dict) -> Dict:
""" Uploads the result file to S3 and notifies user. """
# There has been a failure already, don't try to upload anything.
if not job_context.get("output_file", None):
return job_context
try:
if job_context.get("upload", True) and settings.RUNNING_IN_CLOUD:
s3_client = boto3.client('s3')
# Note that file expiry is handled by the S3 object lifecycle,
# managed by terraform.
s3_client.upload_file(
job_context["output_file"],
RESULTS_BUCKET,
job_context["output_file"].split('/')[-1],
ExtraArgs={'ACL':'public-read'}
)
result_url = ("https://s3.amazonaws.com/" + RESULTS_BUCKET + "/" +
job_context["output_file"].split('/')[-1])
job_context["result_url"] = result_url
logger.debug("Result uploaded!",
result_url=job_context["result_url"]
)
job_context["dataset"].s3_bucket = RESULTS_BUCKET
job_context["dataset"].s3_key = job_context["output_file"].split('/')[-1]
job_context["dataset"].save()
# File is uploaded, we can delete the local.
try:
os.remove(job_context["output_file"])
except OSError:
pass
except Exception as e:
logger.exception("Failed to upload smash result file.", file=job_context["output_file"])
job_context['job'].success = False
job_context['job'].failure_reason = str(e)
# Delay failing this pipeline until the failure notify has been sent
# job_context['success'] = False
return job_context
def _notify(job_context: Dict) -> Dict:
""" Use AWS SES to notify a user of a smash result.. """
##
# SES
##
if job_context.get("upload", True) and settings.RUNNING_IN_CLOUD:
# Don't send an email if we don't have address.
if job_context["dataset"].email_address:
SENDER = "Refine.bio Mail Robot <noreply@refine.bio>"
RECIPIENT = job_context["dataset"].email_address
AWS_REGION = "us-east-1"
CHARSET = "UTF-8"
if job_context['job'].failure_reason not in ['', None]:
SUBJECT = "There was a problem processing your refine.bio dataset :("
BODY_TEXT = "We tried but were unable to process your requested dataset. Error was: \n\n" + str(job_context['job'].failure_reason) + "\nDataset ID: " + str(job_context['dataset'].id) + "\n We have been notified and are looking into the problem. \n\nSorry!"
# Link to the dataset page, where the user can re-try the download job
dataset_url = 'https://www.refine.bio/dataset/' + str(job_context['dataset'].id)
FORMATTED_HTML = BODY_ERROR_HTML.replace('REPLACE_DATASET_URL', dataset_url).replace('REPLACE_ERROR_TEXT', job_context['job'].failure_reason)
job_context['success'] = False
else:
SUBJECT = "Your refine.bio Dataset is Ready!"
BODY_TEXT = "Hot off the presses:\n\n" + job_context["result_url"] + "\n\nLove!,\nThe refine.bio Team"
FORMATTED_HTML = BODY_HTML.replace('REPLACEME', job_context["result_url"])
# Try to send the email.
try:
# Create a new SES resource and specify a region.
client = boto3.client('ses', region_name=AWS_REGION)
#Provide the contents of the email.
response = client.send_email(
Destination={
'ToAddresses': [
RECIPIENT,
],
},
Message={
'Body': {
'Html': {
'Charset': CHARSET,
'Data': FORMATTED_HTML,
},
'Text': {
'Charset': CHARSET,
'Data': BODY_TEXT,
},
},
'Subject': {
'Charset': CHARSET,
'Data': SUBJECT,
}
},
Source=SENDER,
)
# Display an error if something goes wrong.
except ClientError as e:
logger.exception("ClientError while notifying.", client_error_message=e.response['Error']['Message'])
job_context['job'].success = False
job_context['job'].failure_reason = e.response['Error']['Message']
job_context['success'] = False
return job_context
except Exception as e:
logger.exception("General failure when trying to send email.", result_url=job_context["result_url"])
job_context['job'].success = False
job_context['job'].failure_reason = str(e)
job_context['success'] = False
return job_context
job_context["dataset"].email_sent = True
job_context["dataset"].save()
# Handle non-cloud too
if job_context['job'].failure_reason:
job_context['success'] = False
return job_context
def _update_result_objects(job_context: Dict) -> Dict:
"""Closes out the dataset object."""
dataset = job_context["dataset"]
dataset.is_processing = False
dataset.is_processed = True
dataset.is_available = True
dataset.expires_on = timezone.now() + timedelta(days=1)
dataset.save()
job_context['success'] = True
return job_context
def smash(job_id: int, upload=True) -> None:
""" Main Smasher interface """
pipeline = Pipeline(name=utils.PipelineEnum.SMASHER.value)
return utils.run_pipeline({ "job_id": job_id,
"upload": upload,
"pipeline": pipeline
},
[utils.start_job,
_prepare_files,
_smash,
_upload,
_notify,
_update_result_objects,
utils.end_job])

# ===== nayyarv/MonteGMM :: BayesGMM/adaptiveMCMC.py (2035 bytes, license: mit) =====
__author__ = 'Varun Nayyar'
import numpy as np
from matplotlib import pyplot as plt
def main():
pass
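# Note: adaptive() below appears to be a fragment of a larger sampler loop; it
# assumes the caller defines i, localMean, localVar, weightStep and the
# *BatchAcceptance counters. Every 50 iterations it rescales the proposal step
# sizes by exp(+/- delta_n), with delta_n = min(0.01, 1/sqrt(n)), nudging each
# block's acceptance rate toward the 0.25-0.35 band (diminishing adaptation).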
def adaptive():
if (i-1)%50 ==0:
#Update Step sizes
n = i/50
delta_n = min(0.01, 1/np.sqrt(n))
exp_deltan = np.exp(delta_n)
#acceptance probabilities
meanAccProb = np.mean(meanBatchAcceptance/(i*1.0))
covAccProb = np.mean(covBatchAcceptance/(i*1.0))
weightAccProb = weightBatchAcceptance/(i*1.0)
print "Acceptance rate for batch {} is:".format(n)
print "Means: ", meanAccProb
print "Covs: ", covAccProb
print "Weights: ", weightAccProb
if meanAccProb > 0.35: # too high
localMean*=exp_deltan
print "increasing menStep"
elif meanAccProb < 0.25:
localMean/=exp_deltan
print "reducing meanStep"
if covAccProb > 0.35:
localVar *= exp_deltan
print "increasing covStep"
elif covAccProb < 0.25:
localVar /=exp_deltan
print "reducing covStep"
#otherwise
if weightAccProb > 0.35:
weightStep *= exp_deltan
print "increasing weightStep"
elif weightAccProb < 0.25:
weightStep /= exp_deltan
print "reducing weightStep"
meanBatchAcceptance[:] = 0
covBatchAcceptance[:] = 0
weightBatchAcceptance = 0
def adaptMeans():
for mixture in xrange(LLeval.numMixtures):
meansStorage[i-1]
newMeans = means+0
newMeans[mixture] = means[mixture] + \
np.random.multivariate_normal(size = LLeval.dim).astype(np.float32)
newLL = LLeval.loglikelihood(newMeans, diagCovs, weights)
acceptProb = newLL - oldLL
if acceptProb > 0 or acceptProb > np.log(np.random.uniform()):
means[mixture] = newMeans[mixture]
oldLL = newLL
overallMeanAcceptance[mixture]+=1
if __name__ == "__main__":
main()

# ===== themech/Machine-Learning-Coursera-Tensorflow :: ex4-neural networks learning/2_learning_and_test_sets.py (6392 bytes, license: mit) =====
# This exercise is very similar to the previous one. The only difference is
# that we will get a better overview of how the neural network is learning. To
# do this we will split our input data into a learning set and a test set. We will
# perform all the learning using the first set of data. Of course as the number
# of epochs increases, so does the accuracy on the learning set (as the cost
# function on this set is minimized by the learning process). But every now and
# then we will check the accuracy on the test set - data that the network
# hasn't seen during the learning phase. Ideally the accuracy on the learning
# set should increase along with the accuracy on the test set, as this means
# the network has "learned" something general that can be applied on a data
# that was not seen. At the end we will draw a little chart to check how the
# accuracy was changing over time for both sets.
import argparse
import matplotlib.pyplot as plt
from scipy import io
from sklearn import metrics
from sklearn.model_selection import train_test_split
import tensorflow as tf
# size of a single digit image (in pixels)
IMAGE_WIDTH = 20
IMAGE_HEIGHT = 20
TEST_SIZE = 0.25 # test set will be 25% of the data
# Parse the command line arguments (or use default values).
parser = argparse.ArgumentParser(
description='Recognizing hand-written number using neural network.')
parser.add_argument('-s', '--hidden_layer_size', type=int,
help='number of neurons in the hidden layer (default: 64)',
default=64)
parser.add_argument('-lr', '--learning_rate', type=float,
help='learning rate for the algorithm (default: 0.5)',
default=0.5)
parser.add_argument('-d', '--decay', dest='decay', type=float,
help='learning rate decay (default: 0.9999, 1.0 means '
'no decay)', default=0.9999)
parser.add_argument('-e', '--epochs', type=int,
help='number of epochs (default: 1000)', default=1000)
parser.add_argument('-o', '--optimizer', type=str,
help='tensorflow optimizer class (default: '
'AdagradOptimizer)', default='AdagradOptimizer')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='increase output verbosity')
args = parser.parse_args()
optimizer_class = getattr(tf.train, args.optimizer)
# Load the hand-written digits data.
filename = 'data/ex4data1.mat'
data = io.loadmat(filename)
X_data, Y_data = data['X'], data['y']
# y==10 is digit 0, convert it to 0 then to make the code below simpler
Y_data[Y_data == 10] = 0
# Split the data
X_data, X_test_data, Y_data, Y_test_data = train_test_split(
X_data, Y_data, test_size=TEST_SIZE)
if args.verbose:
print('Shape of the X_data', X_data.shape)
print('Shape of the Y_data', Y_data.shape)
print('Shape of the X_test_data', X_test_data.shape)
print('Shape of the Y_test_data', Y_test_data.shape)
numSamples = X_data.shape[0]
numTestSamples = X_test_data.shape[0]
def fc_layer(input, size_in, size_out):
"""Creates a fully connected nn layer.
The layer is initialized with random numbers from normal distribution.
"""
w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1))
b = tf.Variable(tf.truncated_normal([size_out], stddev=0.1))
return tf.nn.relu(tf.matmul(input, w) + b)
# Setup placeholders, and reshape the data
x = tf.placeholder(tf.float32, shape=[None, IMAGE_WIDTH * IMAGE_HEIGHT])
# 10 outputs, one for each digit
y = tf.placeholder(tf.float32, shape=[None, 10])
if args.verbose:
print("Creating a network with {:d} neurons in a hidden layer".format(
args.hidden_layer_size))
hidden_layer = fc_layer(x, IMAGE_WIDTH * IMAGE_HEIGHT, args.hidden_layer_size)
output_layer = fc_layer(hidden_layer, args.hidden_layer_size, 10)
# define cost function and
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
logits=output_layer, labels=y))
# learning rate decay
batch = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
args.learning_rate, # Base learning rate.
batch, # Current index into the dataset.
1, # Decay step.
args.decay, # Decay rate.
staircase=True)
optimizer = optimizer_class(learning_rate).minimize(cost, global_step=batch)
# measure accuracy - pick the output with the highest score as the prediction
pred = tf.argmax(tf.nn.softmax(output_layer), 1) # softmax is optional here
correct_prediction = tf.equal(pred, tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Convert the answers vector to a sparse matrix (refer to
# 1_nn_training_example.py for a more detailed comment)
Y_sparse = tf.keras.utils.to_categorical(Y_data, 10)
Y_test_sparse = tf.keras.utils.to_categorical(Y_test_data, 10)
print("Training...")
# Variables for tracking accuracy over time
iter_arr = []
train_accuracy_arr = []
test_accuracy_arr = []
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(args.epochs):
if not (epoch+1) % 5:
train_accuracy = sess.run([accuracy],
feed_dict={x: X_data, y: Y_sparse})
test_accuracy = sess.run([accuracy],
feed_dict={x: X_test_data, y: Y_test_sparse})
iter_arr.append(epoch)
train_accuracy_arr.append(train_accuracy)
test_accuracy_arr.append(test_accuracy)
if args.verbose:
print('Epoch: {:04d}, accuracy: {}, test accuracy: {}'.format(
epoch+1, train_accuracy, test_accuracy))
sess.run([optimizer], feed_dict={x: X_data, y: Y_sparse})
print("Accuracy report for the learning set")
y_pred = sess.run(pred, feed_dict={x: X_data, y: Y_sparse})
print(metrics.classification_report(Y_data, y_pred))
print("Accuracy report for the test set")
y_test_pred = sess.run(pred, feed_dict={x: X_test_data, y: Y_test_sparse})
print(metrics.classification_report(Y_test_data, y_test_pred))
print("Plotting accuracy over time...")
plt.plot(iter_arr, train_accuracy_arr, label='train accuracy')
plt.plot(iter_arr, test_accuracy_arr, label='test accuracy')
plt.xlabel('epoch', fontsize=16)
plt.ylabel('accuracy', fontsize=16)
plt.legend(bbox_to_anchor=(0, 1), loc=2, borderaxespad=0.)
plt.show()

# ===== tomkcook/clock :: ringing.py (6087 bytes, license: gpl-3.0) =====
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 28 09:44:25 2013
@author: tom.cook
"""
import numpy as np
import numpy.linalg.linalg as li
import matplotlib.pylab as mp
import collections
import pygame.time as tm
import pygame.mixer as m
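# change_from_places() builds the order x order permutation matrix for a single
# change from its (0-indexed) place notation: every position listed in p stays
# fixed, and the remaining adjacent pairs swap. Hypothetical example:
# change_from_places([0], order=5) fixes position 0 and swaps (1,2) and (3,4).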
def change_from_places(p, order = 7):
m = np.mat(np.zeros([order, order], int))
i = 0
while i < order:
if i in p:
m[i,i] = 1
i += 1
else:
m[i,i+1] = 1
m[i+1,i] = 1
i += 2
return m
def method_from_places(p, order = 7):
lead_length = p.shape[0]
m = []
for i in range(0, lead_length):
m.append(change_from_places(p[i], order))
return m
def calls_from_places(m, bob_p = [], single_p = [], lead_heads=[0]):
plain = m[(lead_heads[0]-1)%len(m)]
order = plain.shape[0]
if len(bob_p) > 0:
bob = change_from_places(bob_p, order)
else:
bob = plain
if len(single_p) > 0:
single = change_from_places(single_p, order)
else:
single = plain
plain_i = li.inv(np.matrix(plain))
return [(plain_i*bob).astype(int), (plain_i*single).astype(int)]
def get_default_transition(t, order):
if t == []:
t = np.eye(order)
return t
def get_transition(m, index, bob, single, calls, lead_heads):
lead_length = len(m)
index = index % lead_length
t = m[index]
if (index + 1) % lead_length in lead_heads:
c = calls[0]
calls.popleft()
if len(calls) < 1:
calls.append(0)
if c > 0:
t = t * bob
elif c < 0:
t = t * single
else:
pass
return t
def step_method(m, last_change, index):
pass
def iterate_method(t, s, index):
p = s[:, index]
p = t * p
s = np.hstack([s, p])
return s
def run_method(m, changes, bob, single, calls=[0], start_change=0, lead_heads=[0]):
order = m[0].shape[0]
bells = np.mat(range(0, order), int).transpose()
s = np.mat(np.zeros([order, 1]), dtype=int)
s[:,0] = bells
for i in range(0, changes):
t = get_transition(m, i, bob, single, calls, lead_heads)
s = iterate_method(t, s, i)
return s
def run_method_to_rounds(m, bob, single, calls=[0], start_change = 0, lead_heads=[0]):
order = m[0].shape[0]
bells = np.mat(range(0, order), int).transpose()
s = np.mat(np.zeros([order, 1]), int)
s = s.astype(int)
s[:,0] = bells.astype(int)
j = 0
while (s[:, s.shape[1]-1] != bells).any() or s.shape[1] == 1:
t = get_transition(m, j + start_change, bob, single, calls, lead_heads)
s = iterate_method(t, s, j)
j += 1
return s.astype(int)
def rounds(order = 7, changes = 1):
places = np.mat([range(0, order)], dtype=int)
method = method_from_places(places, order)
return run_method(method, changes-1, None, None, collections.deque([0]))
def method_parts(method_places, bob_places, single_places, order = 7):
method = method_from_places(method_places, order)
[bob, single] = calls_from_places(method, bob_places, single_places)
return [method, bob, single]
def method(method_places, bob_places, single_places, calls = [0], order = 7):
[method, bob, single] = method_parts(method_places, bob_places, single_places, order)
return run_method_to_rounds(method, bob, single, collections.deque(calls), 9, [0, 6])
def stedman(order = 7, calls = [0]):
stedman_places = np.mat([[2], [0], [2], [0], [2], [order-1], [0], [2], [0], [2], [0], [order-1]])
stedman_bob = [order-3]
stedman_single = [order-3, order-2, order-1]
return method(stedman_places, stedman_bob, stedman_single)
def stedman_parts(order = 7):
stedman_places = np.mat([[2], [0], [2], [0], [2], [order-1], [0], [2], [0], [2], [0], [order-1]])
stedman_bob = [order-3]
stedman_single = [order-3, order-2, order-1]
return method_parts(stedman_places, stedman_bob, stedman_single)
def plot_method(s, lines = [], numbers = []):
bells = s.shape[0]
# Default is to print all lines and all numbers
if len(lines) == 0:
lines = range(0, bells)
if len(numbers) == 0:
numbers = range(0, bells)
rounds = np.mat(range(0,bells)).astype(int).transpose()
changes = s.shape[1]
places = np.empty(s.shape, int)
for i in range(0,changes):
c = s[:,i]
d = places[:,i]
d[c] = rounds
mp.plot(places[lines, :].transpose())
mp.subplots_adjust(left=0.04, right=0.96, top = 0.9, bottom = 0.1)
for i in numbers:
for j in range(0,changes):
mp.text(j, places[i, j], i+1, fontsize=16, horizontalalignment='center', verticalalignment='center')
ticks = [2.5]
while ticks[-1] < changes:
ticks.append(ticks[-1] + 6)
mp.xticks(ticks)
# mp.grid(linestyle='-', linewidth=2)
mp.gca().yaxis.grid(False)
def string_change(change):
return "".join(str(change[i,0]) for i in range(0,7))
def array_change(change):
return np.mat([int(c) for c in change], int).transpose()
def play_row(s, i, sounds, gap, covering):
bells = s.shape[0]
for j in range(bells):
sounds[s[j, i]].stop()
sounds[s[j, i]].play()
tm.wait(gap)
if covering:
sounds[-1].stop()
sounds[-1].play()
tm.wait(gap)
def play_rounds(bells, sounds, gap, covering, rows = 2):
r = rounds(bells, rows)
play_method_matrix(r, sounds, gap, covering)
def play_method_matrix(s, sounds, gap, covering):
rows = s.shape[1]
for i in range(rows):
play_row(s, i, sounds, gap, covering)
if i % 2 == 1:
tm.wait(gap)
def play_method(s, sounds, gap=300):
bells = s.shape[0]
covering = (bells % 2 == 1)
play_rounds(bells, sounds, gap, covering, 4)
play_method_matrix(s[:,1:], sounds,gap, covering)
play_rounds(bells, sounds, gap, covering, 4) | gpl-3.0 | 2,008,799,829,821,739,000 | 30.736559 | 112 | 0.55446 | false |
davidharvey1986/pyRRG | src/masking_star.py | 1 | 10024 | from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import os as os
import ipdb as pdb
import RRGtools as tools
def plot_region_maskstar( filename, star):
regionFile = open( filename, "w")
regionFile.write('# Region file format: DS9 version 4.1\n')
#regionFile.write('# Filename: dummy.fits\n')
regionFile.write("global color=green dashlist=8 3 width=1 font='helvetica 10 normal roman' select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n")
regionFile.write("image\n")
for j in range(len(star)):
polygonStr = 'polygon('
for i in star[j]:
polygonStr += '%0.4f,' % i
        polygonStr = polygonStr[:-1]  # drop the trailing comma
polygonStr += ')\n'
regionFile.write(polygonStr)
def instar(xl,yl,xs,ys,m):
##define some parameters------------------------------------------------------------------------
# Spikes of stars in pixels
e=20 #width of spike
spike1=200 #length of first spike
spike2=500 # length of second spike
spike3=900 # length of third spike
spike4=1500 # length of fourth spike
r1=50 # radius of mask
r2=120
r3=200
r4=260
mcut1=19
mcut2=17.5 # magnitude cut
mcut3=16.6 # magnitude cut
mcut4=15 # magnitude cut
#mcut3=14
r_close=1.5 #radius for looking for bright objects in arcmin
#---------------------------------------------------------------------------------------------
if m>mcut1:
l=spike1
R=r1
if m>mcut2 and m<=mcut1:
l=spike2
R=r2
if m>mcut3 and m<=mcut2:
l=spike3
R=r3
if m<=mcut3:
l=spike4
R=r4
#star x corrdinates
xstar=np.array([xs-e,xs-e,xs+e,xs+e,xs+R,xs+l,xs+l,xs+R,xs+e,xs+e,xs-e,xs-e,xs-R,xs-l,xs-l,xs-R])
#star y corrdinates
ystar=np.array([ys+R,ys+l,ys+l,ys+R,ys+e,ys+e,ys-e,ys-e,ys-R,ys-l,ys-l,ys-R,ys-e,ys-e,ys+e,ys+e])
inside=inpoly(xl,yl,xstar,ystar)
star=[]
for i in np.arange(len(xstar)):
star.append(xstar[i])
star.append(ystar[i])
return inside, star
def inpoly(Px,Py,xl,yl):
    # Determines if a point P(px, py) is inside or outside a polygon.
# The method used is the ray intersection method based on the
# Jordan curve theorem. Basically, we trace a "ray" (a semi line)
# from the point P and we calculate the number of edges that it
# intersects. If this number is odd, P is inside.
#
# (Px,Py) Coordinates of point
# xv List of x coordinates of vertices of polygon
# yv List of y coordinates of vertices of polygon
#
# The x and y lists may be given clockwise or anticlockwise.
# The algorithm works for both convex and concave polygons.
N=len(xl)
xv=np.zeros(N)
yv=np.zeros(N)
for i in np.arange(N):
xv[i]=xl[i]
yv[i]=yl[i]
nc=0 #Number of edge crossings
N=len(xv) #Number of vertices
#test input
if N<3:
print("A polygon must have at least three vertices")
if len(xv)!=len(yv):
print('Must have same number of X and Y coordinates')
#---------------------- Change coordinate system -----------------
#Place P at the center of the coordinate system.
for i in np.arange(N):
xv[i]=xv[i]-Px
yv[i]=yv[i]-Py
#---------------------- Calculate crossings ----------------------
#The positive half of the x axis is chosen as the ray
#We must determine how many edges cross the x axis with x>0
for i in np.arange(N):
Ax=xv[i] #first vertice of edge
Ay=yv[i]
if i==(N-1):
Bx=xv[0]
By=yv[0]
else:
Bx=xv[i+1] #Second vertice of edge
By=yv[i+1]
#We define two regions in the plan: R1/ y<0 and R2/ y>=0. Note that
#the case y=0 (vertice on the ray) is included in R2.
if Ay<0:
signA=-1
else: signA=+1
if By<0:
signB=-1
else: signB=+1
#The edge crosses the ray only if A and B are in different regions.
#If a vertice is only the ray, the number of crossings will still be
#correct.
if (signA*signB<0):
if (Ax>0 and Bx>0): nc+=1
else:
x=Ax-(Ay*(Bx-Ax))/(By-Ay)
if x>0: nc+=1
#if inside then uneven
#if outside then even
nc=nc%2
return nc
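# Illustrative check of inpoly (hypothetical values): for the unit square with
# vertices (0,0), (1,0), (1,1), (0,1),
#   inpoly(0.5, 0.5, [0, 1, 1, 0], [0, 0, 1, 1])  -> 1  (point inside)
#   inpoly(2.0, 0.5, [0, 1, 1, 0], [0, 0, 1, 1])  -> 0  (point outside)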
def main( shear_catalog, object_catalog_fits, \
mask_file='mask.reg', outFile='Shear_remove.fits' ,plot_reg=True):
'''
    This algorithm does two things:
    a) Draws masks (polygons) around detected stars automatically and removes objects within each polygon.
    b) Takes an additional, optional mask file (default: mask.reg) and removes
    any object that lies within those regions. This file MUST be a ds9 region file
    formatted in degrees, not sexagesimal.
'''
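    # Illustrative mask.reg content (coordinates are hypothetical, in degrees;
    # box sizes carry a trailing '"' since the parser below strips one character):
    #   box(150.1180,2.2058,10.5",5.2",45.0)
    #   polygon(150.1200,2.2100,150.1220,2.2100,150.1220,2.2120,150.1200,2.2120)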
#-------------------------select the stars from the size vs mag diagram----------------------------------------------------
object_catalog = fits.open(object_catalog_fits)[1].data
Star_catalogue = \
object_catalog[ (object_catalog['galStarFlag']==-1) &
(object_catalog['MAG_AUTO'] < \
np.min( object_catalog['MAG_AUTO'][ object_catalog['galStarFlag']==0]))]
data=fits.open(shear_catalog)[1].data ##remember to change it to the name of your shear catalogue
clean=np.zeros(len(data['ra']))
cols = []
cols.append(
fits.Column(name='clean', format='D', array= clean)
)
orig_cols = data.columns
new_cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
clean_catalog = shear_catalog.split('.')[0]+'_clean.'+\
shear_catalog.split('.')[1]
hdu.writeto(clean_catalog, overwrite=True)
##########plot remove_star.reg---------------------------------------------------------
star_corr=[[] for i in np.arange(len(Star_catalogue["ra"]))]
for j in np.arange(len(Star_catalogue["ra"])):
star_x=Star_catalogue["X_IMAGE"][j]
star_y=Star_catalogue["Y_IMAGE"][j]
m=Star_catalogue["MAG_AUTO"][j]
inside,star_corr_one=instar(1,1,star_x,star_y,m)
star_corr[j]=star_corr_one
if plot_reg==True:
plot_region_maskstar("remove_star.reg",star_corr)
##-------------------------------start masking-------------------------------
Shears=fits.open(clean_catalog)[1].data
##go through the sources list:
for i in np.arange(len(Shears['ra'])):
xl=Shears['X_IMAGE'][i]
yl=Shears["Y_IMAGE"][i]
for j in np.arange(len(Star_catalogue["ra"])): #go through the star list
star_x=Star_catalogue['X_IMAGE'][j]
star_y=Star_catalogue['Y_IMAGE'][j]
m=Star_catalogue["MAG_AUTO"][j]
inside, star_corr_one = instar(xl,yl,star_x,star_y,m)
#star_corr[j]=star_corr_one
if inside==1:
Shears['clean'][i]=1
Shears_remove=Shears[Shears['clean']==0]
fits.writeto(outFile, Shears_remove, overwrite=True, output_verify='ignore' )
##-------------------------------start masking (for mask.reg)-------------------------------
if os.path.isfile(mask_file):
mask_obj = open(mask_file, 'r')
for mask in mask_obj:
if mask[0:3] != 'box' and mask[0:7] !='polygon':
continue
elif mask[0:3] == 'box':
print("masking a box")
                mask_x = float(mask.split('(')[1].split(',')[0])
                mask_y = float(mask.split('(')[1].split(',')[1])
                mask_sizex = float(mask.split('(')[1].split(',')[2][:-1])
                mask_sizey = float(mask.split('(')[1].split(',')[3][:-1])
                mask_angle = float(mask.split('(')[1].split(',')[4][:-2])
#rotate the shears into fram of reference of the mask
shears_x_mask_ref = tools.ra_separation(Shears_remove['X_WORLD'], mask_y, mask_x , mask_y)
shears_y_mask_ref = (Shears_remove['Y_WORLD'] - mask_y)*3600.
shears_x_mask_rot = np.cos(mask_angle*np.pi/180.)*shears_x_mask_ref + np.sin(mask_angle*np.pi/180.)*shears_y_mask_ref
shears_y_mask_rot = -np.sin(mask_angle*np.pi/180.)*shears_x_mask_ref + np.cos(mask_angle*np.pi/180.)*shears_y_mask_ref
inBox = (shears_x_mask_rot < mask_sizex/2.) & \
(shears_x_mask_rot > -mask_sizex/2.) &\
(shears_y_mask_rot < mask_sizey/2.) &\
(shears_y_mask_rot > -mask_sizey/2.)
Shears_remove = Shears_remove[ inBox == False ]
elif mask[0:7] =='polygon':
print("masking a ploygon")
mask_x = mask.split('(')[1].split(',')[::2]
px1 = [float(i) for i in mask_x]
mask_y = mask.split('(')[1].split(',')[1::2]
mask_y[-1] = mask_y[-1][:-2]
py1 = [float(i) for i in mask_y]
for k in np.arange(len(Shears_remove['ra'])):
xl=Shears_remove['X_WORLD'][k]
yl=Shears_remove['Y_WORLD'][k]
inside=inpoly(xl,yl,px1,py1)
if inside==1:
Shears_remove['clean'][k]=1
Shears_remove=Shears_remove[Shears_remove['clean']==0]
fits.writeto(outFile,Shears_remove, overwrite=True,output_verify='ignore' )
| mit | 3,349,394,885,736,628,700 | 36.263941 | 175 | 0.514366 | false |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 4 | 12979 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(
expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(expected_values,
col,
actual_dict[col]))
class TensorFlowDataFrameTestCase(test.TestCase):
"""Tests for `TensorFlowDataFrame`."""
def _make_test_csv(self):
f = tempfile.NamedTemporaryFile(
dir=self.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
intvalue = np.random.randint(-10, 10)
floatvalue = np.random.rand()
boolvalue = int(np.random.rand() > 0.3)
stringvalue = "S: %.4f" % np.random.rand()
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_csv_sparse(self):
f = tempfile.NamedTemporaryFile(
dir=self.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
# leave columns empty; these will be read as default value (e.g. 0 or NaN)
intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
stringvalue = (("S: %.4f" % np.random.rand()) if np.random.rand() > 0.5 else
"")
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_tfrecord(self):
f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir(), delete=False)
w = tf_record.TFRecordWriter(f.name)
for i in range(100):
ex = example_pb2.Example()
ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
ex.features.feature["fixed_len_float"].float_list.value.extend(
[float(i), 2 * float(i)])
w.write(ex.SerializeToString())
return f.name
def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
num_batches, batch_size):
self.assertItemsEqual(
list(pandas_df.columns) + ["index"], tensorflow_df.columns())
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
row_numbers = [
total_row_num % pandas_df.shape[0]
for total_row_num in range(batch_size * batch_num, batch_size * (
batch_num + 1))
]
expected_df = pandas_df.iloc[row_numbers]
_assert_df_equals_dict(expected_df, batch)
def testInitFromPandas(self):
"""Test construction from Pandas DataFrame."""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(
pandas_df, batch_size=10, shuffle=False)
batch = tensorflow_df.run_one_batch()
np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
"Expected index {}; got {}".format(
pandas_df.index.values, batch["index"]))
_assert_df_equals_dict(pandas_df, batch)
def testBatch(self):
"""Tests `batch` method.
`DataFrame.batch()` should iterate through the rows of the
`pandas.DataFrame`, and should "wrap around" when it reaches the last row.
"""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({
"albatross": range(10),
"bluejay": 1,
"cockatoo": range(0, 20, 2),
"penguin": list("abcdefghij")
})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
# Rebatch `df` into the following sizes successively.
batch_sizes = [4, 7]
num_batches = 3
final_batch_size = batch_sizes[-1]
for batch_size in batch_sizes:
tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=final_batch_size)
def testFromNumpy(self):
x = np.eye(20)
tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
for batch in tensorflow_df.run(30):
for ind, val in zip(batch["index"], batch["value"]):
expected_val = np.zeros_like(val)
expected_val[ind] = 1
np.testing.assert_array_equal(expected_val, val)
def testFromCSV(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
enqueue_size = 7
data_path = self._make_test_csv()
default_values = [0, 0.0, 0, ""]
pandas_df = pd.read_csv(data_path)
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
enqueue_size=enqueue_size,
batch_size=batch_size,
shuffle=False,
default_values=default_values)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromCSVLimitEpoch(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = self._make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
actual_num_batches = len(result_batches)
self.assertEqual(expected_num_batches, actual_num_batches)
# TODO(soergel): figure out how to dequeue the final small batch
expected_rows = 1696 # num_epochs * 100
actual_rows = sum([len(x["int"]) for x in result_batches])
self.assertEqual(expected_rows, actual_rows)
def testFromCSVWithFeatureSpec(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
data_path = self._make_test_csv_sparse()
feature_spec = {
"int": parsing_ops.FixedLenFeature(None, dtypes.int16, np.nan),
"float": parsing_ops.VarLenFeature(dtypes.float16),
"bool": parsing_ops.VarLenFeature(dtypes.bool),
"string": parsing_ops.FixedLenFeature(None, dtypes.string, "")
}
pandas_df = pd.read_csv(data_path, dtype={"string": object})
# Pandas insanely uses NaN for empty cells in a string column.
# And, we can't use Pandas replace() to fix them because nan != nan
s = pandas_df["string"]
for i in range(0, len(s)):
if isinstance(s[i], float) and math.isnan(s[i]):
pandas_df.set_value(i, "string", "")
tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
[data_path],
batch_size=batch_size,
shuffle=False,
feature_spec=feature_spec)
# These columns were sparse; re-densify them for comparison
tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromExamples(self):
num_batches = 77
enqueue_size = 11
batch_size = 13
data_path = self._make_test_tfrecord()
features = {
"fixed_len_float":
parsing_ops.FixedLenFeature(
shape=[2], dtype=dtypes.float32, default_value=[0.0, 0.0]),
"var_len_int":
parsing_ops.VarLenFeature(dtype=dtypes.int64)
}
tensorflow_df = df.TensorFlowDataFrame.from_examples(
data_path,
enqueue_size=enqueue_size,
batch_size=batch_size,
features=features,
shuffle=False)
# `test.tfrecord` contains 100 records with two features: var_len_int and
# fixed_len_float. Entry n contains `range(n % 3)` and
# `float(n)` for var_len_int and fixed_len_float,
# respectively.
num_records = 100
def _expected_fixed_len_float(n):
return np.array([float(n), 2 * float(n)])
def _expected_var_len_int(n):
return np.arange(n % 3)
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
record_numbers = [
n % num_records
for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
]
for i, j in enumerate(record_numbers):
np.testing.assert_allclose(
_expected_fixed_len_float(j), batch["fixed_len_float"][i])
var_len_int = batch["var_len_int"]
for i, ind in enumerate(var_len_int.indices):
val = var_len_int.values[i]
expected_row = _expected_var_len_int(record_numbers[ind[0]])
expected_value = expected_row[ind[1]]
np.testing.assert_array_equal(expected_value, val)
def testSplitString(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = self._make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
a, b = tensorflow_df.split("string", 0.7) # no rebatching
total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
a_result_batches = list(a.run(num_epochs=num_epochs))
b_result_batches = list(b.run(num_epochs=num_epochs))
self.assertEqual(expected_num_batches, len(total_result_batches))
self.assertEqual(expected_num_batches, len(a_result_batches))
self.assertEqual(expected_num_batches, len(b_result_batches))
total_rows = sum([len(x["int"]) for x in total_result_batches])
a_total_rows = sum([len(x["int"]) for x in a_result_batches])
b_total_rows = sum([len(x["int"]) for x in b_result_batches])
print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
# TODO(soergel): figure out how to dequeue the final small batch
expected_total_rows = 1696 # (num_epochs * 100)
self.assertEqual(expected_total_rows, total_rows)
self.assertEqual(1087, a_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.7), a_total_rows)
self.assertEqual(609, b_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.3), b_total_rows)
# The strings used for hashing were all unique in the original data, but
# we ran 17 epochs, so each one should appear 17 times. Each copy should
# be hashed into the same partition, so there should be no overlap of the
# keys.
a_strings = set([s for x in a_result_batches for s in x["string"]])
b_strings = set([s for x in b_result_batches for s in x["string"]])
self.assertEqual(frozenset(), a_strings & b_strings)
if __name__ == "__main__":
test.main()
| apache-2.0 | 3,086,519,239,749,617,000 | 34.853591 | 86 | 0.634564 | false |
jaimejimbo/rod-analysis | correlation.py | 1 | 7697 | """
Correlation animation creation module.
"""
import numpy as np
import scipy as sp
import math
import matplotlib.pyplot as plt
import sqlite3 as sql
from matplotlib import animation
import matplotlib
from multiprocessing import Pool
import itertools
np.warnings.filterwarnings('ignore')
_writer = animation.writers['ffmpeg']
writer = _writer(fps=15, metadata=dict(artist='Jaime Perez Aparicio'), bitrate=1800)
WRITER = writer
CURSOR_UP_ONE = '\x1b[1A'
ERASE_LINE = '\x1b[2K'
REWRITE_LAST = CURSOR_UP_ONE + ERASE_LINE + CURSOR_UP_ONE
resol = 30
cresol = 256*32
sigma = 1.0
connection = sql.connect("rods.db")
cursor = connection.cursor()
cursor2 = connection.cursor()
get_rods_sql = "select xmid,ymid,major,minor,angle from datos where experiment_id=? and file_id=? order by ymid,xmid"
get_file_ids_sql = "select distinct file_id from datos where experiment_id=?"
cursor.execute("select distinct experiment_id from datos")
experiment_ids = cursor.fetchall()
experiment_ids = [experiment_id[0] for experiment_id in experiment_ids]
colors = plt.cm.jet(np.linspace(-1,1,cresol))
white = (1,1,1,1)
fig, axarr = plt.subplots(2, 2, sharex=True, sharey=True)
def _update_matrix(row, col, rods_6, rods_12, dx, dy, x0, y0, center, rad, num_rods, distr6_q2, distr6_q4, distr12_q2, distr12_q4):
"""
Updates matrix with density values.
"""
pos = np.array([col*dx+x0, row*dy+y0])
if np.sum((pos-center)**2) <= rad**2:
xcenter = col*dx+x0
ycenter = row*dy+y0
xvals6 = rods_6[:, 0]
yvals6 = rods_6[:, 1]
xvals12 = rods_12[:, 0]
yvals12 = rods_12[:, 1]
angles6 = rods_6[:, 2]
angles12 = rods_12[:, 2]
q2_6 = (np.cos(2*angles6) + np.sin(2*angles6))/num_rods
q2_12 = (np.cos(2*angles12) + np.sin(2*angles12))/num_rods
        q4_6 = (np.cos(4*angles6) + np.sin(4*angles6))/num_rods
q4_12 = (np.cos(4*angles12) + np.sin(4*angles12))/num_rods
val6 = -((xvals6-xcenter)**2/dx**2 + (yvals6-ycenter)**2/dy**2)
exps6 = np.exp(val6/(4*sigma**2))/(np.sqrt(2*math.pi)*sigma)
distr6_q2[row, col] = np.dot(q2_6, exps6)/5.0
distr6_q4[row, col] = np.dot(q4_6, exps6)/5.0
val12 = -((xvals12-xcenter)**2/dx**2 + (yvals12-ycenter)**2/dy**2)
exps12 = np.exp(val12/(4*sigma**2))/(np.sqrt(2*math.pi)*sigma)
distr12_q2[row, col] = np.dot(q2_12, exps12)/5.0
distr12_q4[row, col] = np.dot(q4_12, exps12)/5.0
else:
distr6_q2[row, col] = np.nan
distr6_q4[row, col] = np.nan
distr12_q2[row, col] = np.nan
distr12_q4[row, col] = np.nan
return distr6_q2, distr6_q4, distr12_q2, distr12_q4
def _correlation_matrix_process(experiment_id_, file_ids, rods):
"""
Order parameter matrix computing process.
"""
distr6_q2 = np.array([[0.0 for dummy1 in range(resol)] for dummy2 in range(resol)])
distr6_q4 = np.array([[0.0 for dummy1 in range(resol)] for dummy2 in range(resol)])
distr12_q2 = np.array([[0.0 for dummy1 in range(resol)] for dummy2 in range(resol)])
distr12_q4 = np.array([[0.0 for dummy1 in range(resol)] for dummy2 in range(resol)])
#len(file_ids) can be < 5
if not len(rods):
print("Empty")
print(experiment_id_)
print(file_ids)
return
rods = np.array([np.array(rod) for rod in rods])
rods_6, rods_12 = [], []
for rod in rods:
if 6 < float(rod[2])/rod[3] < 12:
            rods_6.append(np.append(rod[:2], rod[4]))
        elif 12 < float(rod[2])/rod[3] < 20:
            rods_12.append(np.append(rod[:2], rod[4]))
rods_6, rods_12 = np.array(rods_6), np.array(rods_12)
num_rods = len(rods)
xvals, yvals = rods[:, 0], rods[:, 1]
x0, y0 = min(xvals), min(yvals)
dx = float(max(xvals)-x0)/resol
dy = float(max(yvals)-y0)/resol
rad = (dx*resol/2.0 + dy*resol/2.0)/2.0
center = np.array([(min(xvals)+max(xvals))/2.0, (min(yvals)+max(yvals))/2.0])
rows, cols = np.meshgrid(range(resol), range(resol))
rows = np.array(rows).reshape(1,resol**2)[0]
cols = np.array(cols).reshape(1,resol**2)[0]
for row, col in zip(rows, cols):
distr6_q2, distr6_q4, distr12_q2, distr12_q4 = _update_matrix(row, col, rods_6,
rods_12, dx, dy, x0, y0, center, rad, num_rods,
distr6_q2, distr6_q4, distr12_q2, distr12_q4)
return distr6_q2, distr6_q4, distr12_q2, distr12_q4
def _get_distrs(experiment_id_, file_ids):
"""
Computes plottable data.
"""
cursor2 = connection.cursor()
rods = []
for index in range(5):
cursor2.execute(get_rods_sql, (str(experiment_id_), str(file_ids[index])))
rods.append(cursor2.fetchall())
try:
pool = Pool(4)
r = itertools.repeat
izip = zip(r(experiment_id_), r(file_ids), rods)
distrs = np.array(pool.starmap(_correlation_matrix_process, izip))
finally:
pool.close()
pool.join()
distr6_q2 = sum(distrs[:,0,:,:])
distr6_q4 = sum(distrs[:,1,:,:])
distr12_q2 = sum(distrs[:,2,:,:])
distr12_q4 = sum(distrs[:,3,:,:])
return distr6_q2, distr6_q4, distr12_q2, distr12_q4
def _plot_frame(experiment_id_, file_ids, fig, axarr):
"""
Plots frame.
"""
distr6_q2, distr6_q4, distr12_q2, distr12_q4 = _get_distrs(experiment_id_, file_ids)
x_vals, y_vals = np.array(range(resol)), np.array(range(resol))
mesh = np.array(np.meshgrid(x_vals, y_vals))
x_vals, y_vals = tuple(mesh.reshape(2, resol**2))
rad = (max(x_vals)-min(x_vals))/(2.0*resol)
size = 125*(rad)**2
sp1 = axarr[0, 0].scatter(x_vals, y_vals, c=distr6_q2, marker='s', s=size)
sp2 = axarr[0, 1].scatter(x_vals, y_vals, c=distr6_q4, marker='s', s=size)
sp3 = axarr[1, 0].scatter(x_vals, y_vals, c=distr12_q2, marker='s', s=size)
sp4 = axarr[1, 1].scatter(x_vals, y_vals, c=distr12_q4, marker='s', s=size)
fig.suptitle("angular correlations")
axarr[0, 0].set_title("K6 Q2")
axarr[0, 1].set_title("K6 Q4")
axarr[1, 0].set_title("K12 Q2")
axarr[1, 1].set_title("K12 Q4")
for ax_ in axarr.reshape(1, 4):
for ax in ax_:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
    cb1 = plt.colorbar(sp1, ax=axarr[0, 0])
    cb2 = plt.colorbar(sp2, ax=axarr[0, 1])
    cb3 = plt.colorbar(sp3, ax=axarr[1, 0])
    cb4 = plt.colorbar(sp4, ax=axarr[1, 1])
plt.tight_layout()
plt.subplots_adjust(top=0.85)
def create_animation(experiment_id):
"""
Creates animation for an experiment.
"""
file_ids = cursor.execute(get_file_ids_sql, (experiment_id,)).fetchall()
file_ids = [file_id[0] for file_id in file_ids]
num_frames = int(len(file_ids)/5)
global fig
global axarr
exit = False
frame_idx = 0
msg = "Computing order parameter matrix for experiment {}".format(experiment_id)
print(msg)
def animate(frame_idx):
"""
Wrapper
"""
global fig
global axarr
progress = str(frame_idx) + "/" + str(num_frames) + "\t("
progress += "{0:.2f}%)"
print(progress.format(frame_idx*100.0/num_frames),end='\r')
file_id0 = frame_idx*5
_plot_frame(int(experiment_id), file_ids[file_id0:file_id0+5], fig, axarr)
anim = animation.FuncAnimation(fig, animate, frames=num_frames, repeat=False)
name = 'correlation_animation{}.mp4'.format(experiment_id)
try:
anim.save(name)
except KeyboardInterrupt:
connection.commit()
connection.close()
raise KeyboardInterrupt
connection.commit()
for experiment_id in experiment_ids:
create_animation(experiment_id)
connection.close()
| gpl-3.0 | 5,106,264,792,251,868,000 | 36.730392 | 131 | 0.610628 | false |
alee156/cldev | Tony/scripts/atlasregiongraphWithLabels.py | 2 | 3220 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function
__author__ = 'seelviz'
from plotly.offline import download_plotlyjs
from plotly.graph_objs import *
from plotly import tools
import plotly
import os
#os.chdir('C:/Users/L/Documents/Homework/BME/Neuro Data I/Data/')
import csv,gc # garbage memory collection :)
import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import axes3d
# from mpl_toolkits.mplot3d import axes3d
# from collections import namedtuple
import csv
import re
import matplotlib
import time
import seaborn as sns
from collections import OrderedDict
class atlasregiongraph(object):
"""Class for generating the color coded atlas region graphs"""
def __init__(self, token, path=None):
self._token = token
self._path = path
data_txt = ""
if path == None:
data_txt = token + '/' + token + '.csv'
else:
data_txt = path + '/' + token + '.csv'
self._data = np.genfromtxt(data_txt, delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])
def generate_atlas_region_graph(self, path=None, numRegions = 10):
font = {'weight' : 'bold',
'size' : 18}
matplotlib.rc('font', **font)
thedata = self._data
if path == None:
thedata = self._data
else:
### load data
thedata = np.genfromtxt(self._token + '/' + self._token + '.csv', delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])
region_dict = OrderedDict()
for l in thedata:
            # NOTE: atlasCCF (a dict mapping region-id strings to region names) is not
            # defined in this module and must be provided externally; fall back to a
            # generic label so the plot can still be generated without it.
            trace = atlasCCF[str(l[3])] if 'atlasCCF' in globals() else 'trace' + str(l[3])
if trace not in region_dict:
region_dict[trace] = np.array([[l[0], l[1], l[2], l[3]]])
else:
tmp = np.array([[l[0], l[1], l[2], l[3]]])
region_dict[trace] = np.concatenate((region_dict.get(trace, np.zeros((1,4))), tmp), axis=0)
current_palette = sns.color_palette("husl", numRegions)
# print current_palette
data = []
for i, key in enumerate(region_dict):
trace = region_dict[key]
tmp_col = current_palette[i]
tmp_col_lit = 'rgb' + str(tmp_col)
trace_scatter = Scatter3d(
x = trace[:,0],
y = trace[:,1],
z = trace[:,2],
mode='markers',
marker=dict(
size=1.2,
color=tmp_col_lit, #'purple', # set color to an array/list of desired values
colorscale='Viridis', # choose a colorscale
opacity=0.15
)
)
data.append(trace_scatter)
layout = Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
),
paper_bgcolor='rgb(0,0,0)',
plot_bgcolor='rgb(0,0,0)'
)
fig = Figure(data=data, layout=layout)
plotly.offline.plot(fig, filename= self._path + '/' + self._token + "_region_color.html")
| apache-2.0 | -6,013,394,582,156,751,000 | 29.666667 | 156 | 0.518634 | false |
umd-memsys/DRAMsim3 | scripts/final_PowerTemperature_map.py | 1 | 2884 | import argparse
from numpy import genfromtxt
from numpy import amax
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mem_name", help="type of memory")
parser.add_argument("-l", "--layer_str", help="specified layer")
parser.add_argument("-f", "--save_root", help="figure saved file")
args = parser.parse_args()
if len(sys.argv) != 7:
parser.print_help()
sys.exit(0)
PT_file = "../build/" + args.mem_name + "-output-final_power_temperature.csv"
pos_file = "../build/" + args.mem_name + "-output-bank_position.csv"
print "data file: " + PT_file
print "bank position file: " + pos_file
PT_data = genfromtxt(PT_file, delimiter=',')
PT_data = PT_data[1:, :]
Bpos = genfromtxt(pos_file, delimiter=',')
Bpos = Bpos[1:, :]
X = int(amax(PT_data[:, 1]))
Y = int(amax(PT_data[:, 2]))
Z = int(amax(PT_data[:, 3]))
print "Dimension: (" + str(X) + ", " + str(Y) + ", " + str(Z) + ")"
power = np.empty((X+1, Y+1, Z+1))
temperature = np.empty((X+1, Y+1, Z+1))
for i in range(0, len(PT_data)):
x_ = int(PT_data[i,1])
y_ = int(PT_data[i,2])
z_ = int(PT_data[i,3])
power[x_, y_, z_] = PT_data[i,4]
temperature[x_, y_, z_] = PT_data[i,5]
layer = int(args.layer_str)
if layer >= 0 and layer <= Z:
plt.figure()
plt.imshow(power[:,:,layer], aspect='auto')
ca = plt.gca()
for i in range(0, len(Bpos)):
if Bpos[i,6] == layer:
x_ = Bpos[i,2]
y_ = Bpos[i,4]
w_ = Bpos[i,3] - Bpos[i,2] + 1
l_ = Bpos[i,5] - Bpos[i,4] + 1
ca.add_patch(Rectangle((y_-0.5, x_-0.5), l_, w_, fill=None, edgecolor='r'))
if l_ > w_:
rot = 0
else:
rot = 0
ca.text(y_-0.5+l_/4, x_-0.5+w_/2, 'R'+str(int(Bpos[i,0]))+'B'+str(int(Bpos[i,1])), color='r', rotation=rot)
ca.set_xlabel('Y (Column)')
ca.set_ylabel('X (Row)')
title_str = 'Power (layer' + str(layer) + ')'
ca.set_title(title_str)
plt.colorbar()
plt.savefig(args.save_root + args.mem_name + '_final_power_layer' + str(layer) + '.png')
plt.figure()
plt.imshow(temperature[:,:,layer], aspect='auto')
ca = plt.gca()
for i in range(0, len(Bpos)):
if Bpos[i,6] == layer:
x_ = Bpos[i,2]
y_ = Bpos[i,4]
w_ = Bpos[i,3] - Bpos[i,2] + 1
l_ = Bpos[i,5] - Bpos[i,4] + 1
ca.add_patch(Rectangle((y_-0.5, x_-0.5), l_, w_, fill=None, edgecolor='r'))
if l_ > w_:
rot = 0
else:
rot = 0
ca.text(y_-0.5+l_/4, x_-0.5+w_/2, 'R'+str(int(Bpos[i,0]))+'B'+str(int(Bpos[i,1])), color='r', rotation=rot)
ca.set_xlabel('Y (Column)')
ca.set_ylabel('X (Row)')
    title_str = 'Temperature (layer' + str(layer) + ')'
ca.set_title(title_str)
plt.colorbar()
plt.savefig(args.save_root + args.mem_name + '_final_temperature_layer' + str(layer) + '.png')
else:
print "You should name a correct layer index"
print "Layer index should be in the range of [" + str(0) + ", " + str(Z) + "]"
| mit | -671,032,131,470,933,500 | 26.207547 | 110 | 0.588766 | false |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/event_handling/pick_event_demo.py | 7 | 6316 | #!/usr/bin/env python
"""
You can enable picking by setting the "picker" property of an artist
(for example, a matplotlib Line2D, Text, Patch, Polygon, AxesImage,
etc...)
There are a variety of meanings of the picker property
None - picking is disabled for this artist (default)
boolean - if True then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
float - if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if it's data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, for example, the indices of the data within
epsilon of the pick event
function - if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event.
hit, props = picker(artist, mouseevent)
to determine the hit test. If the mouse event is over the
artist, return hit=True and props is a dictionary of properties
you want added to the PickEvent attributes
After you have enabled an artist for picking by setting the "picker"
property, you need to connect to the figure canvas pick_event to get
pick callbacks on mouse press events. For example,
def pick_handler(event):
mouseevent = event.mouseevent
artist = event.artist
# now do something with this...
The pick event (matplotlib.backend_bases.PickEvent) which is passed to
your callback is always fired with two attributes:
mouseevent - the mouse event that generate the pick event. The
mouse event in turn has attributes like x and y (the coordinates in
display space, such as pixels from left, bottom) and xdata, ydata (the
coords in data space). Additionally, you can get information about
which buttons were pressed, which keys were pressed, which Axes
the mouse is over, etc. See matplotlib.backend_bases.MouseEvent
for details.
artist - the matplotlib.artist that generated the pick event.
Additionally, certain artists like Line2D and PatchCollection may
attach additional meta data like the indices into the data that meet
the picker criteria (for example, all the points in the line that are within
the specified epsilon tolerance)
The examples below illustrate each of these methods.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from matplotlib.image import AxesImage
import numpy as np
from numpy.random import rand
if 1: # simple picking, lines, rectangles and text
fig, (ax1, ax2) = plt.subplots(2,1)
ax1.set_title('click on points, rectangles or text', picker=True)
ax1.set_ylabel('ylabel', picker=True, bbox=dict(facecolor='red'))
line, = ax1.plot(rand(100), 'o', picker=5) # 5 points tolerance
# pick the rectangle
bars = ax2.bar(range(10), rand(10), picker=True)
for label in ax2.get_xticklabels(): # make the xtick labels pickable
label.set_picker(True)
def onpick1(event):
if isinstance(event.artist, Line2D):
thisline = event.artist
xdata = thisline.get_xdata()
ydata = thisline.get_ydata()
ind = event.ind
            print('onpick1 line:', list(zip(np.take(xdata, ind), np.take(ydata, ind))))
elif isinstance(event.artist, Rectangle):
patch = event.artist
print('onpick1 patch:', patch.get_path())
elif isinstance(event.artist, Text):
text = event.artist
print('onpick1 text:', text.get_text())
fig.canvas.mpl_connect('pick_event', onpick1)
if 1: # picking with a custom hit test function
# you can define custom pickers by setting picker to a callable
# function. The function has the signature
#
# hit, props = func(artist, mouseevent)
#
# to determine the hit test. if the mouse event is over the artist,
# return hit=True and props is a dictionary of
# properties you want added to the PickEvent attributes
def line_picker(line, mouseevent):
"""
find the points within a certain distance from the mouseclick in
data coords and attach some extra attributes, pickx and picky
which are the data points that were picked
"""
if mouseevent.xdata is None: return False, dict()
xdata = line.get_xdata()
ydata = line.get_ydata()
maxd = 0.05
d = np.sqrt((xdata-mouseevent.xdata)**2. + (ydata-mouseevent.ydata)**2.)
ind = np.nonzero(np.less_equal(d, maxd))
if len(ind):
pickx = np.take(xdata, ind)
picky = np.take(ydata, ind)
props = dict(ind=ind, pickx=pickx, picky=picky)
return True, props
else:
return False, dict()
def onpick2(event):
print('onpick2 line:', event.pickx, event.picky)
fig, ax = plt.subplots()
ax.set_title('custom picker for line data')
line, = ax.plot(rand(100), rand(100), 'o', picker=line_picker)
fig.canvas.mpl_connect('pick_event', onpick2)
if 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)
x, y, c, s = rand(4, 100)
def onpick3(event):
ind = event.ind
print('onpick3 scatter:', ind, np.take(x, ind), np.take(y, ind))
fig, ax = plt.subplots()
col = ax.scatter(x, y, 100*s, c, picker=True)
#fig.savefig('pscoll.eps')
fig.canvas.mpl_connect('pick_event', onpick3)
if 1: # picking images (matplotlib.image.AxesImage)
fig, ax = plt.subplots()
im1 = ax.imshow(rand(10,5), extent=(1,2,1,2), picker=True)
im2 = ax.imshow(rand(5,10), extent=(3,4,1,2), picker=True)
im3 = ax.imshow(rand(20,25), extent=(1,2,3,4), picker=True)
im4 = ax.imshow(rand(30,12), extent=(3,4,3,4), picker=True)
ax.axis([0,5,0,5])
def onpick4(event):
artist = event.artist
if isinstance(artist, AxesImage):
im = artist
A = im.get_array()
print('onpick4 image', A.shape)
fig.canvas.mpl_connect('pick_event', onpick4)
plt.show()
| mit | 774,636,289,026,054,000 | 34.683616 | 81 | 0.668303 | false |
quantopian/qrisk | empyrical/tests/test_perf_attrib.py | 1 | 7083 | import numpy as np
import pandas as pd
import unittest
from empyrical.perf_attrib import perf_attrib
class PerfAttribTestCase(unittest.TestCase):
def test_perf_attrib_simple(self):
start_date = '2017-01-01'
periods = 2
dts = pd.date_range(start_date, periods=periods)
dts.name = 'dt'
tickers = ['stock1', 'stock2']
styles = ['risk_factor1', 'risk_factor2']
returns = pd.Series(data=[0.1, 0.1], index=dts)
factor_returns = pd.DataFrame(
columns=styles,
index=dts,
data={'risk_factor1': [.1, .1],
'risk_factor2': [.1, .1]}
)
index = pd.MultiIndex.from_product(
[dts, tickers], names=['dt', 'ticker'])
positions = pd.Series([0.2857142857142857, 0.7142857142857143,
0.2857142857142857, 0.7142857142857143],
index=index)
factor_loadings = pd.DataFrame(
columns=styles,
index=index,
data={'risk_factor1': [0.25, 0.25, 0.25, 0.25],
'risk_factor2': [0.25, 0.25, 0.25, 0.25]}
)
expected_perf_attrib_output = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2', 'common_returns',
'specific_returns', 'total_returns'],
data={'risk_factor1': [0.025, 0.025],
'risk_factor2': [0.025, 0.025],
'common_returns': [0.05, 0.05],
'specific_returns': [0.05, 0.05],
'total_returns': returns}
)
expected_exposures_portfolio = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2'],
data={'risk_factor1': [0.25, 0.25],
'risk_factor2': [0.25, 0.25]}
)
exposures_portfolio, perf_attrib_output = perf_attrib(returns,
positions,
factor_returns,
factor_loadings)
pd.util.testing.assert_frame_equal(expected_perf_attrib_output,
perf_attrib_output)
pd.util.testing.assert_frame_equal(expected_exposures_portfolio,
exposures_portfolio)
# test long and short positions
positions = pd.Series([0.5, -0.5, 0.5, -0.5], index=index)
exposures_portfolio, perf_attrib_output = perf_attrib(returns,
positions,
factor_returns,
factor_loadings)
expected_perf_attrib_output = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2', 'common_returns',
'specific_returns', 'total_returns'],
data={'risk_factor1': [0.0, 0.0],
'risk_factor2': [0.0, 0.0],
'common_returns': [0.0, 0.0],
'specific_returns': [0.1, 0.1],
'total_returns': returns}
)
expected_exposures_portfolio = pd.DataFrame(
index=dts,
columns=['risk_factor1', 'risk_factor2'],
data={'risk_factor1': [0.0, 0.0],
'risk_factor2': [0.0, 0.0]}
)
pd.util.testing.assert_frame_equal(expected_perf_attrib_output,
perf_attrib_output)
pd.util.testing.assert_frame_equal(expected_exposures_portfolio,
exposures_portfolio)
def test_perf_attrib_regression(self):
positions = pd.read_csv('empyrical/tests/test_data/positions.csv',
index_col=0, parse_dates=True)
positions.columns = [int(col) if col != 'cash' else col
for col in positions.columns]
positions = positions.divide(positions.sum(axis='columns'),
axis='rows')
positions = positions.drop('cash', axis='columns').stack()
returns = pd.read_csv('empyrical/tests/test_data/returns.csv',
index_col=0, parse_dates=True,
header=None, squeeze=True)
factor_loadings = pd.read_csv(
'empyrical/tests/test_data/factor_loadings.csv',
index_col=[0, 1], parse_dates=True
)
factor_returns = pd.read_csv(
'empyrical/tests/test_data/factor_returns.csv',
index_col=0, parse_dates=True
)
residuals = pd.read_csv('empyrical/tests/test_data/residuals.csv',
index_col=0, parse_dates=True)
residuals.columns = [int(col) for col in residuals.columns]
intercepts = pd.read_csv('empyrical/tests/test_data/intercepts.csv',
index_col=0, header=None, squeeze=True)
risk_exposures_portfolio, perf_attrib_output = perf_attrib(
returns,
positions,
factor_returns,
factor_loadings,
)
specific_returns = perf_attrib_output['specific_returns']
common_returns = perf_attrib_output['common_returns']
combined_returns = specific_returns + common_returns
# since all returns are factor returns, common returns should be
# equivalent to total returns, and specific returns should be 0
pd.util.testing.assert_series_equal(returns,
common_returns,
check_names=False)
self.assertTrue(np.isclose(specific_returns, 0).all())
# specific and common returns combined should equal total returns
pd.util.testing.assert_series_equal(returns,
combined_returns,
check_names=False)
# check that residuals + intercepts = specific returns
self.assertTrue(np.isclose((residuals + intercepts), 0).all())
# check that exposure * factor returns = common returns
expected_common_returns = risk_exposures_portfolio.multiply(
factor_returns, axis='rows'
).sum(axis='columns')
pd.util.testing.assert_series_equal(expected_common_returns,
common_returns,
check_names=False)
# since factor loadings are ones, portfolio risk exposures
# should be ones
pd.util.testing.assert_frame_equal(
risk_exposures_portfolio,
pd.DataFrame(np.ones_like(risk_exposures_portfolio),
index=risk_exposures_portfolio.index,
columns=risk_exposures_portfolio.columns)
)
| apache-2.0 | -1,256,355,192,365,609,500 | 38.132597 | 78 | 0.504588 | false |
SaturnFromTitan/Freedan | adwords_reports/account.py | 1 | 2460 | import io
import pandas as pd
from retrying import retry
from adwords_reports import logger
from adwords_reports.account_label import AccountLabel
class Account:
SELECTOR = {
"fields": ["Name", "CustomerId", "CurrencyCode", "DateTimeZone"],
"predicates": [{
"field": "CanManageClients",
"operator": "EQUALS",
"values": "FALSE"
}],
"ordering": [{
"field": "Name",
"sortOrder": "ASCENDING"
}]
}
def __init__(self, client, account_id, name, currency, time_zone, labels):
self.client = client
self.id = account_id
self.name = name
self.currency = currency
self.time_zone = time_zone
self.labels = labels
@classmethod
def from_ad_account(cls, client, ad_account):
labels = cls.parse_labels(ad_account)
return cls(client=client, account_id=ad_account.customerId, name=ad_account.name,
currency=ad_account.currencyCode, time_zone=ad_account.dateTimeZone,
labels=labels)
def download(self, report_definition, zero_impressions):
""" Downloads a report from the API
:param report_definition: ReportDefinition
:param zero_impressions: bool
:return: DataFrame
"""
json_report_definition = report_definition.raw
header = json_report_definition["selector"]["fields"]
response = self._download(json_report_definition, zero_impressions)
data = io.StringIO(response)
return pd.read_csv(data, names=header)
@retry(stop_max_attempt_number=3, wait_random_min=5000, wait_random_max=10000)
def _download(self, json_report_definition, zero_impressions):
logger.info("Downloading report.")
downloader = self.client.downloader
response = downloader.DownloadReportAsString(
json_report_definition, skip_report_header=True, skip_column_header=True,
skip_report_summary=True, include_zero_impressions=zero_impressions)
return response
@staticmethod
def parse_labels(ad_account):
if "accountLabels" in ad_account:
return [AccountLabel.from_ad_account_label(ad_label)
for ad_label in ad_account["accountLabels"]]
else:
return list()
def __repr__(self):
return "\nAccountName: {name} (ID: {id})".format(name=self.name, id=self.id)
| apache-2.0 | -3,467,836,693,126,205,400 | 34.652174 | 89 | 0.621951 | false |
YuepengGuo/backtrader | backtrader/feeds/pandafeed.py | 1 | 7678 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader.utils.py3 import filter, string_types, integer_types
from backtrader import date2num
import backtrader.feed as feed
class PandasDirectData(feed.DataBase):
'''
Uses a Pandas DataFrame as the feed source, iterating directly over the
tuples returned by "itertuples".
This means that all parameters related to lines must have numeric
values as indices into the tuples
Note:
- The ``dataname`` parameter is a Pandas DataFrame
- A negative value in any of the parameters for the Data lines
indicates it's not present in the DataFrame
'''
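    # Illustrative assumption: itertuples() yields (index, col0, col1, ...), so the
    # defaults below expect a datetime index (position 0) and columns ordered
    # [open, high, low, close, volume, openinterest] at positions 1..6.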
params = (
('datetime', 0),
('open', 1),
('high', 2),
('low', 3),
('close', 4),
('volume', 5),
('openinterest', 6),
)
datafields = [
'datetime', 'open', 'high', 'low', 'close', 'volume', 'openinterest'
]
def start(self):
# reset the iterator on each start
self._rows = self.p.dataname.itertuples()
def _load(self):
try:
row = next(self._rows)
except StopIteration:
return False
# Set the standard datafields - except for datetime
for datafield in self.datafields[1:]:
# get the column index
colidx = getattr(self.params, datafield)
if colidx < 0:
# column not present -- skip
continue
# get the line to be set
line = getattr(self.lines, datafield)
# indexing for pandas: 1st is colum, then row
line[0] = row[colidx]
# datetime
colidx = getattr(self.params, self.datafields[0])
tstamp = row[colidx]
# convert to float via datetime and store it
dt = tstamp.to_datetime()
dtnum = date2num(dt)
# get the line to be set
line = getattr(self.lines, self.datafields[0])
line[0] = dtnum
# Done ... return
return True
class PandasData(feed.DataBase):
'''
Uses a Pandas DataFrame as the feed source, using indices into column
names (which can be "numeric")
    The parameters related to lines can therefore be given either as numeric
    column indices or as column-name strings (see the Note below)
Note:
- The ``dataname`` parameter is a Pandas DataFrame
- Values possible for datetime
- None: the index contains the datetime
- -1: no index, autodetect column
- >= 0 or string: specific colum identifier
- For other lines parameters
- None: column not present
- -1: autodetect
- >= 0 or string: specific colum identifier
'''
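    # Minimal usage sketch (column names here are hypothetical):
    #   data = PandasData(dataname=df, datetime=None,   # datetime taken from the index
    #                     open='Open', high='High', low='Low',
    #                     close='Close', volume='Volume', openinterest=None)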
params = (
# Possible values for datetime (must always be present)
# None : datetime is the "index" in the Pandas Dataframe
# -1 : autodetect position or case-wise equal name
# >= 0 : numeric index to the colum in the pandas dataframe
# string : column name (as index) in the pandas dataframe
('datetime', None),
# Possible values below:
# None : column not present
# -1 : autodetect position or case-wise equal name
# >= 0 : numeric index to the colum in the pandas dataframe
# string : column name (as index) in the pandas dataframe
('open', -1),
('high', -1),
('low', -1),
('close', -1),
('volume', -1),
('openinterest', -1),
)
datafields = [
'datetime', 'open', 'high', 'low', 'close', 'volume', 'openinterest'
]
def __init__(self):
super(PandasData, self).__init__()
# these "colnames" can be strings or numeric types
colnames = list(self.p.dataname.columns.values)
if self.p.datetime is None:
# datetime is expected as index col and hence not returned
# add fake entry for the autodetection algorithm
colnames.insert(0, 0)
# try to autodetect if all columns are numeric
cstrings = filter(lambda x: isinstance(x, string_types), colnames)
colsnumeric = not len(cstrings)
# Where each datafield find its value
self._colmapping = dict()
# Build the column mappings to internal fields in advance
for i, datafield in enumerate(self.datafields):
defmapping = getattr(self.params, datafield)
if isinstance(defmapping, integer_types) and defmapping < 0:
# autodetection requested
if colsnumeric:
# matching names doesn't help, all indices are numeric
# use current colname index
self._colmapping[datafield] = colnames[i]
else:
# name matching may be possible
for colname in colnames:
if isinstance(colname, string_types):
if datafield.lower() == colname.lower():
self._colmapping[datafield] = colname
break
if datafield not in self._colmapping:
# not yet there ... use current index
self._colmapping[datafield] = colnames[i]
else:
# all other cases -- used given index
self._colmapping[datafield] = defmapping
def start(self):
# reset the length with each start
self._idx = -1
def _load(self):
self._idx += 1
if self._idx >= len(self.p.dataname):
# exhausted all rows
return False
# Set the standard datafields
for datafield in self.datafields[1:]:
colindex = self._colmapping[datafield]
if colindex is None:
# datafield signaled as missing in the stream: skip it
continue
# get the line to be set
line = getattr(self.lines, datafield)
# indexing for pandas: 1st is colum, then row
line[0] = self.p.dataname[colindex][self._idx]
# datetime conversion
coldtime = self._colmapping[self.datafields[0]]
if coldtime is None:
# standard index in the datetime
tstamp = self.p.dataname.index[self._idx]
else:
# it's in a different column ... use standard column index
tstamp = self.p.dataname.index[coldtime][self._idx]
# convert to float via datetime and store it
dt = tstamp.to_datetime()
dtnum = date2num(dt)
self.lines.datetime[0] = dtnum
# Done ... return
return True
| gpl-3.0 | 4,511,277,259,873,989,600 | 31.125523 | 79 | 0.569289 | false |
pkubik/setags | setags/data/input.py | 1 | 5577 | from collections import defaultdict
from pathlib import Path
import tempfile
import numpy as np
import pandas as pd
import tensorflow as tf
import setags.data.utils as utils
import logging
EMBEDDING_SIZE = 300
FILENAME_INPUT_PRODUCER_SEED = 1
RECORDS_FILE_EXTENSION = '.tfrecords'
log = logging.getLogger(__name__)
def create_input_fn(data_subdir: Path, batch_size: int, for_train=True, num_epochs=1):
input_files = all_records_files(data_subdir)
def input_fn():
filename_queue = tf.train.string_input_producer(input_files, num_epochs=num_epochs,
seed=FILENAME_INPUT_PRODUCER_SEED)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
if for_train:
examples_batch = tf.train.shuffle_batch(
[serialized_example], batch_size=batch_size, num_threads=3,
capacity=utils.MIN_AFTER_DEQUEUE + 5 * batch_size,
min_after_dequeue=utils.MIN_AFTER_DEQUEUE)
else:
examples_batch = tf.train.batch(
[serialized_example], batch_size=batch_size, num_threads=3,
capacity=5 * batch_size, allow_smaller_final_batch=True)
return parse_examples_batch(examples_batch)
return input_fn
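# Illustrative use with a tf.estimator (directory and parameters are hypothetical):
#   estimator.train(input_fn=create_input_fn(train_dir, batch_size=32, num_epochs=10))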
def all_records_files(data_subdir):
return [str(file_path) for file_path in data_subdir.iterdir() if file_path.suffix == RECORDS_FILE_EXTENSION]
def parse_examples_batch(examples_batch):
example_fields = tf.parse_example(
examples_batch,
features={
'id': tf.FixedLenFeature([], dtype=tf.string),
'title': tf.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True),
'title_bio': tf.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True),
'title_length': tf.FixedLenFeature([], dtype=tf.int64),
'content': tf.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True),
'content_bio': tf.FixedLenSequenceFeature([], dtype=tf.int64, allow_missing=True),
'content_length': tf.FixedLenFeature([], dtype=tf.int64)
})
features = {key: example_fields[key]
for key in ['id', 'title', 'title_length', 'content', 'content_length']}
labels = {key: example_fields[key] for key in ['title_bio', 'content_bio']}
return features, labels
class PredictionInput:
def __init__(self, data_file: Path, data_dir: Path, vocabulary: list, batch_size: int,
embedding_matrix: np.ndarray = None):
self.data_dir = data_dir
self.original_vocab_size = len(vocabulary)
self.word_encoding = defaultdict(lambda: len(self.word_encoding))
self.word_encoding.update({value: i for i, value in enumerate(vocabulary)})
self.embedding_matrix = embedding_matrix
if self.embedding_matrix is None:
self.embedding_matrix = utils.load_embeddings_matrix(self.data_dir)
self._create_input_fn(batch_size, data_file)
self._create_hooks()
def _create_input_fn(self, batch_size, data_file):
with data_file.open() as f:
df = pd.read_csv(f)
ids = []
titles = []
contents = []
max_title_length = 0
max_content_length = 0
for _, row in df.iterrows():
ids.append(str(row.id))
encoded_title = utils.encode_text(row.title, self.word_encoding)
titles.append(encoded_title)
encoded_content = utils.encode_text(row.content, self.word_encoding)
contents.append(encoded_content)
max_title_length = max(max_title_length, len(encoded_title))
max_content_length = max(max_content_length, len(encoded_content))
title_array = encode_as_array(titles, max_title_length)
content_array = encode_as_array(contents, max_content_length)
data = {
'id': np.array(ids),
'title': title_array,
'title_length': np.array([len(title) for title in titles]),
'content': content_array,
'content_length': np.array([len(content) for content in contents]),
}
self.input_fn = tf.estimator.inputs.numpy_input_fn(data, batch_size=batch_size, shuffle=False)
def _create_hooks(self):
words = utils.encoding_as_list(self.word_encoding)[self.original_vocab_size:]
if len(words) > 0:
with tempfile.NamedTemporaryFile(mode='w+t', prefix='vocab-', delete=False) as vocab_ext_file:
self.vocab_ext_path = vocab_ext_file.name
for word in words:
vocab_ext_file.write(str(word) + '\n')
embedding_model = utils.load_embedding_model(self.data_dir)
new_vectors = utils.create_embedding_vectors(words, embedding_model)
new_matrix = np.array(new_vectors)
all_embeddings = np.concatenate((self.embedding_matrix, new_matrix))
else:
all_embeddings = self.embedding_matrix
self.hooks = [create_embedding_feed_hook(all_embeddings)]
def encode_as_array(sequences: list, max_sequence_length):
ret = np.zeros([len(sequences), max_sequence_length], dtype=np.int64)
for r, seq in enumerate(sequences):
for c, elem in enumerate(seq):
ret[r][c] = elem
return ret
def create_embedding_feed_hook(embedding_matrix):
def feed_fn():
return {
'embeddings:0': embedding_matrix
}
return tf.train.FeedFnHook(feed_fn=feed_fn)
| mit | -1,952,944,910,254,285,000 | 38.553191 | 112 | 0.622736 | false |
sniemi/SamPy | sandbox/src1/examples/cursor_demo.py | 1 | 2270 | #!/usr/bin/env python
"""
This example shows how to use matplotlib to provide a data cursor. It
uses matplotlib to draw the cursor and may be slow since this
requires redrawing the figure with every mouse move.
Faster cursoring is possible using native GUI drawing, as in
wxcursor_demo.py
"""
from pylab import *
class Cursor:
def __init__(self, ax):
self.ax = ax
self.lx, = ax.plot( (0,0), (0,0), 'k-' ) # the horiz line
self.ly, = ax.plot( (0,0), (0,0), 'k-' ) # the vert line
# text location in axes coords
self.txt = ax.text( 0.7, 0.9, '', transform=ax.transAxes)
def mouse_move(self, event):
if not event.inaxes: return
ax = event.inaxes
minx, maxx = ax.get_xlim()
miny, maxy = ax.get_ylim()
x, y = event.xdata, event.ydata
# update the line positions
self.lx.set_data( (minx, maxx), (y, y) )
self.ly.set_data( (x, x), (miny, maxy) )
self.txt.set_text( 'x=%1.2f, y=%1.2f'%(x,y) )
draw()
class SnaptoCursor:
"""
    Like Cursor but the crosshair snaps to the nearest x, y point.
    For simplicity, I'm assuming x is sorted.
"""
def __init__(self, ax, x, y):
self.ax = ax
self.lx, = ax.plot( (0,0), (0,0), 'k-' ) # the horiz line
self.ly, = ax.plot( (0,0), (0,0), 'k-' ) # the vert line
self.x = x
self.y = y
# text location in axes coords
self.txt = ax.text( 0.7, 0.9, '', transform=ax.transAxes)
def mouse_move(self, event):
if not event.inaxes: return
ax = event.inaxes
minx, maxx = ax.get_xlim()
miny, maxy = ax.get_ylim()
x, y = event.xdata, event.ydata
indx = searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
# update the line positions
self.lx.set_data( (minx, maxx), (y, y) )
self.ly.set_data( (x, x), (miny, maxy) )
self.txt.set_text( 'x=%1.2f, y=%1.2f'%(x,y) )
        print('x=%1.2f, y=%1.2f' % (x, y))
draw()
t = arange(0.0, 1.0, 0.01)
s = sin(2*2*pi*t)
ax = subplot(111)
cursor = Cursor(ax)
#cursor = SnaptoCursor(ax, t, s)
connect('motion_notify_event', cursor.mouse_move)
ax.plot(t, s, 'o')
axis([0,1,-1,1])
show()
| bsd-2-clause | -4,512,411,273,419,999,700 | 26.682927 | 70 | 0.551101 | false |
fhennecker/semiteleporter | src/demo/filter_demo.py | 1 | 2388 | import sys, cv2
import numpy as np
from matplotlib import pyplot as plt
def dia(img):
color = ('b','g','r')
for i,col in enumerate(color):
histr = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
def color(img_on, img_off):
img_off = cv2.imread(img_off)
img_on = cv2.imread(img_on)
img = np.array((np.array(img_on, dtype=np.int16)-np.array(img_off, dtype=np.int16)).clip(0,255), dtype=np.uint8)
img = cv2.medianBlur(img,3)
cv2.imshow("soustracted",cv2.resize(img, (img.shape[1]/2,img.shape[0]/2)))
cv2.waitKey(0)
lower = np.array([0, 0, 20], dtype=np.uint8)
upper = np.array([5, 5, 255], dtype=np.uint8)
mask0 = cv2.inRange(img, lower, upper)
cv2.imshow("color_threshold_red",cv2.resize(mask0, (img.shape[1]/2,img.shape[0]/2)))
cv2.waitKey(0)
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array([0, 170, 20], dtype=np.uint8)
upper = np.array([10, 255, 255], dtype=np.uint8)
mask1 = cv2.inRange(img_hsv, lower, upper)
lower = np.array([170, 170, 20], dtype=np.uint8)
upper = np.array([180, 255, 255], dtype=np.uint8)
mask2 = cv2.inRange(img_hsv, lower, upper)
mask = np.bitwise_or(mask1, mask2)
cv2.imshow("hsv_threshold_low_brightness",cv2.resize(mask, (mask.shape[1]/2,mask.shape[0]/2)))
cv2.waitKey(0)
lower = np.array([80, 0, 200], dtype=np.uint8)
upper = np.array([100, 255, 255], dtype=np.uint8)
mask3 = cv2.inRange(img_hsv, lower, upper)
cv2.imshow("hsv_threshold_high_brightness",cv2.resize(mask3, (mask3.shape[1]/2,mask3.shape[0]/2)))
cv2.waitKey(0)
mask = np.bitwise_or(mask, mask3)
mask = np.bitwise_or(mask0, mask)
mask = cv2.GaussianBlur(mask,(3,3),0)
mask = cv2.inRange(mask, np.array([250]), np.array([255]))
res = cv2.bitwise_and(img_on, img_on, mask=mask)
tmp = np.zeros(res.shape)
for line in range(res.shape[0]):
moments = cv2.moments(res[line,:,2])
if(moments['m00'] != 0):
tmp[line][round(moments['m01']/moments['m00'])] = [0,255,0]
cv2.imshow("hsv_or_color",cv2.resize(mask, (mask.shape[1]/2,mask.shape[0]/2)))
cv2.waitKey(0)
cv2.imshow("result",cv2.resize(tmp, (res.shape[1]/2,res.shape[0]/2)))
cv2.waitKey(0)
if("__main__" == __name__):
color(sys.argv[1], sys.argv[2])
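# Illustrative invocation (not in the original script): the script expects two image
# paths on the command line, the laser-on capture first and the laser-off capture
# second (example filenames are made up):
#   python filter_demo.py scan_laser_on.png scan_laser_off.png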
| mit | -6,153,915,798,834,953,000 | 33.114286 | 116 | 0.614322 | false |
cpcloud/dask | dask/dataframe/tests/test_format.py | 1 | 12438 | # coding: utf-8
import pandas as pd
import dask.dataframe as dd
def test_repr():
df = pd.DataFrame({'x': list(range(100))})
ddf = dd.from_pandas(df, 3)
for x in [ddf, ddf.index, ddf.x]:
assert type(x).__name__ in repr(x)
assert str(x.npartitions) in repr(x)
def test_repr_meta_mutation():
# Check that the repr changes when meta changes
df = pd.DataFrame({'a': range(5),
'b': ['a', 'b', 'c', 'd', 'e']})
ddf = dd.from_pandas(df, npartitions=2)
s1 = repr(ddf)
assert repr(ddf) == s1
ddf.b = ddf.b.astype('category')
assert repr(ddf) != s1
def test_dataframe_format():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8],
'B': list('ABCDEFGH'),
'C': pd.Categorical(list('AAABBBCC'))})
ddf = dd.from_pandas(df, 3)
exp = ("Dask DataFrame Structure:\n"
" A B C\n"
"npartitions=3 \n"
"0 int64 object category[known]\n"
"3 ... ... ...\n"
"6 ... ... ...\n"
"7 ... ... ...\n"
"Dask Name: from_pandas, 3 tasks")
assert repr(ddf) == exp
assert str(ddf) == exp
exp = (" A B C\n"
"npartitions=3 \n"
"0 int64 object category[known]\n"
"3 ... ... ...\n"
"6 ... ... ...\n"
"7 ... ... ...")
assert ddf.to_string() == exp
exp_table = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>A</th>
<th>B</th>
<th>C</th>
</tr>
<tr>
<th>npartitions=3</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>int64</td>
<td>object</td>
<td>category[known]</td>
</tr>
<tr>
<th>3</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>6</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>7</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
</tbody>
</table>"""
exp = """<div><strong>Dask DataFrame Structure:</strong></div>
{exp_table}
<div>Dask Name: from_pandas, 3 tasks</div>""".format(exp_table=exp_table)
assert ddf.to_html() == exp
# table is boxed with div
exp = """<div><strong>Dask DataFrame Structure:</strong></div>
<div>
{exp_table}
</div>
<div>Dask Name: from_pandas, 3 tasks</div>""".format(exp_table=exp_table)
assert ddf._repr_html_() == exp
def test_dataframe_format_with_index():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8],
'B': list('ABCDEFGH'),
'C': pd.Categorical(list('AAABBBCC'))},
index=list('ABCDEFGH'))
ddf = dd.from_pandas(df, 3)
exp = ("Dask DataFrame Structure:\n"
" A B C\n"
"npartitions=3 \n"
"A int64 object category[known]\n"
"D ... ... ...\n"
"G ... ... ...\n"
"H ... ... ...\n"
"Dask Name: from_pandas, 3 tasks")
assert repr(ddf) == exp
assert str(ddf) == exp
exp_table = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>A</th>
<th>B</th>
<th>C</th>
</tr>
<tr>
<th>npartitions=3</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>A</th>
<td>int64</td>
<td>object</td>
<td>category[known]</td>
</tr>
<tr>
<th>D</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>G</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>H</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
</tbody>
</table>"""
exp = """<div><strong>Dask DataFrame Structure:</strong></div>
{exp_table}
<div>Dask Name: from_pandas, 3 tasks</div>""".format(exp_table=exp_table)
assert ddf.to_html() == exp
# table is boxed with div
exp = """<div><strong>Dask DataFrame Structure:</strong></div>
<div>
{exp_table}
</div>
<div>Dask Name: from_pandas, 3 tasks</div>""".format(exp_table=exp_table)
assert ddf._repr_html_() == exp
def test_dataframe_format_unknown_divisions():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8],
'B': list('ABCDEFGH'),
'C': pd.Categorical(list('AAABBBCC'))})
ddf = dd.from_pandas(df, 3)
ddf = ddf.clear_divisions()
assert not ddf.known_divisions
exp = ("Dask DataFrame Structure:\n"
" A B C\n"
"npartitions=3 \n"
"None int64 object category[known]\n"
"None ... ... ...\n"
"None ... ... ...\n"
"None ... ... ...\n"
"Dask Name: from_pandas, 3 tasks")
assert repr(ddf) == exp
assert str(ddf) == exp
exp = (" A B C\n"
"npartitions=3 \n"
"None int64 object category[known]\n"
"None ... ... ...\n"
"None ... ... ...\n"
"None ... ... ...")
assert ddf.to_string() == exp
exp_table = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>A</th>
<th>B</th>
<th>C</th>
</tr>
<tr>
<th>npartitions=3</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>None</th>
<td>int64</td>
<td>object</td>
<td>category[known]</td>
</tr>
<tr>
<th>None</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>None</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>None</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
</tbody>
</table>"""
exp = """<div><strong>Dask DataFrame Structure:</strong></div>
{exp_table}
<div>Dask Name: from_pandas, 3 tasks</div>""".format(exp_table=exp_table)
assert ddf.to_html() == exp
# table is boxed with div
exp = """<div><strong>Dask DataFrame Structure:</strong></div>
<div>
{exp_table}
</div>
<div>Dask Name: from_pandas, 3 tasks</div>""".format(exp_table=exp_table)
assert ddf._repr_html_() == exp
def test_dataframe_format_long():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8] * 10,
'B': list('ABCDEFGH') * 10,
'C': pd.Categorical(list('AAABBBCC') * 10)})
ddf = dd.from_pandas(df, 10)
exp = ('Dask DataFrame Structure:\n'
' A B C\n'
'npartitions=10 \n'
'0 int64 object category[known]\n'
'8 ... ... ...\n'
'... ... ... ...\n'
'72 ... ... ...\n'
'79 ... ... ...\n'
'Dask Name: from_pandas, 10 tasks')
assert repr(ddf) == exp
assert str(ddf) == exp
exp = (" A B C\n"
"npartitions=10 \n"
"0 int64 object category[known]\n"
"8 ... ... ...\n"
"... ... ... ...\n"
"72 ... ... ...\n"
"79 ... ... ...")
assert ddf.to_string() == exp
exp_table = """<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>A</th>
<th>B</th>
<th>C</th>
</tr>
<tr>
<th>npartitions=10</th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>int64</td>
<td>object</td>
<td>category[known]</td>
</tr>
<tr>
<th>8</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>72</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>79</th>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
</tbody>
</table>"""
exp = """<div><strong>Dask DataFrame Structure:</strong></div>
{exp_table}
<div>Dask Name: from_pandas, 10 tasks</div>""".format(exp_table=exp_table)
assert ddf.to_html() == exp
# table is boxed with div
exp = u"""<div><strong>Dask DataFrame Structure:</strong></div>
<div>
{exp_table}
</div>
<div>Dask Name: from_pandas, 10 tasks</div>""".format(exp_table=exp_table)
assert ddf._repr_html_() == exp
def test_series_format():
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8],
index=list('ABCDEFGH'))
ds = dd.from_pandas(s, 3)
exp = """Dask Series Structure:
npartitions=3
A int64
D ...
G ...
H ...
dtype: int64
Dask Name: from_pandas, 3 tasks"""
assert repr(ds) == exp
assert str(ds) == exp
exp = """npartitions=3
A int64
D ...
G ...
H ..."""
assert ds.to_string() == exp
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8],
index=list('ABCDEFGH'), name='XXX')
ds = dd.from_pandas(s, 3)
exp = """Dask Series Structure:
npartitions=3
A int64
D ...
G ...
H ...
Name: XXX, dtype: int64
Dask Name: from_pandas, 3 tasks"""
assert repr(ds) == exp
assert str(ds) == exp
def test_series_format_long():
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10] * 10,
index=list('ABCDEFGHIJ') * 10)
ds = dd.from_pandas(s, 10)
exp = ("Dask Series Structure:\nnpartitions=10\nA int64\nB ...\n"
" ... \nJ ...\nJ ...\ndtype: int64\n"
"Dask Name: from_pandas, 10 tasks")
assert repr(ds) == exp
assert str(ds) == exp
exp = "npartitions=10\nA int64\nB ...\n ... \nJ ...\nJ ..."
assert ds.to_string() == exp
def test_index_format():
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8],
index=list('ABCDEFGH'))
ds = dd.from_pandas(s, 3)
exp = """Dask Index Structure:
npartitions=3
A object
D ...
G ...
H ...
dtype: object
Dask Name: from_pandas, 6 tasks"""
assert repr(ds.index) == exp
assert str(ds.index) == exp
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8],
index=pd.CategoricalIndex([1, 2, 3, 4, 5, 6, 7, 8], name='YYY'))
ds = dd.from_pandas(s, 3)
exp = """Dask Index Structure:
npartitions=3
1 category[known]
4 ...
7 ...
8 ...
Name: YYY, dtype: category
Dask Name: from_pandas, 6 tasks"""
assert repr(ds.index) == exp
assert str(ds.index) == exp
def test_categorical_format():
s = pd.Series(['a', 'b', 'c']).astype('category')
known = dd.from_pandas(s, npartitions=1)
unknown = known.cat.as_unknown()
exp = ("Dask Series Structure:\n"
"npartitions=1\n"
"0 category[known]\n"
"2 ...\n"
"dtype: category\n"
"Dask Name: from_pandas, 1 tasks")
assert repr(known) == exp
exp = ("Dask Series Structure:\n"
"npartitions=1\n"
"0 category[unknown]\n"
"2 ...\n"
"dtype: category\n"
"Dask Name: from_pandas, 1 tasks")
assert repr(unknown) == exp
| bsd-3-clause | 8,166,371,068,224,006,000 | 26.456954 | 86 | 0.406014 | false |
joferkington/tutorials | 1412_Tuning_and_AVO/tuning_wedge.py | 1 | 7788 | """
Python script to generate a zero-offset synthetic from a 3-layer wedge model.
Created by: Wes Hamlyn
Create Date: 19-Aug-2014
Last Mod: 1-Nov-2014
This script is provided without warranty of any kind.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
###########################################################
#
# DEFINE MODELING PARAMETERS HERE
#
# 3-Layer Model Parameters [Layer1, Layer2, Layer 3]
vp_mod = [2500.0, 2600.0, 2550.0] # P-wave velocity (m/s)
vs_mod = [1200.0, 1300.0, 1200.0] # S-wave velocity (m/s)
rho_mod= [1.95, 2.0, 1.98] # Density (g/cc)
dz_min = 0.0 # Minimum thickness of Layer 2 (m)
dz_max = 60.0 # Maximum thickness of Layer 2 (m)
dz_step= 1.0 # Thickness step from trace-to-trace (normally 1.0 m)
# Ricker Wavelet Parameters
wvlt_length= 0.128
wvlt_cfreq = 30.0
wvlt_phase = 0.0
# Trace Parameters
tmin = 0.0
tmax = 0.5
dt = 0.0001 # changing this from 0.0001 can affect the display quality
# Plot Parameters
min_plot_time = 0.15
max_plot_time = 0.3
excursion = 2
###########################################################
#
# FUNCTIONS DEFINITIONS
#
def plot_vawig(axhdl, data, t, excursion, highlight=None):
import numpy as np
import matplotlib.pyplot as plt
[ntrc, nsamp] = data.shape
t = np.hstack([0, t, t.max()])
for i in range(0, ntrc):
tbuf = excursion * data[i] / np.max(np.abs(data)) + i
tbuf = np.hstack([i, tbuf, i])
if i==highlight:
lw = 2
else:
lw = 0.5
axhdl.plot(tbuf, t, color='black', linewidth=lw)
plt.fill_betweenx(t, tbuf, i, where=tbuf>i, facecolor=[0.6,0.6,1.0], linewidth=0)
plt.fill_betweenx(t, tbuf, i, where=tbuf<i, facecolor=[1.0,0.7,0.7], linewidth=0)
axhdl.set_xlim((-excursion, ntrc+excursion))
axhdl.xaxis.tick_top()
axhdl.xaxis.set_label_position('top')
axhdl.invert_yaxis()
def ricker(cfreq, phase, dt, wvlt_length):
'''
Calculate a zero-phase ricker wavelet
Usage:
------
    t, wvlt = ricker(cfreq, phase, dt, wvlt_length)
cfreq: central frequency of wavelet in Hz
phase: wavelet phase in degrees
dt: sample rate in seconds
wvlt_length: length of wavelet in seconds
'''
import numpy as np
import scipy.signal as signal
    # build the time axis directly (avoids passing a float sample count to
    # linspace and discarding an intermediate np.arange result)
    nsamp = int(round(wvlt_length / dt))
    t = np.linspace(-wvlt_length / 2, (wvlt_length - dt) / 2, nsamp)
wvlt = (1.0 - 2.0*(np.pi**2)*(cfreq**2)*(t**2)) * np.exp(-(np.pi**2)*(cfreq**2)*(t**2))
if phase != 0:
phase = phase*np.pi/180.0
wvlth = signal.hilbert(wvlt)
wvlth = np.imag(wvlth)
wvlt = np.cos(phase)*wvlt - np.sin(phase)*wvlth
return t, wvlt
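# Concrete usage sketch (added for illustration), instantiated with the wavelet
# parameters defined at the top of this script:
#   wvlt_t, wvlt_amp = ricker(cfreq=30.0, phase=0.0, dt=0.0001, wvlt_length=0.128)
# wvlt_t / wvlt_amp can then be plotted directly to inspect the wavelet.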
def calc_rc(vp_mod, rho_mod):
'''
rc_int = calc_rc(vp_mod, rho_mod)
'''
nlayers = len(vp_mod)
nint = nlayers - 1
rc_int = []
for i in range(0, nint):
buf1 = vp_mod[i+1]*rho_mod[i+1]-vp_mod[i]*rho_mod[i]
buf2 = vp_mod[i+1]*rho_mod[i+1]+vp_mod[i]*rho_mod[i]
buf3 = buf1/buf2
rc_int.append(buf3)
return rc_int
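# Note added for clarity (not in the original script): calc_rc implements the
# normal-incidence reflection coefficient from acoustic impedance Z = Vp * rho:
#   RC_i = (Z_(i+1) - Z_i) / (Z_(i+1) + Z_i)
# With the model parameters above, the upper interface gives
#   RC_1 = (2600*2.0 - 2500*1.95) / (2600*2.0 + 2500*1.95) ~ 0.032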
def calc_times(z_int, vp_mod):
'''
t_int = calc_times(z_int, vp_mod)
'''
nlayers = len(vp_mod)
nint = nlayers - 1
t_int = []
for i in range(0, nint):
if i == 0:
tbuf = z_int[i]/vp_mod[i]
t_int.append(tbuf)
else:
zdiff = z_int[i]-z_int[i-1]
tbuf = 2*zdiff/vp_mod[i] + t_int[i-1]
t_int.append(tbuf)
return t_int
def digitize_model(rc_int, t_int, t):
'''
    rc = digitize_model(rc_int, t_int, t)
    rc_int = reflection coefficients corresponding to interface times
t_int = interface times
t = regularly sampled time series defining model sampling
'''
import numpy as np
nlayers = len(rc_int)
nint = nlayers - 1
nsamp = len(t)
rc = list(np.zeros(nsamp,dtype='float'))
lyr = 0
for i in range(0, nsamp):
if t[i] >= t_int[lyr]:
rc[i] = rc_int[lyr]
lyr = lyr + 1
if lyr > nint:
break
return rc
##########################################################
#
# COMPUTATIONS BELOW HERE...
#
# Some handy constants
nlayers = len(vp_mod)
nint = nlayers - 1
nmodel = int((dz_max-dz_min)/dz_step+1)
# Generate ricker wavelet
wvlt_t, wvlt_amp = ricker(wvlt_cfreq, wvlt_phase, dt, wvlt_length)
# Calculate reflectivities from model parameters
rc_int = calc_rc(vp_mod, rho_mod)
syn_zo = []
rc_zo = []
lyr_times = []
for model in range(0, nmodel):
# Calculate interface depths
z_int = [500.0]
z_int.append(z_int[0]+dz_min+dz_step*model)
# Calculate interface times
t_int = calc_times(z_int, vp_mod)
lyr_times.append(t_int)
# Digitize 3-layer model
nsamp = int((tmax-tmin)/dt) + 1
t = []
for i in range(0,nsamp):
t.append(i*dt)
rc = digitize_model(rc_int, t_int, t)
rc_zo.append(rc)
# Convolve wavelet with reflectivities
syn_buf = np.convolve(rc, wvlt_amp, mode='same')
syn_buf = list(syn_buf)
syn_zo.append(syn_buf)
print "finished step %i" % (model)
syn_zo = np.array(syn_zo)
t = np.array(t)
lyr_times = np.array(lyr_times)
lyr_indx = np.array(np.round(lyr_times/dt), dtype='int16')
# Use the transpose because rows are traces;
# columns are time samples.
tuning_trace = np.argmax(np.abs(syn_zo.T)) % syn_zo.T.shape[1]
tuning_thickness = tuning_trace * dz_step
# Plotting Code
[ntrc, nsamp] = syn_zo.shape
fig = plt.figure(figsize=(12, 14))
fig.set_facecolor('white')
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 1, 1])
ax0 = fig.add_subplot(gs[0])
ax0.plot(lyr_times[:,0], color='blue', lw=1.5)
ax0.plot(lyr_times[:,1], color='red', lw=1.5)
ax0.set_ylim((min_plot_time,max_plot_time))
ax0.invert_yaxis()
ax0.set_xlabel('Thickness (m)')
ax0.set_ylabel('Time (s)')
plt.text(2,
min_plot_time + (lyr_times[0,0] - min_plot_time)/2.,
'Layer 1',
fontsize=16)
plt.text(dz_max/dz_step - 2,
lyr_times[-1,0] + (lyr_times[-1,1] - lyr_times[-1,0])/2.,
'Layer 2',
fontsize=16,
horizontalalignment='right')
plt.text(2,
lyr_times[0,0] + (max_plot_time - lyr_times[0,0])/2.,
'Layer 3',
fontsize=16)
plt.gca().xaxis.tick_top()
plt.gca().xaxis.set_label_position('top')
ax0.set_xlim((-excursion, ntrc+excursion))
ax1 = fig.add_subplot(gs[1])
plot_vawig(ax1, syn_zo, t, excursion, highlight=tuning_trace)
ax1.plot(lyr_times[:,0], color='blue', lw=1.5)
ax1.plot(lyr_times[:,1], color='red', lw=1.5)
ax1.set_ylim((min_plot_time,max_plot_time))
ax1.invert_yaxis()
ax1.set_xlabel('Thickness (m)')
ax1.set_ylabel('Time (s)')
ax2 = fig.add_subplot(gs[2])
ax2.plot(syn_zo[:,lyr_indx[:,0]], color='blue')
ax2.set_xlim((-excursion, ntrc+excursion))
ax2.axvline(tuning_trace, color='k', lw=2)
ax2.grid()
ax2.set_title('Upper interface amplitude')
ax2.set_xlabel('Thickness (m)')
ax2.set_ylabel('Amplitude')
plt.text(tuning_trace + 2,
plt.ylim()[0] * 1.1,
'tuning thickness = {0} m'.format(str(tuning_thickness)),
fontsize=16)
plt.savefig('figure_1.png')
plt.show()
| apache-2.0 | 5,684,026,571,111,477,000 | 22.72381 | 91 | 0.54905 | false |
eubr-bigsea/tahiti | migrations/versions/d7432648e1ea_sklearn_crossvalidation.py | 1 | 17876 | # -*- coding: utf-8 -*-
"""Sklearn operations
Revision ID: d7432648e1ea
Revises: 5430536464c7
Create Date: 2018-09-13 10:42:09.555626
"""
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = 'd7432648e1ea'
down_revision = '5430536464c7'
branch_labels = None
depends_on = None
def _insert_operation():
tb = table('operation',
column("id", Integer),
column("slug", String),
column('enabled', Integer),
column('type', String),
column('icon', String),
)
columns = ['id', 'slug', 'enabled', 'type', 'icon']
data = [
(4018, 'execute-sql', '1', 'TRANSFORMATION', 'fa-bolt'),
(4019, 'mlp-classifier', 1, 'TRANSFORMATION', 'fa-code-branch'),
(4020, 'mlp-regressor', 1, 'TRANSFORMATION', 'fa-code-branch'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_new_operation_platform():
tb = table(
'operation_platform',
column('operation_id', Integer),
column('platform_id', Integer))
columns = ('operation_id', 'platform_id')
data = [
(18, 4), # spark data-reader
(43, 4), # cross-validation
(82, 4), # execute-python
(4018, 4), # execute-sql
(4019, 4),
(4020, 4),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_category_operation():
tb = table(
'operation_category_operation',
column('operation_id', Integer),
column('operation_category_id', Integer))
columns = ('operation_id', 'operation_category_id')
data = [
(4018, 4001), # execute-sql
(4018, 7), # execute-sql
(4019, 8),
(4019, 18), #classifier
(4019, 4001),
(4020, 8),
(4020, 21), # regressors
(4020, 4001),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_form():
operation_form_table = table(
'operation_form',
column('id', Integer),
column('enabled', Integer),
column('order', Integer),
column('category', String), )
columns = ('id', 'enabled', 'order', 'category')
data = [
(4018, 1, 1, 'execution'),
(4019, 1, 1, 'execution'),
(4020, 1, 1, 'execution'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(operation_form_table, rows)
def _insert_operation_form_translation():
tb = table(
'operation_form_translation',
column('id', Integer),
column('locale', String),
column('name', String))
columns = ('id', 'locale', 'name')
data = [
(4018, 'en', 'Execution'),
(4018, 'pt', 'Execução'),
(4019, 'en', 'Execution'),
(4019, 'pt', 'Execução'),
(4020, 'en', 'Execution'),
(4020, 'pt', 'Execução'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_operation_form():
tb = table(
'operation_operation_form',
column('operation_id', Integer),
column('operation_form_id', Integer))
columns = ('operation_id', 'operation_form_id')
data = [
(4018, 41),
(4018, 110),
(4018, 4018),
(4019, 41),
(4019, 4019),
(4020, 41),
(4020, 4020),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_translation():
tb = table(
'operation_translation',
column('id', Integer),
column('locale', String),
column('name', String),
column('description', String), )
columns = ('id', 'locale', 'name', 'description')
data = [
(4018, 'pt', 'Executar consulta SQL',
'Executa uma consulta usando a linguagem SQL disponível no Pandas '
'SQL.'),
(4018, 'en', 'Execute SQL query',
'Executes a query using SQL language available in Pandas SQL.'),
(4019, 'pt', 'Classificador Perceptron multicamadas',
'Classificador Perceptron multicamadas.'),
(4019, 'en', 'Multi-layer Perceptron classifier',
'Multi-layer Perceptron classifier.'),
(4020, 'pt', 'Regressor Perceptron multicamadas',
'Regressor Perceptron multicamadas.'),
(4020, 'en', 'Multi-layer Perceptron Regressor',
'Multi-layer Perceptron Regressor.')
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_port():
tb = table(
'operation_port',
column('id', Integer),
column('type', String),
column('tags', String),
column('operation_id', Integer),
column('order', Integer),
column('multiplicity', String),
column('slug', String), )
columns = [c.name for c in tb.columns]
data = [
        (4025, 'INPUT', None, 4018, 1, 'ONE', 'input data 1'),
(4026, 'INPUT', None, 4018, 2, 'ONE', 'input data 2'),
(4027, 'OUTPUT', None, 4018, 1, 'MANY', 'output data'),
(4028, 'OUTPUT', None, 4019, 1, 'MANY', 'algorithm'),
(4029, 'OUTPUT', None, 4020, 1, 'MANY', 'algorithm'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_port_translation():
tb = table(
'operation_port_translation',
column('id', Integer),
column('locale', String),
column('name', String),
column('description', String), )
columns = ('id', 'locale', 'name', 'description')
data = [
(4025, 'en', 'input data 1', 'Input data 1'),
(4025, 'pt', 'dados de entrada 1', 'Input data 1'),
(4026, 'en', 'input data 2', 'Input data 2'),
(4026, 'pt', 'dados de entrada 2', 'Input data 2'),
(4027, 'en', 'output data', 'Output data'),
(4027, 'pt', 'dados de saída', 'Dados de saída'),
(4028, 'en', 'algorithm', 'Untrained classification model'),
(4028, 'pt', 'algoritmo', 'Modelo de classificação não treinado'),
(4029, 'en', 'algorithm', 'Untrained regressor model'),
(4029, 'pt', 'algoritmo', 'Modelo de regressão não treinado'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_port_interface_operation_port():
tb = table(
'operation_port_interface_operation_port',
column('operation_port_id', Integer),
column('operation_port_interface_id', Integer), )
columns = ('operation_port_id', 'operation_port_interface_id')
data = [
(4025, 1),
(4026, 1),
(4027, 1),
(4028, 5), # ClassificationAlgorithm
(4029, 17), # IRegressionAlgorithm
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_form_field():
tb = table(
'operation_form_field',
column('id', Integer),
column('name', String),
column('type', String),
column('required', Integer),
column('order', Integer),
column('default', Text),
column('suggested_widget', String),
column('values_url', String),
column('values', String),
column('scope', String),
column('form_id', Integer), )
columns = ('id', 'name', 'type', 'required', 'order', 'default',
'suggested_widget', 'values_url', 'values', 'scope', 'form_id')
data = [
(4089, 'query', 'TEXT', 1, 1, None, 'code', None, None, 'EXECUTION',
4018),
(4090, 'names', 'TEXT', 0, 2, None, 'text', None, None, 'EXECUTION',
4018),
# mpl-classifier
(4091, 'layer_sizes', 'TEXT', 1, 1, '(1,100,1)', 'text', None, None,
'EXECUTION',
4019),
(4092, 'activation', 'TEXT', 0, 2, 'relu', 'dropdown', None,
'[{"key": \"identity\", \"value\": \"identity\"}, '
'{\"key\": \"logistic\", \"value\": \"logistic\"}, '
'{\"key\": \"tanh\", \"value\": \"tanh\"}, '
'{\"key\": \"relu\", \"value\": \"relu\"}]', 'EXECUTION', 4019),
(4093, 'solver', 'TEXT', 0, 3, 'adam', 'dropdown', None,
'[{"key": \"lbfgs\", \"value\": \"lbfgs\"}, '
'{\"key\": \"sgd\", \"value\": \"sgd\"}, '
'{\"key\": \"adam\", \"value\": \"adam\"}]', 'EXECUTION', 4019),
(4094, 'alpha', 'FLOAT', 0, 4, 0.0001, 'decimal', None, None, 'EXECUTION',
4019),
(4095, 'max_iter', 'INTEGER', 0, 5, 200, 'integer', None, None,
'EXECUTION', 4019),
(4096, 'tol', 'FLOAT', 0, 6, 0.0001, 'decimal', None, None, 'EXECUTION',
4019),
(4097, 'seed', 'INTEGER', 0, 7, None, 'integer', None, None,
'EXECUTION', 4019),
# mpl-regressor
(4098, 'layer_sizes', 'TEXT', 1, 1, '(1,100,1)', 'text', None, None,
'EXECUTION',
4020),
(4099, 'activation', 'TEXT', 0, 2, 'relu', 'dropdown', None,
'[{"key": \"identity\", \"value\": \"identity\"}, '
'{\"key\": \"logistic\", \"value\": \"logistic\"}, '
'{\"key\": \"tanh\", \"value\": \"tanh\"}, '
'{\"key\": \"relu\", \"value\": \"relu\"}]', 'EXECUTION', 4020),
(4100, 'solver', 'TEXT', 0, 3, 'adam', 'dropdown', None,
'[{"key": \"lbfgs\", \"value\": \"lbfgs\"}, '
'{\"key\": \"sgd\", \"value\": \"sgd\"}, '
'{\"key\": \"adam\", \"value\": \"adam\"}]', 'EXECUTION', 4020),
(4101, 'alpha', 'FLOAT', 0, 4, 0.0001, 'decimal', None, None, 'EXECUTION',
4020),
(4102, 'max_iter', 'INTEGER', 0, 5, 200, 'integer', None, None,
'EXECUTION', 4020),
(4103, 'tol', 'FLOAT', 0, 6, 0.0001, 'decimal', None, None, 'EXECUTION',
4020),
(4104, 'seed', 'INTEGER', 0, 7, None, 'integer', None, None,
'EXECUTION', 4020),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
tb = table(
'operation_form_field_translation',
column('id', Integer),
column('locale', String),
column('label', String),
column('help', String), )
columns = ('id', 'locale', 'label', 'help')
data = [
(4089, 'en',
'SQL Query, (inputs are available as tables named ds1 and ds2)',
         'SQL query compatible with SQLite syntax. For more information, '
'see https://www.sqlite.org/lang.html or '
'https://github.com/yhat/pandasql.'),
(4089, 'pt',
'Consulta (entradas estão disponíveis como tabelas chamadas ds1 e '
'ds2)',
         'Consulta SQL compatível com a sintaxe do SQLite. Para mais informações, '
'veja https://www.sqlite.org/lang.html ou '
'https://github.com/yhat/pandasql.'),
(4090, 'en', 'Names of attributes after the query',
'Name of the new attributes after executing the query (optional, '
'helps attribute suggestion).'),
(4090, 'pt', 'Nome dos novos atributos após a consulta',
'Nome dos novos atributos após executar a consulta (opcional. auxilia '
'na sugestão de atributos).'),
(4091, 'en', 'Layer sizes',
         'The ith element represents the number of neurons in the ith hidden layer.'),
(4092, 'en', 'Activation', 'Activation function for the hidden layer.'),
(4093, 'en', 'Solver', 'The solver for weight optimization.'),
(4094, 'en', 'Alpha', 'L2 penalty (regularization term) parameter.'),
(4095, 'en', 'Maximum number of iterations',
'The solver iterates until convergence or this number of iterations.'),
(4096, 'en', 'Tolerance', 'Tolerance for the optimization.'),
(4097, 'en', 'Seed', 'Seed used by the random number generator.'),
(4091, 'pt', 'Tamanhos das Camadas',
'O elemento de ordem i representa o número de neurónios.'),
(4092, 'pt', 'Ativação', 'Função de ativação para a camada oculta.'),
(4093, 'pt', 'Solver', 'O solucionador para otimização de peso.'),
(4094, 'pt', 'Alpha',
'Parâmetro de penalidade L2 (termo de regularização).'),
(4095, 'pt', 'Número máximo de iterações',
'O solucionador itera até a convergência ou esse número de '
'iterações.'),
(4096, 'pt', 'Tolerância', 'Tolerância para a otimização.'),
(4097, 'pt', 'Semente',
'Semente usada pelo gerador de números aleatórios.'),
(4098, 'en', 'Layer sizes',
         'The ith element represents the number of neurons in the ith hidden layer.'),
(4099, 'en', 'Activation', 'Activation function for the hidden layer.'),
(4100, 'en', 'Solver', 'The solver for weight optimization.'),
(4101, 'en', 'Alpha', 'L2 penalty (regularization term) parameter.'),
(4102, 'en', 'Maximum number of iterations',
'The solver iterates until convergence or this number of iterations.'),
(4103, 'en', 'Tolerance', 'Tolerance for the optimization.'),
(4104, 'en', 'Seed', 'Seed used by the random number generator.'),
(4098, 'pt', 'Tamanhos das Camadas',
'O elemento de ordem i representa o número de neurónios.'),
(4099, 'pt', 'Ativação', 'Função de ativação para a camada oculta.'),
(4100, 'pt', 'Solver', 'O solucionador para otimização de peso.'),
(4101, 'pt', 'Alpha',
'Parâmetro de penalidade L2 (termo de regularização).'),
(4102, 'pt', 'Número máximo de iterações',
'O solucionador itera até a convergência ou esse número de '
'iterações.'),
(4103, 'pt', 'Tolerância', 'Tolerância para a otimização.'),
(4104, 'pt', 'Semente',
'Semente usada pelo gerador de números aleatórios.'),
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
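# Each entry below pairs a forward action (one of the _insert_* helpers or a raw SQL
# string) with the SQL that rolls it back: upgrade() executes the first element of
# every tuple in order, downgrade() executes the second element in reverse order.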
all_commands = [
(_insert_operation, 'DELETE FROM operation WHERE id BETWEEN 4018 AND 4020'),
(_insert_new_operation_platform,
'DELETE FROM operation_platform WHERE operation_id = 18 AND '
'platform_id = 4;'
'DELETE FROM operation_platform WHERE operation_id = 43 AND '
'platform_id = 4;'
'DELETE FROM operation_platform WHERE operation_id = 82 AND '
'platform_id = 4;'
'DELETE FROM operation_platform WHERE operation_id BETWEEN 4018 AND 4020'
),
(_insert_operation_category_operation,
'DELETE FROM operation_category_operation '
'WHERE operation_id BETWEEN 4018 AND 4020'),
(_insert_operation_form,
'DELETE FROM operation_form WHERE id BETWEEN 4018 AND 4020'),
(_insert_operation_form_translation,
'DELETE FROM operation_form_translation WHERE id BETWEEN 4018 AND 4020'),
(_insert_operation_operation_form,
'DELETE FROM operation_operation_form '
'WHERE operation_id BETWEEN 4018 AND 4020'),
(_insert_operation_translation,
'DELETE FROM operation_translation WHERE id BETWEEN 4018 AND 4020'),
(_insert_operation_port,
'DELETE FROM operation_port WHERE id BETWEEN 4025 AND 4029'),
(_insert_operation_port_translation,
'DELETE FROM operation_port_translation WHERE id BETWEEN 4025 AND 4029'),
(_insert_operation_port_interface_operation_port,
'DELETE FROM operation_port_interface_operation_port WHERE '
'operation_port_id BETWEEN 4025 AND 4029'),
(_insert_operation_form_field,
'DELETE FROM operation_form_field WHERE id BETWEEN 4089 AND 4104'),
(_insert_operation_form_field_translation,
'DELETE FROM operation_form_field_translation '
'WHERE id BETWEEN 4089 AND 4104'),
("""
DELETE FROM operation_platform WHERE operation_id = 3001 AND
platform_id = 4;
UPDATE operation_category_operation
SET operation_category_id = 16
WHERE operation_id = 3004 AND operation_category_id = 12;
""",
"""
INSERT INTO operation_platform (operation_id, platform_id)
VALUES (3001, 4);
"""),
]
def upgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
for cmd in all_commands:
if isinstance(cmd[0], str):
cmds = cmd[0].split(';')
for new_cmd in cmds:
if new_cmd.strip():
connection.execute(new_cmd)
elif isinstance(cmd[0], list):
for row in cmd[0]:
connection.execute(row)
else:
cmd[0]()
except:
session.rollback()
raise
session.commit()
def downgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
for cmd in reversed(all_commands):
if isinstance(cmd[1], str):
cmds = cmd[1].split(';')
for new_cmd in cmds:
if new_cmd.strip():
connection.execute(new_cmd)
elif isinstance(cmd[1], list):
for row in cmd[1]:
connection.execute(row)
else:
cmd[1]()
except:
session.rollback()
raise
    session.commit()
| apache-2.0 | 5,190,015,867,149,415,000 | 34.526946 | 82 | 0.552253 | false |
juanshishido/info290-dds | assignments/assignment02/code/permutation.py | 1 | 1173 | import numpy as np
import pandas as pd
def load():
features = pd.read_csv('../data/movie.features.txt', sep='\t', header=None,
names=['feature', 'movie_id'], usecols=[0,2])
box_office = pd.read_csv('../data/movie.box_office.txt',
sep='\t', header=None, names=['movie_id', 'hit'])
return features, box_office
def merge(f, b):
f = f[f.feature == 'John_Goodman'][['movie_id']]
f.drop_duplicates(inplace=True)
f['John_Goodman'] = 1
movie = pd.merge(f, b, on='movie_id', how='outer')
movie['John_Goodman'].fillna(0, inplace=True)
return movie
def _to_arr(df, target):
df = df.copy()
df.sort_values(by=target, ascending=False, inplace=True)
arr = df.hit.values
n = df[target].sum()
return arr, n
def _tstat(arr, n):
return abs(np.mean(arr[:n]) - np.mean(arr[n:]))
def permute(df, target, permutations):
np.random.seed(42)
arr, n = _to_arr(df, target)
baseline = _tstat(arr, n)
v = []
for _ in range(permutations):
np.random.shuffle(arr)
v.append(_tstat(arr, n))
return (baseline <= np.array(v)).sum() / permutations
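# Illustrative usage sketch (not part of the original script), chaining the helpers
# above; the relative data paths inside load() are assumed to exist:
#   features, box_office = load()
#   movie = merge(features, box_office)
#   p_value = permute(movie, 'John_Goodman', permutations=10000)
#   print(p_value)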
| mit | -1,817,238,672,682,941,000 | 29.868421 | 80 | 0.578858 | false |
ghostop14/sparrow-wifi | telemetry.py | 1 | 32406 | #!/usr/bin/python3
#
# Copyright 2017 ghostop14
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from PyQt5.QtWidgets import QDialog, QApplication,QDesktopWidget
from PyQt5.QtWidgets import QTableWidget, QHeaderView,QTableWidgetItem, QMessageBox, QFileDialog, QMenu, QAction
# from PyQt5.QtWidgets import QLabel, QComboBox, QLineEdit, QPushButton, QFileDialog
#from PyQt5.QtCore import Qt
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtChart import QChart, QChartView, QLineSeries, QValueAxis
from PyQt5.QtGui import QPen, QFont, QBrush, QColor, QPainter
from PyQt5.QtWidgets import QPushButton
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from sparrowtablewidgets import IntTableWidgetItem, FloatTableWidgetItem, DateTableWidgetItem
from threading import Lock
# from wirelessengine import WirelessNetwork
# https://matplotlib.org/examples/user_interfaces/embedding_in_qt5.html
class RadarWidget(FigureCanvas):
def __init__(self, parent=None, useBlackoutColors=True, width=4, height=4, dpi=100):
# fig = Figure(figsize=(width, height), dpi=dpi)
# self.axes = fig.add_subplot(111)
# -----------------------------------------------------------
# fig = plt.figure()
# useBlackoutColors = False
self.useBlackoutColors = useBlackoutColors
if self.useBlackoutColors:
self.fontColor = 'white'
self.backgroundColor = 'black'
else:
self.fontColor = 'black'
self.backgroundColor = 'white'
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.fig.patch.set_facecolor(self.backgroundColor)
# "axisbg was deprecated, use facecolor instead"
# self.axes = self.fig.add_subplot(111, polar=True, axisbg=self.backgroundColor)
self.axes = self.fig.add_subplot(111, polar=True, facecolor=self.backgroundColor)
# Angle: np.linspace(0, 2*np.pi, 100)
# Radius: np.ones(100)*5
# ax.plot(np.linspace(0, 2*np.pi, 100), np.ones(100)*5, color='r', linestyle='-')
# Each of these use 100 points. linespace creates the angles 0-2 PI with 100 points
# np.ones creates a 100 point array filled with 1's then multiplies that by the scalar 5
# Create an "invisible" line at 100 to set the max for the plot
self.axes.plot(np.linspace(0, 2*np.pi, 100), np.ones(100)*100, color=self.fontColor, linestyle='')
# Plot line: Initialize out to 100 and blank
radius = 100
self.blackline = self.axes.plot(np.linspace(0, 2*np.pi, 100), np.ones(100)*radius, color=self.fontColor, linestyle='-')
self.redline = None
# Plot a filled circle
# http://nullege.com/codes/search/matplotlib.pyplot.Circle
# Params are: Cartesian coord of center, radius, etc...
# circle = plt.Circle((0.0, 0.0), radius, transform=self.axes.transData._b, color="red", alpha=0.4)
# self.filledcircle = self.axes.add_artist(circle)
self.filledcircle = None
# Create bullseye
circle = plt.Circle((0.0, 0.0), 20, transform=self.axes.transData._b, color=self.fontColor, alpha=0.4)
self.bullseye = self.axes.add_artist(circle)
# Rotate zero up
self.axes.set_theta_zero_location("N")
self.axes.set_yticklabels(['-20', '-40', '-60', '-80', '-100'], color=self.fontColor)
# plt.show()
# -----------------------------------------------------------
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
self.title = self.fig.suptitle('Tracker', fontsize=8, fontweight='bold', color=self.fontColor)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def updateData(self, radius):
if self.redline is not None:
self.redline.pop(0).remove()
self.redline = self.axes.plot(np.linspace(0, 2*np.pi, 100), np.ones(100)*radius, color='r', linestyle='-')
if self.filledcircle:
self.filledcircle.remove()
self.bullseye.remove()
circle = plt.Circle((0.0, 0.0), radius, transform=self.axes.transData._b, color="red", alpha=0.4)
self.filledcircle = self.axes.add_artist(circle)
# Create bullseye
circle = plt.Circle((0.0, 0.0), 20, transform=self.axes.transData._b, color=self.fontColor, alpha=0.4)
self.bullseye = self.axes.add_artist(circle)
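# Illustrative sketch (not part of the original class): RadarWidget is a plain
# FigureCanvas/QWidget, so it can be exercised on its own, assuming a running
# QApplication:
#   app = QApplication([])
#   radar = RadarWidget()
#   radar.updateData(65)   # draw the ring for a -65 dBm signal (passed as a positive radius)
#   radar.show()
#   app.exec_()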
class TelemetryDialog(QDialog):
resized = QtCore.pyqtSignal()
visibility = QtCore.pyqtSignal(bool)
def __init__(self, winTitle = "Network Telemetry", parent = None):
super(TelemetryDialog, self).__init__(parent)
self.visibility.connect(self.onVisibilityChanged)
self.winTitle = winTitle
self.updateLock = Lock()
# Used to detect network change
self.lastNetKey = ""
self.lastSeen = None
self.maxPoints = 20
self.maxRowPoints = 60
self.paused = False
self.streamingSave = False
self.streamingFile = None
self.linesBeforeFlush = 10
self.currentLine = 0
# OK and Cancel buttons
#buttons = QDialogButtonBox(QDialogButtonBox.Ok,Qt.Horizontal, self)
#buttons.accepted.connect(self.accept)
#buttons.move(170, 280)
desktopSize = QApplication.desktop().screenGeometry()
#self.mainWidth=1024
#self.mainHeight=768
#self.mainWidth = desktopSize.width() * 3 / 4
#self.mainHeight = desktopSize.height() * 3 / 4
self.setGeometry(self.geometry().x(), self.geometry().y(), desktopSize.width() /2,desktopSize.height() /2)
self.setWindowTitle(winTitle)
self.radar = RadarWidget(self)
self.radar.setGeometry(self.geometry().width()/2, 10, self.geometry().width()/2-20, self.geometry().width()/2-20)
self.createTable()
self.btnExport = QPushButton("Export Table", self)
self.btnExport.clicked[bool].connect(self.onExportClicked)
self.btnExport.setStyleSheet("background-color: rgba(2,128,192,255);")
self.btnPause = QPushButton("Pause Table", self)
self.btnPause.setCheckable(True)
self.btnPause.clicked[bool].connect(self.onPauseClicked)
self.btnPause.setStyleSheet("background-color: rgba(2,128,192,255);")
self.btnStream = QPushButton("Streaming Save", self)
self.btnStream.setCheckable(True)
self.btnStream.clicked[bool].connect(self.onStreamClicked)
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
self.createChart()
self.setBlackoutColors()
self.setMinimumWidth(600)
self.setMinimumHeight(600)
self.center()
def createTable(self):
# Set up location table
self.locationTable = QTableWidget(self)
self.locationTable.setColumnCount(8)
self.locationTable.setGeometry(10, 10, self.geometry().width()/2-20, self.geometry().height()/2)
self.locationTable.setShowGrid(True)
self.locationTable.setHorizontalHeaderLabels(['macAddr','SSID', 'Strength', 'Timestamp','GPS', 'Latitude', 'Longitude', 'Altitude'])
self.locationTable.resizeColumnsToContents()
self.locationTable.setRowCount(0)
self.locationTable.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)
self.ntRightClickMenu = QMenu(self)
newAct = QAction('Copy', self)
newAct.setStatusTip('Copy data to clipboard')
newAct.triggered.connect(self.onCopy)
self.ntRightClickMenu.addAction(newAct)
self.locationTable.setContextMenuPolicy(Qt.CustomContextMenu)
self.locationTable.customContextMenuRequested.connect(self.showNTContextMenu)
def setBlackoutColors(self):
self.locationTable.setStyleSheet("QTableView {background-color: black;gridline-color: white;color: white} QTableCornerButton::section{background-color: white;}")
headerStyle = "QHeaderView::section{background-color: white;border: 1px solid black;color: black;} QHeaderView::down-arrow,QHeaderView::up-arrow {background: none;}"
self.locationTable.horizontalHeader().setStyleSheet(headerStyle)
self.locationTable.verticalHeader().setStyleSheet(headerStyle)
mainTitleBrush = QBrush(Qt.red)
self.timeChart.setTitleBrush(mainTitleBrush)
self.timeChart.setBackgroundBrush(QBrush(Qt.black))
self.timeChart.axisX().setLabelsColor(Qt.white)
self.timeChart.axisY().setLabelsColor(Qt.white)
titleBrush = QBrush(Qt.white)
self.timeChart.axisX().setTitleBrush(titleBrush)
self.timeChart.axisY().setTitleBrush(titleBrush)
def resizeEvent(self, event):
wDim = self.geometry().width()/2-20
hDim = self.geometry().height()/2
smallerDim = wDim
if hDim < smallerDim:
smallerDim = hDim
# Radar
self.radar.setGeometry(self.geometry().width() - smallerDim - 10, 10, smallerDim, smallerDim)
# chart
self.timePlot.setGeometry(10, 10, self.geometry().width() - smallerDim - 30, smallerDim)
# Buttons
self.btnPause.setGeometry(10, self.geometry().height()/2+18, 110, 25)
self.btnExport.setGeometry(150, self.geometry().height()/2+18, 110, 25)
self.btnStream.setGeometry(290, self.geometry().height()/2+18, 110, 25)
# Table
self.locationTable.setGeometry(10, self.geometry().height()/2 + 50, self.geometry().width()-20, self.geometry().height()/2-60)
def center(self):
# Get our geometry
qr = self.frameGeometry()
# Find the desktop center point
cp = QDesktopWidget().availableGeometry().center()
# Move our center point to the desktop center point
qr.moveCenter(cp)
# Move the top-left point of the application window to the top-left point of the qr rectangle,
# basically centering the window
self.move(qr.topLeft())
def showNTContextMenu(self, pos):
curRow = self.locationTable.currentRow()
if curRow == -1:
return
self.ntRightClickMenu.exec_(self.locationTable.mapToGlobal(pos))
def onCopy(self):
self.updateLock.acquire()
curRow = self.locationTable.currentRow()
curCol = self.locationTable.currentColumn()
if curRow == -1 or curCol == -1:
self.updateLock.release()
return
curText = self.locationTable.item(curRow, curCol).text()
clipboard = QApplication.clipboard()
clipboard.setText(curText)
self.updateLock.release()
def onVisibilityChanged(self, visible):
if not visible:
self.paused = True
self.btnPause.setStyleSheet("background-color: rgba(255,0,0,255);")
# We're coming out of streaming
self.streamingSave = False
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
self.btnStream.setChecked(False)
if (self.streamingFile):
self.streamingFile.close()
self.streamingFile = None
return
else:
self.paused = False
self.btnPause.setStyleSheet("background-color: rgba(2,128,192,255);")
if self.locationTable.rowCount() > 1:
self.locationTable.scrollToItem(self.locationTable.item(0, 0))
def hideEvent(self, event):
self.visibility.emit(False)
def showEvent(self, event):
self.visibility.emit(True)
def onPauseClicked(self, pressed):
if self.btnPause.isChecked():
self.paused = True
self.btnPause.setStyleSheet("background-color: rgba(255,0,0,255);")
else:
self.paused = False
self.btnPause.setStyleSheet("background-color: rgba(2,128,192,255);")
def onStreamClicked(self, pressed):
if not self.btnStream.isChecked():
# We're coming out of streaming
self.streamingSave = False
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
if (self.streamingFile):
self.streamingFile.close()
self.streamingFile = None
return
self.btnStream.setStyleSheet("background-color: rgba(255,0,0,255);")
self.streamingSave = True
fileName = self.saveFileDialog()
if not fileName:
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
self.btnStream.setChecked(False)
return
try:
self.streamingFile = open(fileName, 'w', 1) # 1 says use line buffering, otherwise it fully buffers and doesn't write
except:
QMessageBox.question(self, 'Error',"Unable to write to " + fileName, QMessageBox.Ok)
self.streamingFile = None
self.streamingSave = False
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
self.btnStream.setChecked(False)
return
self.streamingFile.write('MAC Address,SSID,Strength,Timestamp,GPS,Latitude,Longitude,Altitude\n')
def onExportClicked(self):
fileName = self.saveFileDialog()
if not fileName:
return
try:
outputFile = open(fileName, 'w')
except:
QMessageBox.question(self, 'Error',"Unable to write to " + fileName, QMessageBox.Ok)
return
outputFile.write('MAC Address,SSID,Strength,Timestamp,GPS,Latitude,Longitude,Altitude\n')
numItems = self.locationTable.rowCount()
if numItems == 0:
outputFile.close()
return
self.updateLock.acquire()
for i in range(0, numItems):
outputFile.write(self.locationTable.item(i, 0).text() + ',"' + self.locationTable.item(i, 1).text() + '",' + self.locationTable.item(i, 2).text() + ',' + self.locationTable.item(i, 3).text())
outputFile.write(',' + self.locationTable.item(i, 4).text()+ ',' + self.locationTable.item(i, 5).text()+ ',' + self.locationTable.item(i, 6).text()+ ',' + self.locationTable.item(i, 7).text() + '\n')
self.updateLock.release()
outputFile.close()
def saveFileDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()","","CSV Files (*.csv);;All Files (*)", options=options)
if fileName:
return fileName
else:
return None
def createChart(self):
self.timeChart = QChart()
titleFont = QFont()
titleFont.setPixelSize(18)
titleBrush = QBrush(QColor(0, 0, 255))
self.timeChart.setTitleFont(titleFont)
self.timeChart.setTitleBrush(titleBrush)
self.timeChart.setTitle('Signal (Past ' + str(self.maxPoints) + ' Samples)')
# self.timeChart.addSeries(testseries)
# self.timeChart.createDefaultAxes()
self.timeChart.legend().hide()
# Axis examples: https://doc.qt.io/qt-5/qtcharts-multiaxis-example.html
newAxis = QValueAxis()
newAxis.setMin(0)
newAxis.setMax(self.maxPoints)
newAxis.setTickCount(11)
newAxis.setLabelFormat("%d")
newAxis.setTitleText("Sample")
self.timeChart.addAxis(newAxis, Qt.AlignBottom)
newAxis = QValueAxis()
newAxis.setMin(-100)
newAxis.setMax(-10)
newAxis.setTickCount(9)
newAxis.setLabelFormat("%d")
newAxis.setTitleText("dBm")
self.timeChart.addAxis(newAxis, Qt.AlignLeft)
chartBorder = Qt.darkGray
self.timePlot = QChartView(self.timeChart, self)
self.timePlot.setBackgroundBrush(chartBorder)
self.timePlot.setRenderHint(QPainter.Antialiasing)
self.timeSeries = QLineSeries()
pen = QPen(Qt.yellow)
pen.setWidth(2)
self.timeSeries.setPen(pen)
self.timeChart.addSeries(self.timeSeries)
self.timeSeries.attachAxis(self.timeChart.axisX())
self.timeSeries.attachAxis(self.timeChart.axisY())
def updateNetworkData(self, curNet):
if not self.isVisible():
return
# Signal is -NN dBm. Need to make it positive for the plot
self.radar.updateData(curNet.signal*-1)
if self.winTitle == "Client Telemetry":
self.setWindowTitle(self.winTitle + " - [" + curNet.macAddr + "] " + curNet.ssid)
else:
self.setWindowTitle(self.winTitle + " - " + curNet.ssid)
self.radar.draw()
# Network changed. Clear our table and time data
updateChartAndTable = False
self.updateLock.acquire()
if (curNet.getKey() != self.lastNetKey):
self.lastNetKey = curNet.getKey()
self.locationTable.setRowCount(0)
self.timeSeries.clear()
updateChartAndTable = True
ssidTitle = curNet.ssid
if len(ssidTitle) > 28:
ssidTitle = ssidTitle[:28]
ssidTitle = ssidTitle + '...'
self.timeChart.setTitle(ssidTitle + ' Signal (Past ' + str(self.maxPoints) + ' Samples)')
else:
if self.lastSeen != curNet.lastSeen:
updateChartAndTable = True
if updateChartAndTable:
# Update chart
numPoints = len(self.timeSeries.pointsVector())
if numPoints >= self.maxPoints:
self.timeSeries.remove(0)
# Now we need to reset the x data to pull the series back
counter = 0
for curPoint in self.timeSeries.pointsVector():
self.timeSeries.replace(counter, counter, curPoint.y())
counter += 1
if curNet.signal >= -100:
self.timeSeries.append(numPoints,curNet.signal)
else:
self.timeSeries.append(numPoints,-100)
# Update Table
self.addTableData(curNet)
# Limit points in each
if self.locationTable.rowCount() > self.maxRowPoints:
self.locationTable.setRowCount(self.maxRowPoints)
self.updateLock.release()
def addTableData(self, curNet):
if self.paused:
return
# rowPosition = self.locationTable.rowCount()
# Always insert at row(0)
rowPosition = 0
self.locationTable.insertRow(rowPosition)
#if (addedFirstRow):
# self.locationTable.setRowCount(1)
# ['macAddr','SSID', 'Strength', 'Timestamp','GPS', 'Latitude', 'Longitude', 'Altitude']
self.locationTable.setItem(rowPosition, 0, QTableWidgetItem(curNet.macAddr))
tmpssid = curNet.ssid
if (len(tmpssid) == 0):
tmpssid = '<Unknown>'
newSSID = QTableWidgetItem(tmpssid)
self.locationTable.setItem(rowPosition, 1, newSSID)
self.locationTable.setItem(rowPosition, 2, IntTableWidgetItem(str(curNet.signal)))
self.locationTable.setItem(rowPosition, 3, DateTableWidgetItem(curNet.lastSeen.strftime("%m/%d/%Y %H:%M:%S")))
if curNet.gps.isValid:
self.locationTable.setItem(rowPosition, 4, QTableWidgetItem('Yes'))
else:
self.locationTable.setItem(rowPosition, 4, QTableWidgetItem('No'))
self.locationTable.setItem(rowPosition, 5, FloatTableWidgetItem(str(curNet.gps.latitude)))
self.locationTable.setItem(rowPosition, 6, FloatTableWidgetItem(str(curNet.gps.longitude)))
self.locationTable.setItem(rowPosition, 7, FloatTableWidgetItem(str(curNet.gps.altitude)))
#order = Qt.DescendingOrder
#self.locationTable.sortItems(3, order )
# If we're in streaming mode, write the data out to disk as well
if self.streamingFile:
self.streamingFile.write(self.locationTable.item(rowPosition, 0).text() + ',"' + self.locationTable.item(rowPosition, 1).text() + '",' + self.locationTable.item(rowPosition, 2).text() + ',' +
self.locationTable.item(rowPosition, 3).text() + ',' + self.locationTable.item(rowPosition, 4).text()+ ',' + self.locationTable.item(rowPosition, 5).text()+ ',' + self.locationTable.item(rowPosition, 6).text()+ ',' + self.locationTable.item(rowPosition, 7).text() + '\n')
if (self.currentLine > self.linesBeforeFlush):
self.streamingFile.flush()
self.currentLine += 1
numRows = self.locationTable.rowCount()
if numRows > 1:
self.locationTable.scrollToItem(self.locationTable.item(0, 0))
def onTableHeadingClicked(self, logical_index):
header = self.locationTable.horizontalHeader()
order = Qt.DescendingOrder
# order = Qt.DescendingOrder
if not header.isSortIndicatorShown():
header.setSortIndicatorShown( True )
elif header.sortIndicatorSection()==logical_index:
# apparently, the sort order on the header is already switched
# when the section was clicked, so there is no need to reverse it
order = header.sortIndicatorOrder()
header.setSortIndicator( logical_index, order )
self.locationTable.sortItems(logical_index, order )
def updateData(self, newRadius):
self.radar.updateData(newRadius)
def showTelemetry(parent = None):
dialog = TelemetryDialog(parent)
result = dialog.exec_()
return (result == QDialog.Accepted)
class BluetoothTelemetry(TelemetryDialog):
def __init__(self, winTitle = "Bluetooth Telemetry", parent = None):
super().__init__(winTitle, parent)
def createTable(self):
# Set up location table
self.locationTable = QTableWidget(self)
self.locationTable.setColumnCount(10)
self.locationTable.setGeometry(10, 10, self.geometry().width()/2-20, self.geometry().height()/2)
self.locationTable.setShowGrid(True)
self.locationTable.setHorizontalHeaderLabels(['macAddr','Name', 'RSSI', 'TX Power', 'Est Range (m)', 'Timestamp','GPS', 'Latitude', 'Longitude', 'Altitude'])
self.locationTable.resizeColumnsToContents()
self.locationTable.setRowCount(0)
self.locationTable.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)
self.ntRightClickMenu = QMenu(self)
newAct = QAction('Copy', self)
newAct.setStatusTip('Copy data to clipboard')
newAct.triggered.connect(self.onCopy)
self.ntRightClickMenu.addAction(newAct)
self.locationTable.setContextMenuPolicy(Qt.CustomContextMenu)
self.locationTable.customContextMenuRequested.connect(self.showNTContextMenu)
def onStreamClicked(self, pressed):
if not self.btnStream.isChecked():
# We're coming out of streaming
self.streamingSave = False
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
if (self.streamingFile):
self.streamingFile.close()
self.streamingFile = None
return
self.btnStream.setStyleSheet("background-color: rgba(255,0,0,255);")
self.streamingSave = True
fileName = self.saveFileDialog()
if not fileName:
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
self.btnStream.setChecked(False)
return
try:
self.streamingFile = open(fileName, 'w', 1) # 1 says use line buffering, otherwise it fully buffers and doesn't write
except:
QMessageBox.question(self, 'Error',"Unable to write to " + fileName, QMessageBox.Ok)
self.streamingFile = None
self.streamingSave = False
self.btnStream.setStyleSheet("background-color: rgba(2,128,192,255);")
self.btnStream.setChecked(False)
return
self.streamingFile.write('MAC Address,Name,RSSI,TX Power,Est Range (m),Timestamp,GPS,Latitude,Longitude,Altitude\n')
def onExportClicked(self):
fileName = self.saveFileDialog()
if not fileName:
return
try:
outputFile = open(fileName, 'w')
except:
QMessageBox.question(self, 'Error',"Unable to write to " + fileName, QMessageBox.Ok)
return
outputFile.write('MAC Address,Name,RSSI,TX Power,Est Range (m),Timestamp,GPS,Latitude,Longitude,Altitude\n')
numItems = self.locationTable.rowCount()
if numItems == 0:
outputFile.close()
return
self.updateLock.acquire()
for i in range(0, numItems):
outputFile.write(self.locationTable.item(i, 0).text() + ',"' + self.locationTable.item(i, 1).text() + '",' + self.locationTable.item(i, 2).text() + ',' + self.locationTable.item(i, 3).text())
outputFile.write(',' + self.locationTable.item(i, 4).text()+ ',' + self.locationTable.item(i, 5).text()+ ',' + self.locationTable.item(i, 6).text()+ ',' + self.locationTable.item(i, 7).text() +
',' + self.locationTable.item(i, 8).text()+ ',' + self.locationTable.item(i, 9).text() + '\n')
self.updateLock.release()
outputFile.close()
def updateNetworkData(self, curDevice):
if not self.isVisible():
return
# Signal is -NN dBm. Need to make it positive for the plot
self.radar.updateData(curDevice.rssi*-1)
if len(curDevice.name) > 0:
self.setWindowTitle(self.winTitle + " - " + curDevice.name)
else:
self.setWindowTitle(self.winTitle + " - " + curDevice.macAddress)
self.radar.draw()
# Network changed. Clear our table and time data
updateChartAndTable = False
self.updateLock.acquire()
if self.lastSeen != curDevice.lastSeen:
updateChartAndTable = True
if updateChartAndTable:
# Update chart
numPoints = len(self.timeSeries.pointsVector())
if numPoints >= self.maxPoints:
self.timeSeries.remove(0)
# Now we need to reset the x data to pull the series back
counter = 0
for curPoint in self.timeSeries.pointsVector():
self.timeSeries.replace(counter, counter, curPoint.y())
counter += 1
if curDevice.rssi >= -100:
self.timeSeries.append(numPoints,curDevice.rssi)
else:
self.timeSeries.append(numPoints,-100)
# Update Table
self.addTableData(curDevice)
# Limit points in each
if self.locationTable.rowCount() > self.maxRowPoints:
self.locationTable.setRowCount(self.maxRowPoints)
self.updateLock.release()
def addTableData(self, curDevice):
if self.paused:
return
# rowPosition = self.locationTable.rowCount()
# Always insert at row(0)
rowPosition = 0
self.locationTable.insertRow(rowPosition)
#if (addedFirstRow):
# self.locationTable.setRowCount(1)
# ['macAddr','name', 'rssi','tx power','est range (m)', 'Timestamp','GPS', 'Latitude', 'Longitude', 'Altitude']
self.locationTable.setItem(rowPosition, 0, QTableWidgetItem(curDevice.macAddress))
self.locationTable.setItem(rowPosition, 1, QTableWidgetItem(curDevice.name))
self.locationTable.setItem(rowPosition, 2, IntTableWidgetItem(str(curDevice.rssi)))
if curDevice.txPowerValid:
self.locationTable.setItem(rowPosition, 3, IntTableWidgetItem(str(curDevice.txPower)))
else:
self.locationTable.setItem(rowPosition, 3, IntTableWidgetItem('Unknown'))
if curDevice.iBeaconRange != -1 and curDevice.txPowerValid:
self.locationTable.setItem(rowPosition, 4, IntTableWidgetItem(str(curDevice.iBeaconRange)))
else:
self.locationTable.setItem(rowPosition, 4, IntTableWidgetItem(str('Unknown')))
self.locationTable.setItem(rowPosition, 5, DateTableWidgetItem(curDevice.lastSeen.strftime("%m/%d/%Y %H:%M:%S")))
if curDevice.gps.isValid:
self.locationTable.setItem(rowPosition, 6, QTableWidgetItem('Yes'))
else:
self.locationTable.setItem(rowPosition, 6, QTableWidgetItem('No'))
self.locationTable.setItem(rowPosition, 7, FloatTableWidgetItem(str(curDevice.gps.latitude)))
self.locationTable.setItem(rowPosition, 8, FloatTableWidgetItem(str(curDevice.gps.longitude)))
self.locationTable.setItem(rowPosition, 9, FloatTableWidgetItem(str(curDevice.gps.altitude)))
#order = Qt.DescendingOrder
#self.locationTable.sortItems(3, order )
# If we're in streaming mode, write the data out to disk as well
if self.streamingFile:
self.streamingFile.write(self.locationTable.item(rowPosition, 0).text() + ',"' + self.locationTable.item(rowPosition, 1).text() + '",' + self.locationTable.item(rowPosition, 2).text() + ',' +
self.locationTable.item(rowPosition, 3).text() + ',' + self.locationTable.item(rowPosition, 4).text()+ ',' + self.locationTable.item(rowPosition, 5).text()+ ',' +
self.locationTable.item(rowPosition, 6).text()+ ',' + self.locationTable.item(rowPosition, 7).text() +
                                     ',' + self.locationTable.item(rowPosition, 8).text()+ ',' + self.locationTable.item(rowPosition, 9).text() + '\n')
if (self.currentLine > self.linesBeforeFlush):
self.streamingFile.flush()
self.currentLine += 1
numRows = self.locationTable.rowCount()
if numRows > 1:
self.locationTable.scrollToItem(self.locationTable.item(0, 0))
# ------- Main Routine For Debugging-------------------------
if __name__ == '__main__':
app = QApplication([])
# date, time, ok = DB2Dialog.getDateTime()
# ok = TelemetryDialog.showTelemetry()
# dialog = TelemetryDialog()
dialog = BluetoothTelemetry()
dialog.show()
dialog.updateData(50)
#print("{} {} {}".format(date, time, ok))
app.exec_()
| gpl-3.0 | 1,378,647,494,263,673,000 | 41.41623 | 283 | 0.611615 | false |
JrtPec/opengrid | opengrid/recipes/mvreg_sensor.py | 2 | 4423 | # -*- coding: utf-8 -*-
"""
Script for generating a multivariable regression model for a single sensor.
The script will fetch the data, build a model and make graphs.
Created on 26/03/2017 by Roel De Coninck
"""
import sys, os
import matplotlib
matplotlib.use('Agg')
import pandas as pd
from opengrid.library import houseprint, caching, regression, forecastwrapper
from opengrid import config
c = config.Config()
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = 10,5
def compute(sensorid, start_model, end_model):
end = pd.Timestamp('now', tz='Europe/Brussels')
# Create houseprint from saved file, if not available, parse the google spreadsheet
try:
hp_filename = os.path.join(c.get('data', 'folder'), 'hp_anonymous.pkl')
hp = houseprint.load_houseprint_from_file(hp_filename)
print("Houseprint loaded from {}".format(hp_filename))
except Exception as e:
print(e)
print("Because of this error we try to build the houseprint from source")
hp = houseprint.Houseprint()
hp.init_tmpo()
# Load the cached daily data
sensor = hp.find_sensor(sensorid)
cache = caching.Cache(variable='{}_daily_total'.format(sensor.type))
df_day = cache.get(sensors=[sensor])
df_day.rename(columns={sensorid: sensor.type}, inplace=True)
# Load the cached weather data, clean up and compose a combined dataframe
weather = forecastwrapper.Weather(location=(50.8024, 4.3407), start=start_model, end=end)
irradiances = [
(0, 90), # north vertical
(90, 90), # east vertical
(180, 90), # south vertical
(270, 90), # west vertical
]
orientations = [0, 90, 180, 270]
weather_data = weather.days(irradiances=irradiances,
wind_orients=orientations,
heating_base_temperatures=[0, 6, 8, 10, 12, 14, 16, 18]).dropna(axis=1)
weather_data.drop(['icon', 'summary', 'moonPhase', 'windBearing', 'temperatureMaxTime', 'temperatureMinTime',
'apparentTemperatureMaxTime', 'apparentTemperatureMinTime', 'uvIndexTime',
'sunsetTime', 'sunriseTime'],
axis=1, inplace=True)
# Add columns for the day-of-week
for i, d in zip(range(7), ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']):
weather_data[d] = 0
weather_data.loc[weather_data.index.weekday == i, d] = 1
weather_data = weather_data.applymap(float)
data = pd.concat([df_day, weather_data], axis=1).dropna()
data = data.tz_convert('Europe/Brussels')
df = data.resample(rule='MS').sum()
if len(df) < 2:
print("Not enough data for building a monthly reference model")
sys.exit(1)
# monthly model, statistical validation
mv = regression.MVLinReg(df.ix[:end_model], sensor.type, p_max=0.03)
figures = mv.plot(df=df)
figures[0].savefig(os.path.join(c.get('data', 'folder'), 'figures', 'multivar_model_' + sensorid + '.png'), dpi=100)
figures[1].savefig(os.path.join(c.get('data', 'folder'), 'figures', 'multivar_results_' + sensorid + '.png'),
dpi=100)
# weekly model, statistical validation
df = data.resample(rule='W').sum()
if len(df.ix[:end_model]) < 4:
print("Not enough data for building a weekly reference model")
sys.exit(1)
mv = regression.MVLinReg(df.ix[:end_model], sensor.type, p_max=0.02)
if len(df.ix[end_model:]) > 0:
figures = mv.plot(model=False, bar_chart=True, df=df.ix[end_model:])
figures[0].savefig(
os.path.join(c.get('data', 'folder'), 'figures', 'multivar_prediction_weekly_' + sensorid + '.png'),
dpi=100)
if __name__ == '__main__':
if not len(sys.argv) == 4:
print("""
Use of this script: python mreg_sensors.py sensorid from till
sensorid: (string) sensortoken
from: (string) starting date for the identification data of the model
till: (string) end date for the identification data of the model
""")
exit(1)
# parse arguments
sensorid = sys.argv[1]
start_model = pd.Timestamp(sys.argv[2], tz='Europe/Brussels')
end_model = pd.Timestamp(sys.argv[3], tz='Europe/Brussels') #last day of the data period for the model
compute(sensorid, start_model, end_model)
| apache-2.0 | -2,308,629,868,610,351,600 | 37.46087 | 120 | 0.632602 | false |
jakereps/qiime2 | qiime2/sdk/action.py | 1 | 22566 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import abc
import concurrent.futures
import inspect
import tempfile
import textwrap
import itertools
import decorator
import qiime2.sdk
import qiime2.core.type as qtype
import qiime2.core.archive as archive
from qiime2.core.util import LateBindingAttribute, DropFirstParameter, tuplize
def _subprocess_apply(action, args, kwargs):
# Preprocess input artifacts as we've got pickled clones which shouldn't
# self-destruct.
for arg in itertools.chain(args, kwargs.values()):
if isinstance(arg, qiime2.sdk.Artifact):
# We can't rely on the subprocess preventing atexit hooks as the
# destructor is also called when the artifact goes out of scope
# (which happens).
arg._destructor.detach()
results = action(*args, **kwargs)
for r in results:
        # The destructor doesn't keep its detached state when sent back to the
# main process. Something about the context-manager from ctx seems to
# cause a GC of the artifacts before the process actually ends, so we
# do need to detach these. The specifics are not understood.
r._destructor.detach()
return results
class Action(metaclass=abc.ABCMeta):
"""QIIME 2 Action"""
type = 'action'
_ProvCaptureCls = archive.ActionProvenanceCapture
__call__ = LateBindingAttribute('_dynamic_call')
asynchronous = LateBindingAttribute('_dynamic_async')
# Converts a callable's signature into its wrapper's signature (i.e.
# converts the "view API" signature into the "artifact API" signature).
# Accepts a callable as input and returns a callable as output with
# converted signature.
@abc.abstractmethod
def _callable_sig_converter_(self, callable):
raise NotImplementedError
# Executes a callable on the provided `view_args`, wrapping and returning
# the callable's outputs. In other words, executes the "view API", wrapping
# and returning the outputs as the "artifact API". `view_args` is a dict
# mapping parameter name to unwrapped value (i.e. view). `view_args`
# contains an entry for each parameter accepted by the wrapper. It is the
# executor's responsibility to perform any additional transformations on
# these parameters, or provide extra parameters, in order to execute the
# callable. `output_types` is an OrderedDict mapping output name to QIIME
# type (e.g. semantic type).
@abc.abstractmethod
def _callable_executor_(self, scope, view_args, output_types):
raise NotImplementedError
# Private constructor
@classmethod
def _init(cls, callable, signature, plugin_id, name, description,
citations, deprecated, examples):
"""
Parameters
----------
callable : callable
signature : qiime2.core.type.Signature
plugin_id : str
name : str
Human-readable name for this action.
description : str
Human-readable description for this action.
"""
self = cls.__new__(cls)
self.__init(callable, signature, plugin_id, name, description,
citations, deprecated, examples)
return self
# This "extra private" constructor is necessary because `Action` objects
# can be initialized from a static (classmethod) context or on an
# existing instance (see `_init` and `__setstate__`, respectively).
def __init(self, callable, signature, plugin_id, name, description,
citations, deprecated, examples):
self._callable = callable
self.signature = signature
self.plugin_id = plugin_id
self.name = name
self.description = description
self.citations = citations
self.deprecated = deprecated
self.examples = examples
self.id = callable.__name__
self._dynamic_call = self._get_callable_wrapper()
self._dynamic_async = self._get_async_wrapper()
def __init__(self):
raise NotImplementedError(
"%s constructor is private." % self.__class__.__name__)
@property
def source(self):
"""
The source code for the action's callable.
Returns
-------
str
The source code of this action's callable formatted as Markdown
text.
"""
try:
source = inspect.getsource(self._callable)
except OSError:
raise TypeError(
"Cannot retrieve source code for callable %r" %
self._callable.__name__)
return markdown_source_template % {'source': source}
def get_import_path(self, include_self=True):
path = f'qiime2.plugins.{self.plugin_id}.{self.type}s'
if include_self:
path += f'.{self.id}'
return path
def __repr__(self):
return "<%s %s>" % (self.type, self.get_import_path())
def __getstate__(self):
return {
'callable': self._callable,
'signature': self.signature,
'plugin_id': self.plugin_id,
'name': self.name,
'description': self.description,
'citations': self.citations,
'deprecated': self.deprecated,
'examples': self.examples,
}
def __setstate__(self, state):
self.__init(**state)
def _bind(self, context_factory):
"""Bind an action to a Context factory, returning a decorated function.
This is a very primitive API and should be used primarily by the
framework and very advanced interfaces which need deep control over
the calling semantics of pipelines and garbage collection.
The basic idea behind this is outlined as follows:
Every action is defined as an *instance* that a plugin constructs.
This means that `self` represents the internal details as to what
the action is. If you need to associate additional state with the
*application* of an action, you cannot mutate `self` without
changing all future applications. So there needs to be an
additional instance variable that can serve as the state of a given
application. We call this a Context object. It is also important
that each application of an action has *independent* state, so
providing an instance of Context won't work. We need a factory.
Parameterizing the context is necessary because it is possible for
an action to call other actions. The details need to be coordinated
behind the scenes to the user, so we can parameterize the behavior
by providing different context factories to `bind` at different
points in the "call stack".
"""
def bound_callable(*args, **kwargs):
# This function's signature is rewritten below using
# `decorator.decorator`. When the signature is rewritten,
# args[0] is the function whose signature was used to rewrite
# this function's signature.
args = args[1:]
ctx = context_factory()
# Set up a scope under which we can track destructable references
# if something goes wrong, the __exit__ handler of this context
# manager will clean up. (It also cleans up when things go right)
with ctx as scope:
provenance = self._ProvCaptureCls(
self.type, self.plugin_id, self.id)
scope.add_reference(provenance)
# Collate user arguments
user_input = {name: value for value, name in
zip(args, self.signature.signature_order)}
user_input.update(kwargs)
# Type management
self.signature.check_types(**user_input)
output_types = self.signature.solve_output(**user_input)
callable_args = {}
# Record parameters
for name, spec in self.signature.parameters.items():
parameter = callable_args[name] = user_input[name]
provenance.add_parameter(name, spec.qiime_type, parameter)
# Record and transform inputs
for name, spec in self.signature.inputs.items():
artifact = user_input[name]
provenance.add_input(name, artifact)
if artifact is None:
callable_args[name] = None
elif spec.has_view_type():
recorder = provenance.transformation_recorder(name)
if qtype.is_collection_type(spec.qiime_type):
# Always put in a list. Sometimes the view isn't
# hashable, which isn't relevant, but would break
# a Set[SomeType].
callable_args[name] = [
a._view(spec.view_type, recorder)
for a in user_input[name]]
else:
callable_args[name] = artifact._view(
spec.view_type, recorder)
else:
callable_args[name] = artifact
if self.deprecated:
with qiime2.core.util.warning() as warn:
warn(self._build_deprecation_message(),
FutureWarning)
# Execute
outputs = self._callable_executor_(scope, callable_args,
output_types, provenance)
if len(outputs) != len(self.signature.outputs):
raise ValueError(
"Number of callable outputs must match number of "
"outputs defined in signature: %d != %d" %
(len(outputs), len(self.signature.outputs)))
# Wrap in a Results object mapping output name to value so
# users have access to outputs by name or position.
return qiime2.sdk.Results(self.signature.outputs.keys(),
outputs)
bound_callable = self._rewrite_wrapper_signature(bound_callable)
self._set_wrapper_properties(bound_callable)
self._set_wrapper_name(bound_callable, self.id)
return bound_callable
def _get_callable_wrapper(self):
# This is a "root" level invocation (not a nested call within a
# pipeline), so no special factory is needed.
callable_wrapper = self._bind(qiime2.sdk.Context)
self._set_wrapper_name(callable_wrapper, '__call__')
return callable_wrapper
def _get_async_wrapper(self):
def async_wrapper(*args, **kwargs):
# TODO handle this better in the future, but stop the massive error
# caused by MacOSX asynchronous runs for now.
try:
import matplotlib as plt
if plt.rcParams['backend'].lower() == 'macosx':
raise EnvironmentError(backend_error_template %
plt.matplotlib_fname())
except ImportError:
pass
# This function's signature is rewritten below using
# `decorator.decorator`. When the signature is rewritten, args[0]
# is the function whose signature was used to rewrite this
# function's signature.
args = args[1:]
pool = concurrent.futures.ProcessPoolExecutor(max_workers=1)
future = pool.submit(_subprocess_apply, self, args, kwargs)
# TODO: pool.shutdown(wait=False) caused the child process to
# hang unrecoverably. This seems to be a bug in Python 3.7
# It's probably best to gut concurrent.futures entirely, so we're
# ignoring the resource leakage for the moment.
return future
async_wrapper = self._rewrite_wrapper_signature(async_wrapper)
self._set_wrapper_properties(async_wrapper)
self._set_wrapper_name(async_wrapper, 'asynchronous')
return async_wrapper
def _rewrite_wrapper_signature(self, wrapper):
# Convert the callable's signature into the wrapper's signature and set
# it on the wrapper.
return decorator.decorator(
wrapper, self._callable_sig_converter_(self._callable))
def _set_wrapper_name(self, wrapper, name):
wrapper.__name__ = wrapper.__qualname__ = name
def _set_wrapper_properties(self, wrapper):
wrapper.__module__ = self.get_import_path(include_self=False)
wrapper.__doc__ = self._build_numpydoc()
wrapper.__annotations__ = self._build_annotations()
# This is necessary so that `inspect` doesn't display the wrapped
# function's annotations (the annotations apply to the "view API" and
# not the "artifact API").
del wrapper.__wrapped__
def _build_annotations(self):
annotations = {}
for name, spec in self.signature.signature_order.items():
annotations[name] = spec.qiime_type
output = []
for spec in self.signature.outputs.values():
output.append(spec.qiime_type)
output = tuple(output)
annotations["return"] = output
return annotations
def _build_numpydoc(self):
numpydoc = []
numpydoc.append(textwrap.fill(self.name, width=75))
if self.deprecated:
base_msg = textwrap.indent(
textwrap.fill(self._build_deprecation_message(), width=72),
' ')
numpydoc.append('.. deprecated::\n' + base_msg)
numpydoc.append(textwrap.fill(self.description, width=75))
sig = self.signature
parameters = self._build_section("Parameters", sig.signature_order)
returns = self._build_section("Returns", sig.outputs)
# TODO: include Usage-rendered examples here
for section in (parameters, returns):
if section:
numpydoc.append(section)
return '\n\n'.join(numpydoc) + '\n'
def _build_section(self, header, iterable):
section = []
if iterable:
section.append(header)
section.append('-'*len(header))
for key, value in iterable.items():
variable_line = (
"{item} : {type}".format(item=key, type=value.qiime_type))
if value.has_default():
variable_line += ", optional"
section.append(variable_line)
if value.has_description():
section.append(textwrap.indent(textwrap.fill(
str(value.description), width=71), ' '))
return '\n'.join(section).strip()
def _build_deprecation_message(self):
return (f'This {self.type.title()} is deprecated and will be removed '
'in a future version of this plugin.')
class Method(Action):
"""QIIME 2 Method"""
type = 'method'
# Abstract method implementations:
def _callable_sig_converter_(self, callable):
# No conversion necessary.
return callable
def _callable_executor_(self, scope, view_args, output_types, provenance):
output_views = self._callable(**view_args)
output_views = tuplize(output_views)
# TODO this won't work if the user has annotated their "view API" to
# return a `typing.Tuple` with some number of components. Python will
# return a tuple when there are multiple return values, and this length
# check will fail because the tuple as a whole should be matched up to
# a single output type instead of its components. This is an edgecase
# due to how Python handles multiple returns, and can be worked around
# by using something like `typing.List` instead.
if len(output_views) != len(output_types):
raise TypeError(
"Number of output views must match number of output "
"semantic types: %d != %d"
% (len(output_views), len(output_types)))
output_artifacts = []
for output_view, (name, spec) in zip(output_views,
output_types.items()):
if type(output_view) is not spec.view_type:
raise TypeError(
"Expected output view type %r, received %r" %
(spec.view_type.__name__, type(output_view).__name__))
prov = provenance.fork(name)
scope.add_reference(prov)
artifact = qiime2.sdk.Artifact._from_view(
spec.qiime_type, output_view, spec.view_type, prov)
scope.add_parent_reference(artifact)
output_artifacts.append(artifact)
return tuple(output_artifacts)
@classmethod
def _init(cls, callable, inputs, parameters, outputs, plugin_id, name,
description, input_descriptions, parameter_descriptions,
output_descriptions, citations, deprecated, examples):
signature = qtype.MethodSignature(callable, inputs, parameters,
outputs, input_descriptions,
parameter_descriptions,
output_descriptions)
return super()._init(callable, signature, plugin_id, name, description,
citations, deprecated, examples)
class Visualizer(Action):
"""QIIME 2 Visualizer"""
type = 'visualizer'
# Abstract method implementations:
def _callable_sig_converter_(self, callable):
return DropFirstParameter.from_function(callable)
def _callable_executor_(self, scope, view_args, output_types, provenance):
# TODO use qiime2.plugin.OutPath when it exists, and update visualizers
# to work with OutPath instead of str. Visualization._from_data_dir
# will also need to be updated to support OutPath instead of str.
with tempfile.TemporaryDirectory(prefix='qiime2-temp-') as temp_dir:
ret_val = self._callable(output_dir=temp_dir, **view_args)
if ret_val is not None:
raise TypeError(
"Visualizer %r should not return anything. "
"Received %r as a return value." % (self, ret_val))
provenance.output_name = 'visualization'
viz = qiime2.sdk.Visualization._from_data_dir(temp_dir,
provenance)
scope.add_parent_reference(viz)
return (viz,)
@classmethod
def _init(cls, callable, inputs, parameters, plugin_id, name, description,
input_descriptions, parameter_descriptions, citations,
deprecated, examples):
signature = qtype.VisualizerSignature(callable, inputs, parameters,
input_descriptions,
parameter_descriptions)
return super()._init(callable, signature, plugin_id, name, description,
citations, deprecated, examples)
class Pipeline(Action):
"""QIIME 2 Pipeline"""
type = 'pipeline'
_ProvCaptureCls = archive.PipelineProvenanceCapture
def _callable_sig_converter_(self, callable):
return DropFirstParameter.from_function(callable)
def _callable_executor_(self, scope, view_args, output_types, provenance):
outputs = self._callable(scope.ctx, **view_args)
outputs = tuplize(outputs)
for output in outputs:
if not isinstance(output, qiime2.sdk.Result):
raise TypeError("Pipelines must return `Result` objects, "
"not %s" % (type(output), ))
# This condition *is* tested by the caller of _callable_executor_, but
# the kinds of errors a plugin developer see will make more sense if
# this check happens before the subtype check. Otherwise forgetting an
# output would more likely error as a wrong type, which while correct,
# isn't root of the problem.
if len(outputs) != len(output_types):
raise TypeError(
"Number of outputs must match number of output "
"semantic types: %d != %d"
% (len(outputs), len(output_types)))
results = []
for output, (name, spec) in zip(outputs, output_types.items()):
if not (output.type <= spec.qiime_type):
raise TypeError(
"Expected output type %r, received %r" %
(spec.qiime_type, output.type))
prov = provenance.fork(name, output)
scope.add_reference(prov)
aliased_result = output._alias(prov)
scope.add_parent_reference(aliased_result)
results.append(aliased_result)
return tuple(results)
@classmethod
def _init(cls, callable, inputs, parameters, outputs, plugin_id, name,
description, input_descriptions, parameter_descriptions,
output_descriptions, citations, deprecated, examples):
signature = qtype.PipelineSignature(callable, inputs, parameters,
outputs, input_descriptions,
parameter_descriptions,
output_descriptions)
return super()._init(callable, signature, plugin_id, name, description,
citations, deprecated, examples)
markdown_source_template = """
```python
%(source)s
```
"""
# TODO add unit test for callables raising this
backend_error_template = """
Your current matplotlib backend (MacOSX) does not work with asynchronous calls.
A recommended backend is Agg, and can be changed by modifying your
matplotlibrc "backend" parameter, which can be found at: \n\n %s
"""
| bsd-3-clause | 1,595,594,902,803,466,200 | 40.634686 | 79 | 0.589294 | false |
harterj/moose | python/peacock/tests/postprocessor_tab/test_LineGroupWidgetPostprocessor.py | 12 | 6298 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import unittest
import shutil
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.PostprocessorDataWidget import PostprocessorDataWidget
from peacock.PostprocessorViewer.plugins.LineGroupWidget import main
from peacock.utils import Testing
import mooseutils
class TestLineGroupWidgetPostprocessor(Testing.PeacockImageTestCase):
"""
Test class for the ArtistToggleWidget which toggles postprocessor lines.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def copyfiles(self):
"""
Copy the data file to a local temporary.
"""
src = os.path.abspath(os.path.join(__file__, '../../input/white_elephant_jan_2016.csv'))
shutil.copyfile(src, self._filename)
def create(self, timer=False):
"""
Creates the widgets for testing.
This is done here rather than in setUp to allow for testing of delayed loading.
"""
self._reader = mooseutils.PostprocessorReader(self._filename)
self._data = PostprocessorDataWidget(self._reader, timer=timer)
# Build the widgets
self._control, self._widget, self._window = main(self._data)
self._widget.currentWidget().FigurePlugin.setFixedSize(QtCore.QSize(625, 625))
def setUp(self):
"""
Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
"""
self._filename = '{}_{}'.format(self.__class__.__name__, 'test.csv')
def tearDown(self):
"""
Clean up.
"""
if os.path.exists(self._filename):
os.remove(self._filename)
def testEmpty(self):
"""
Test that an empty plot is possible.
"""
self.copyfiles()
self.create()
self.assertImage('testEmpty.png')
# Test that controls are initialized and disabled correctly
self.assertEqual(self._control.AxisVariable.currentText(), "time")
self.assertFalse(self._control._toggles['time'].isEnabled(), "Time toggle should be disabled.")
def testSelect(self):
"""
Test that selecting variables works.
"""
self.copyfiles()
self.create()
vars = ['air_temp_set_1', 'precip_accum_set_1']
for var in vars:
self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._toggles[var].clicked.emit()
self.assertImage('testSelect.png')
self.assertEqual('; '.join(vars), self._window.axes()[0].get_yaxis().get_label().get_text())
self.assertEqual('time', self._window.axes()[0].get_xaxis().get_label().get_text())
# Switch axis
self._control._toggles[vars[0]].PlotAxis.setCurrentIndex(1)
self._control._toggles[vars[0]].clicked.emit()
self.assertImage('testSelect2.png')
self.assertEqual(vars[0], self._window.axes()[1].get_yaxis().get_label().get_text())
self.assertEqual(vars[1], self._window.axes()[0].get_yaxis().get_label().get_text())
self.assertEqual('time', self._window.axes()[0].get_xaxis().get_label().get_text())
def testChangePrimaryVariable(self):
"""
Test that the primary variable may be modified.
"""
self.copyfiles()
self.create()
# Plot something
x_var = 'snow_water_equiv_set_1'
y_var = 'precip_accum_set_1'
self._control._toggles[y_var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._toggles[y_var].clicked.emit()
self.assertImage('testChangePrimaryVariable0.png')
# Change the primary variable
self._control.AxisVariable.setCurrentIndex(5)
self._control.AxisVariable.currentIndexChanged.emit(5)
self.assertEqual(self._control.AxisVariable.currentText(), x_var)
self.assertFalse(self._control._toggles[x_var].isEnabled(), "Toggle should be disabled.")
self.assertTrue(self._control._toggles['time'].isEnabled(), "Toggle should be enabled.")
self.assertImage('testChangePrimaryVariable1.png')
def testDelayLoadAndUnload(self):
"""
Test that delayed loading and removal of files works.
"""
self.create()
# Plot should be empty and the message should be visible.
self.assertImage('testEmpty.png')
self.assertTrue(self._control.NoDataMessage.isVisible())
# Load data
self.copyfiles()
self._data.load()
self.assertFalse(self._control.NoDataMessage.isVisible())
# Plot something
var = 'air_temp_set_1'
self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._toggles[var].clicked.emit()
self.assertImage('testDelayLoadPlot.png')
# Remove data
os.remove(self._filename)
self._data.load()
self.assertTrue(self._control.NoDataMessage.isVisible())
self.assertImage('testEmpty.png')
# Re-load data
self.copyfiles()
self._data.load()
self.assertFalse(self._control.NoDataMessage.isVisible())
self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._toggles[var].clicked.emit()
self.assertImage('testDelayLoadPlot2.png', allowed=0.98) # The line color/style is different because the cycle keeps going
def testRepr(self):
"""
Test script creation.
"""
self.copyfiles()
self.create()
var = 'air_temp_set_1'
self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._toggles[var].clicked.emit()
output, imports = self._control.repr()
self.assertIn("x = data('time')", output)
self.assertIn("y = data('air_temp_set_1')", output)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 | -804,819,431,110,992,800 | 34.382022 | 130 | 0.639886 | false |
dflowers7/kroneckerbio | External/sundials-2.6.2/examples/arkode/CXX_serial/plot_sol.py | 1 | 1676 | #!/usr/bin/env python
# ----------------------------------------------------------------
# Programmer(s): Daniel R. Reynolds @ SMU
# ----------------------------------------------------------------
# LLNS/SMU Copyright Start
# Copyright (c) 2015, Southern Methodist University and
# Lawrence Livermore National Security
#
# This work was performed under the auspices of the U.S. Department
# of Energy by Southern Methodist University and Lawrence Livermore
# National Laboratory under Contract DE-AC52-07NA27344.
# Produced at Southern Methodist University and the Lawrence
# Livermore National Laboratory.
#
# All rights reserved.
# For details, see the LICENSE file.
# LLNS/SMU Copyright End
# Copyright (c) 2013, Southern Methodist University.
# All rights reserved.
# For details, see the LICENSE file.
# ----------------------------------------------------------------
# matplotlib-based plotting script for ODE examples
# imports
import sys
import pylab as plt
import numpy as np
# load solution data file
data = np.loadtxt('solution.txt', dtype=np.double)
# determine number of time steps, number of fields
nt,nv = np.shape(data)
# extract time array
times = data[:,0]
# parse comment line to determine solution names
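# (Assumed header layout: the first line of solution.txt reads e.g. "# t u v",
#  so tokens [2:] below give one name per solution field.)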
f = open('solution.txt', 'r')
commentline = f.readline()
commentsplit = commentline.split()
names = commentsplit[2:]
# create plot
plt.figure()
# add curves to figure
for i in range(nv-1):
plt.plot(times,data[:,i+1],label=names[i])
plt.xlabel('t')
if (nv > 2):
plt.ylabel('solutions')
else:
plt.ylabel('solution')
plt.legend(loc='upper right', shadow=True)
plt.grid()
plt.savefig('solution.png')
##### end of script #####
| mit | -7,415,551,892,411,444,000 | 26.032258 | 68 | 0.643795 | false |
csgwon/dl-pipeline | flaskapp/tools.py | 1 | 1415 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
labels = ['Arabic', 'Chinese', 'Czech', 'Dutch', 'English', 'French', 'German', 'Greek', 'Irish', 'Italian', 'Japanese', 'Korean', 'Polish', 'Portuguese', 'Russian', 'Scottish', 'Spanish', 'Vietnamese']
label_to_number = {y: i for i, y in enumerate(labels)}
chars = 'abcdefghijklmnopqrstuvwxyz-,;!?:\'\\|_@#$%^&*~`+-=<>()[]{} '
char_to_index = {char:i for i, char in enumerate(chars)}
index_to_char = {i: char for i, char in enumerate(chars)}
max_name_len = 17
def add_begin_end_tokens(name):
# return "^{}$".format(name)
begin_token_marker = "^"
end_token_marker = '$'
tokened_name = "".join((begin_token_marker, name, end_token_marker))
return tokened_name
def encode_input(name, maxlen=max_name_len):
name = add_begin_end_tokens(name.lower().strip())
encoding = np.zeros((len(chars), maxlen), dtype=np.int64)
for i, char in enumerate(name[:maxlen]):
index = char_to_index.get(char, 'unknown')
        if index != 'unknown':
encoding[index,i] = 1
return encoding
def decode_input( encoding, maxlen=max_name_len ):
name = ''
for i in range(maxlen):
idx = np.nonzero(encoding[:,i])
if len(idx[0]) > 0:
enc_char = index_to_char.get(idx[0][0], 'unknown')
if enc_char is not 'unknown':
name += enc_char
return name
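# Minimal usage sketch (assumption: module run directly; the surname below is a
# made-up example). encode_input() yields a (len(chars), max_name_len) one-hot
# matrix; characters outside `chars` -- including the ^/$ markers -- are simply
# skipped, so decode_input() recovers only the recognised characters.
if __name__ == '__main__':
    example_name = 'Rossi'  # hypothetical input, not taken from any dataset
    one_hot = encode_input(example_name)
    print(one_hot.shape)          # (len(chars), max_name_len)
    print(decode_input(one_hot))  # -> 'rossi'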
| apache-2.0 | 3,713,825,964,852,960,000 | 34.3 | 202 | 0.59915 | false |
0todd0000/spm1d | spm1d/examples/stats1d_roi/ex_ttest_paired.py | 1 | 1038 |
import numpy as np
from matplotlib import pyplot
import spm1d
#(0) Load data:
dataset = spm1d.data.uv1d.t2.PlantarArchAngle()
YA,YB = dataset.get_data() #normal and fast walking
#(0a) Create region of interest(ROI):
roi = np.array([False]*YA.shape[1])
roi[0:10] = True
#(1) Conduct t test:
alpha = 0.05
t = spm1d.stats.ttest_paired(YA, YB, roi=roi)
ti = t.inference(alpha, two_tailed=False, interp=True)
#(2) Plot:
pyplot.close('all')
### plot mean and SD:
pyplot.figure( figsize=(8, 3.5) )
ax = pyplot.axes( (0.1, 0.15, 0.35, 0.8) )
spm1d.plot.plot_mean_sd(YA)
spm1d.plot.plot_mean_sd(YB, linecolor='r', facecolor='r')
spm1d.plot.plot_roi(roi, facecolor='b', alpha=0.3)
ax.axhline(y=0, color='k', linestyle=':')
ax.set_xlabel('Time (%)')
ax.set_ylabel('Plantar arch angle (deg)')
### plot SPM results:
ax = pyplot.axes((0.55,0.15,0.35,0.8))
ti.plot()
ti.plot_threshold_label(fontsize=8)
ti.plot_p_values(size=10, offsets=[(0,0.3)])
ax.set_xlabel('Time (%)')
pyplot.show()
| gpl-3.0 | 1,469,546,076,965,856,300 | 22.590909 | 62 | 0.637765 | false |
dksr/REMIND | python/base/vis/generate_cartoon_events/event_prototyper.py | 1 | 121498 | """ sp_rels_gui
Based on pySketch from wx demos.
Known Bugs:
***** When a saved file is loaded, then you cannot move group of objects
using arrow keys. For this to work just press the change the tools and get
back to the selection tool and then move
* Scrolling the window causes the drawing panel to be mucked up until you
refresh it. I've got no idea why.
* I suspect that the reference counting for some wxPoint objects is
getting mucked up; when the user quits, we get errors about being
unable to call del on a 'None' object.
* Saving files via pickling is not a robust cross-platform solution.
"""
import copy
import cPickle
import math
import matplotlib
import numpy as np
import os.path
import sys
import time
import traceback, types
import wx
from matplotlib.figure import Figure
from matplotlib.mlab import normpdf
from matplotlib.backends.backend_agg import FigureCanvasAgg
from numpy.random import randn
from pylab import linspace, meshgrid, sqrt
from wx.lib.buttons import GenBitmapButton,GenBitmapToggleButton
#matplotlib.use('WXAgg')
#from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
#----------------------------------------------------------------------------
# System Constants
#----------------------------------------------------------------------------
sys.setrecursionlimit(500)
# Our menu item IDs:
menu_DUPLICATE = wx.NewId() # Edit menu items.
menu_GROUP = wx.NewId()
menu_UNGROUP = wx.NewId()
slider_TIMER = wx.NewId()
redraw_TIMER = wx.NewId()
menu_EDIT_PROPS = wx.NewId()
menu_SELECT = wx.NewId() # Tools menu items.
menu_RECT = wx.NewId()
menu_DC = wx.NewId() # View menu items.
menu_GCDC = wx.NewId()
menu_MOVE_FORWARD = wx.NewId() # Object menu items.
menu_MOVE_TO_FRONT = wx.NewId()
menu_MOVE_BACKWARD = wx.NewId()
menu_MOVE_TO_BACK = wx.NewId()
menu_ORIENT_RIGHT = wx.NewId()
menu_ORIENT_LEFT = wx.NewId()
menu_ORIENT_DOWN = wx.NewId()
menu_ORIENT_UP = wx.NewId()
menu_ORIENT_NONE = wx.NewId()
menu_SET_AS_REF_OBJ = wx.NewId()
menu_ABOUT = wx.NewId() # Help menu items.
# Our tool IDs:
id_SELECT = wx.NewId()
id_RECT = wx.NewId()
# Our tool option IDs:
id_FILL_OPT = wx.NewId()
id_PEN_OPT = wx.NewId()
id_LINE_OPT = wx.NewId()
id_EDIT = wx.NewId()
id_LINESIZE_0 = wx.NewId()
id_LINESIZE_1 = wx.NewId()
id_LINESIZE_2 = wx.NewId()
id_LINESIZE_3 = wx.NewId()
id_LINESIZE_4 = wx.NewId()
id_LINESIZE_5 = wx.NewId()
id_OBJTYPE_0 = wx.NewId()
id_OBJTYPE_1 = wx.NewId()
id_OBJTYPE_2 = wx.NewId()
id_OBJTYPE_3 = wx.NewId()
id_OBJTYPE_4 = wx.NewId()
id_OBJTYPE_5 = wx.NewId()
id_OBJTYPE_6 = wx.NewId()
id_OBJTYPE_7 = wx.NewId()
id_OBJTYPE_8 = wx.NewId()
INITIAL_FRAME = 1
# Size of the drawing page, in pixels.
PAGE_WIDTH = 1000
PAGE_HEIGHT = 1000
def adjust_borders(fig, targets):
"Translate desired pixel sizes into percentages based on figure size."
dpi = fig.get_dpi()
width, height = [float(v * dpi) for v in fig.get_size_inches()]
conversions = {
'top': lambda v: 1.0 - (v / height),
'bottom': lambda v: v / height,
'right': lambda v: 1.0 - (v / width),
'left': lambda v: v / width,
'hspace': lambda v: v / height,
'wspace': lambda v: v / width,
}
opts = dict((k, conversions[k](v)) for k, v in targets.items())
fig.subplots_adjust(**opts)
def adjust_spines(ax,spines):
for loc, spine in ax.spines.iteritems():
if loc in spines:
spine.set_position(('outward',10)) # outward by 10 points
spine.set_smart_bounds(True)
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
def distance(x1, y1, x2, y2):
# Calculates the length of a line in 2d space.
return math.sqrt(math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2))
def find_angle(x1,y1,x2,y2,x3,y3):
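    # Treat (x1,y1), (x2,y2), (x3,y3) as a triangle and apply the law of
    # cosines, cos(C) = (a^2 + b^2 - c^2) / (2*a*b), to the angles at the
    # first two vertices; the smaller of the two is returned. Rounding the
    # cosines to two decimals below keeps acos() from failing on values that
    # drift marginally outside [-1, 1] through floating-point error.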
a = distance(x1,y1,x2,y2)
b = distance(x1,y1,x3,y3)
c = distance(x2,y2,x3,y3)
try:
cos1 = (math.pow(a,2) + math.pow(b,2) - math.pow(c,2))/ (2 * a * b)
except ZeroDivisionError:
cos1 = 1
try:
cos2 = (math.pow(a,2) + math.pow(c,2) - math.pow(b,2))/ (2 * a * c)
except ZeroDivisionError:
cos2 = 1
ang1 = math.acos(round(cos1,2))
ang2 = math.acos(round(cos2,2))
ang = min(ang1, ang2)
return ang
def gen_logistic(A, K, B, Q, M, t, v=0.5):
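    """Generalised logistic (Richards) curve evaluated at time t.
    A is the lower asymptote, K the upper asymptote, B the growth rate, Q an
    offset, M the location of maximum growth and v a shape parameter:
        A + (K - A) / (1 + Q * exp(-B * (t - M)))**(1 / v)
    For example, gen_logistic(0, 1, 1, 1, 0, 0, v=1) == 0.5 -- a plain logistic
    evaluated at its midpoint.
    """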
num = K - A
den = 1 + Q*(math.exp(-B*(t-M)))
den = math.pow(den, 1/v)
return A + num/den
#----------------------------------------------------------------------------
class DrawingFrame(wx.Frame):
""" A frame showing the contents of a single document. """
# ==========================================
# == Initialisation and Window Management ==
# ==========================================
def __init__(self, parent, id, title, fileName=None):
""" Standard constructor.
'parent', 'id' and 'title' are all passed to the standard wx.Frame
constructor. 'buffer = figurecanvas.tostring_rgb()
fileName' is the name and path of a saved file to
load into this frame, if any.
"""
wx.Frame.__init__(self, parent, id, title,
style = wx.DEFAULT_FRAME_STYLE | wx.WANTS_CHARS |
wx.NO_FULL_REPAINT_ON_RESIZE)
self.ID_SLIDER = 1
self.ID_STOP = 2
self.ID_PLAY = 3
self.ID_EDIT = 4
self.ID_TIMER_PLAY = 5
self.fps = 5
self.playing = False
self.current_frame = 1
self.edit = True
self.core9_rels = {}
self.proj_rels = {}
self.obj_counter = 1
self.group_counter = 1
self.frame_data = {}
self.groups = {}
self.group_selection = []
#self.frame_data[self.current_frame] = {'contents':[], 'selection':[], 'groups':[]}
# Setup our menu bar.
menuBar = wx.MenuBar()
self.fileMenu = wx.Menu()
self.fileMenu.Append(wx.ID_NEW, "New\tCtrl-N", "Create a new document")
self.fileMenu.Append(wx.ID_OPEN, "Open...\tCtrl-O", "Open an existing document")
self.fileMenu.Append(wx.ID_CLOSE, "Close\tCtrl-W")
self.fileMenu.AppendSeparator()
self.fileMenu.Append(wx.ID_SAVE, "Save\tCtrl-S")
self.fileMenu.Append(wx.ID_SAVEAS, "Save As...")
self.fileMenu.Append(wx.ID_REVERT, "Revert...")
self.fileMenu.AppendSeparator()
self.fileMenu.Append(wx.ID_EXIT, "Quit\tCtrl-Q")
menuBar.Append(self.fileMenu, "File")
self.editMenu = wx.Menu()
self.editMenu.Append(wx.ID_UNDO, "Undo\tCtrl-Z")
self.editMenu.Append(wx.ID_REDO, "Redo\tCtrl-Y")
self.editMenu.AppendSeparator()
self.editMenu.Append(wx.ID_SELECTALL, "Select All\tCtrl-A")
self.editMenu.AppendSeparator()
self.editMenu.Append(menu_DUPLICATE, "Duplicate\tCtrl-D")
self.editMenu.Append(menu_EDIT_PROPS,"Edit...\tCtrl-E", "Edit object properties")
self.editMenu.Append(wx.ID_CLEAR, "Delete\tDel")
menuBar.Append(self.editMenu, "Edit")
self.viewMenu = wx.Menu()
self.viewMenu.Append(menu_DC, "Normal quality",
"Normal rendering using wx.DC",
kind=wx.ITEM_RADIO)
self.viewMenu.Append(menu_GCDC,"High quality",
"Anti-aliased rendering using wx.GCDC",
kind=wx.ITEM_RADIO)
menuBar.Append(self.viewMenu, "View")
self.toolsMenu = wx.Menu()
self.toolsMenu.Append(id_SELECT, "Selection", kind=wx.ITEM_RADIO)
self.toolsMenu.Append(id_RECT, "Rectangle", kind=wx.ITEM_RADIO)
menuBar.Append(self.toolsMenu, "Tools")
self.objectMenu = wx.Menu()
self.objectMenu.Append(menu_MOVE_FORWARD, "Move Forward")
self.objectMenu.Append(menu_MOVE_TO_FRONT, "Move to Front\tCtrl-F")
self.objectMenu.Append(menu_MOVE_BACKWARD, "Move Backward")
self.objectMenu.Append(menu_MOVE_TO_BACK, "Move to Back\tCtrl-B")
menuBar.Append(self.objectMenu, "Object")
self.helpMenu = wx.Menu()
self.helpMenu.Append(menu_ABOUT, "About pySketch...")
menuBar.Append(self.helpMenu, "Help")
self.SetMenuBar(menuBar)
# Create our statusbar
self.CreateStatusBar()
# Create our toolbar.
tsize = (15,15)
self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT)
artBmp = wx.ArtProvider.GetBitmap
self.toolbar.AddSimpleTool(
wx.ID_NEW, artBmp(wx.ART_NEW, wx.ART_TOOLBAR, tsize), "New")
self.toolbar.AddSimpleTool(
wx.ID_OPEN, artBmp(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize), "Open")
self.toolbar.AddSimpleTool(
wx.ID_SAVE, artBmp(wx.ART_FILE_SAVE, wx.ART_TOOLBAR, tsize), "Save")
self.toolbar.AddSimpleTool(
wx.ID_SAVEAS, artBmp(wx.ART_FILE_SAVE_AS, wx.ART_TOOLBAR, tsize),
"Save As...")
#-------
self.toolbar.AddSeparator()
self.toolbar.AddSimpleTool(
wx.ID_UNDO, artBmp(wx.ART_UNDO, wx.ART_TOOLBAR, tsize), "Undo")
self.toolbar.AddSimpleTool(
wx.ID_REDO, artBmp(wx.ART_REDO, wx.ART_TOOLBAR, tsize), "Redo")
self.toolbar.AddSeparator()
self.toolbar.AddSimpleTool(
menu_DUPLICATE, wx.Bitmap("images/duplicate.bmp", wx.BITMAP_TYPE_BMP),
"Duplicate")\
#-------
self.toolbar.AddSeparator()
self.toolbar.AddSimpleTool(
menu_MOVE_FORWARD, wx.Bitmap("images/moveForward.bmp", wx.BITMAP_TYPE_BMP),
"Move Forward")
self.toolbar.AddSimpleTool(
menu_MOVE_BACKWARD, wx.Bitmap("images/moveBack.bmp", wx.BITMAP_TYPE_BMP),
"Move Backward")
self.toolbar.AddSeparator()
self.slider = wx.Slider(self.toolbar, -1, 1, 1, 300, None, (500, 50), wx.SL_HORIZONTAL)
self.toolbar.AddSeparator()
self.play = self.toolbar.AddLabelTool(self.ID_PLAY, '', wx.Bitmap('images/play.png'))
self.stop = self.toolbar.AddLabelTool(self.ID_STOP, '', wx.Bitmap('images/stop.png'))
self.toolbar.AddControl(self.slider)
self.toolbar.AddSeparator()
self.edit_button = self.toolbar.AddLabelTool(self.ID_EDIT, '', wx.Bitmap('images/logo.bmp'))
self.toolbar.Realize()
self.playTimer = wx.Timer(self, slider_TIMER)
self.Bind(wx.EVT_TIMER, self.onNextFrame, self.playTimer)
self.Bind(wx.EVT_SLIDER, self.onSlider, self.slider)
self.Bind(wx.EVT_TOOL, self.onStop, self.stop)
self.Bind(wx.EVT_TOOL, self.onPlay, self.play)
self.Bind(wx.EVT_TOOL, self.onEdit, self.edit_button)
# Associate menu/toolbar items with their handlers.
menuHandlers = [
(wx.ID_NEW, self.doNew),
(wx.ID_OPEN, self.doOpen),
(wx.ID_CLOSE, self.doClose),
(wx.ID_SAVE, self.doSave),
(wx.ID_SAVEAS, self.doSaveAs),
(wx.ID_REVERT, self.doRevert),
(wx.ID_EXIT, self.doExit),
(wx.ID_UNDO, self.doUndo),
(wx.ID_REDO, self.doRedo),
(wx.ID_SELECTALL, self.doSelectAll),
(menu_DUPLICATE, self.doDuplicate),
(menu_GROUP, self.doGroup),
(menu_UNGROUP, self.doUngroup),
(menu_EDIT_PROPS, self.doEditObject),
(wx.ID_CLEAR, self.doDelete),
(id_SELECT, self.onChooseTool, self.updChooseTool),
(id_RECT, self.onChooseTool, self.updChooseTool),
(menu_DC, self.doChooseQuality),
(menu_GCDC, self.doChooseQuality),
(menu_MOVE_FORWARD, self.doMoveForward),
(menu_MOVE_TO_FRONT, self.doMoveToFront),
(menu_MOVE_BACKWARD, self.doMoveBackward),
(menu_MOVE_TO_BACK, self.doMoveToBack),
(menu_ORIENT_RIGHT, self.doOrientRight),
(menu_ORIENT_LEFT, self.doOrientLeft),
(menu_ORIENT_DOWN, self.doOrientDown),
(menu_ORIENT_UP, self.doOrientUp),
(menu_ORIENT_NONE, self.doOrientNone),
(menu_SET_AS_REF_OBJ, self.doSetRefObj),
(menu_ABOUT, self.doShowAbout)]
for combo in menuHandlers:
id, handler = combo[:2]
self.Bind(wx.EVT_MENU, handler, id = id)
if len(combo)>2:
self.Bind(wx.EVT_UPDATE_UI, combo[2], id = id)
# Install our own method to handle closing the window. This allows us
# to ask the user if he/she wants to save before closing the window, as
# well as keeping track of which windows are currently open.
self.Bind(wx.EVT_CLOSE, self.doClose)
# Install our own method for handling keystrokes. We use this to let
# the user move the selected object(s) around using the arrow keys.
self.Bind(wx.EVT_CHAR_HOOK, self.onKeyEvent)
# Setup our top-most panel. This holds the entire contents of the
# window, excluding the menu bar.
self.topPanel = wx.Panel(self, -1, style=wx.SIMPLE_BORDER)
# Setup our tool palette, with all our drawing tools and option icons.
self.toolPalette = wx.BoxSizer(wx.VERTICAL)
self.selectIcon = ToolPaletteToggle(self.topPanel, id_SELECT,
"select", "Selection Tool", mode=wx.ITEM_RADIO)
self.rectIcon = ToolPaletteToggle(self.topPanel, id_RECT,
"rect", "Rectangle Tool", mode=wx.ITEM_RADIO)
# Create the tools
self.tools = {
'select' : (self.selectIcon, SelectDrawingTool()),
'rect' : (self.rectIcon, RectDrawingTool()),
}
toolSizer = wx.GridSizer(0, 2, 5, 5)
toolSizer.Add(self.selectIcon)
toolSizer.Add(self.rectIcon)
self.optionIndicator = ToolOptionIndicator(self.topPanel)
self.optionIndicator.SetToolTip(
wx.ToolTip("Shows Current Pen/Fill/Line Size Settings"))
optionSizer = wx.BoxSizer(wx.HORIZONTAL)
self.penOptIcon = ToolPaletteButton(self.topPanel, id_PEN_OPT,
"penOpt", "Set Pen Colour",)
self.fillOptIcon = ToolPaletteButton(self.topPanel, id_FILL_OPT,
"fillOpt", "Set Fill Colour")
self.lineOptIcon = ToolPaletteButton(self.topPanel, id_LINE_OPT,
"lineOpt", "Set Line Size")
margin = wx.LEFT | wx.RIGHT
optionSizer.Add(self.penOptIcon, 0, margin, 1)
optionSizer.Add(self.fillOptIcon, 0, margin, 1)
optionSizer.Add(self.lineOptIcon, 0, margin, 1)
editSizer = wx.BoxSizer(wx.HORIZONTAL)
self.editOptIcon = ToolPaletteButton(self.topPanel, id_EDIT,
"lineOpt", "Set the obj type",)
editSizer.Add(self.editOptIcon, 0, margin, 1)
# By default the name of object is number, change it using the text field
self.objName = wx.TextCtrl(self.topPanel, -1)
self.objNameBtn = wx.Button(self.topPanel, -1,"ChangeName")
self.Bind(wx.EVT_BUTTON, self.changeObjName, id = self.objNameBtn.GetId())
#self.some_text.SetLabel(mysql_data)
self.objSize = wx.StaticText(self.topPanel, wx.ID_ANY, label="0 x 0", style=wx.ALIGN_CENTER)
self.objPos = wx.StaticText(self.topPanel, wx.ID_ANY, label="0 x 0", style=wx.ALIGN_CENTER)
self.frameLabel = wx.StaticText(self.topPanel, wx.ID_ANY, label="1", style=wx.ALIGN_CENTER)
objInfoSizer = wx.BoxSizer(wx.VERTICAL)
objInfoSizer.Add(self.frameLabel, 0, margin, 3)
objInfoSizer.Add((0, 0), 0, margin, 5) # Spacer.
objInfoSizer.Add((0, 0), 0, margin, 5) # Spacer.
objInfoSizer.Add(self.objSize, 0, margin, 3)
objInfoSizer.Add((0, 0), 0, margin, 5) # Spacer.
objInfoSizer.Add((0, 0), 0, margin, 5) # Spacer.
objInfoSizer.Add(self.objPos, 0, margin, 3)
objInfoSizer.Add((0, 0), 0, margin, 5) # Spacer.
objInfoSizer.Add((0, 0), 0, margin, 5) # Spacer.
objInfoSizer.Add(self.objName, 0, margin, 3)
objInfoSizer.Add((0, 0), 0, margin, 5) # Spacer.
objInfoSizer.Add(self.objNameBtn, 0, margin, 3)
margin = wx.TOP | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE
self.toolPalette.Add(toolSizer, 0, margin, 5)
self.toolPalette.Add((0, 0), 0, margin, 5) # Spacer.
self.toolPalette.Add(self.optionIndicator, 0, margin, 5)
self.toolPalette.Add(optionSizer, 0, margin, 5)
self.toolPalette.Add((0, 0), 0, margin, 5) # Spacer.
self.toolPalette.Add(editSizer, 0, margin, 5)
self.toolPalette.Add((0, 0), 0, margin, 5) # Spacer.
self.toolPalette.Add((0, 0), 0, margin, 5) # Spacer.
self.toolPalette.Add(objInfoSizer, 0, margin, 5)
# Make the tool palette icons respond when the user clicks on them.
for tool in self.tools.itervalues():
tool[0].Bind(wx.EVT_BUTTON, self.onChooseTool)
self.selectIcon.Bind(wx.EVT_BUTTON, self.onChooseTool)
self.penOptIcon.Bind(wx.EVT_BUTTON, self.onPenOptionIconClick)
self.fillOptIcon.Bind(wx.EVT_BUTTON, self.onFillOptionIconClick)
self.lineOptIcon.Bind(wx.EVT_BUTTON, self.onLineOptionIconClick)
self.editOptIcon.Bind(wx.EVT_BUTTON, self.onEditOptionIconClick)
# Setup the main drawing area.
self.drawPanel = wx.ScrolledWindow(self.topPanel, -1,
style=wx.SUNKEN_BORDER|wx.NO_FULL_REPAINT_ON_RESIZE)
#self.infoPanel = wx.ScrolledWindow(self.topPanel, -1,
# style=wx.SUNKEN_BORDER|wx.NO_FULL_REPAINT_ON_RESIZE)
self.infoPanel = wx.TextCtrl(self.topPanel, style=wx.TE_MULTILINE|wx.TE_RICH2)
self.infoPanel.Enable(0)
self.infoPanel.SetEditable(False)
gray_colour = wx.Colour(red=192, green=192, blue=192)
self.infoPanel.SetBackgroundColour(wx.BLACK)
dastyle = wx.TextAttr()
#dastyle.SetBackgroundColour(wx.Colour(0,0,255))
dastyle.SetTextColour(wx.Colour(255,255,255))
points = self.infoPanel.GetFont().GetPointSize()
bold_font = wx.Font(points+3, wx.ROMAN, wx.BOLD, True)
dastyle.SetFont(bold_font)
self.infoPanel.SetDefaultStyle(dastyle)
self.drawPanel.SetBackgroundColour(gray_colour)
self.drawPanel.EnableScrolling(True, True)
#self.infoPanel.EnableScrolling(True, True)
self.drawPanel.SetScrollbars(40, 40, PAGE_WIDTH / 40, PAGE_HEIGHT / 40)
#self.infoPanel.SetScrollbars(20, 75, PAGE_WIDTH / 30, PAGE_HEIGHT / 30)
self.drawPanel.Bind(wx.EVT_MOUSE_EVENTS, self.onMouseEvent)
self.drawPanel.Bind(wx.EVT_IDLE, self.onIdle)
self.drawPanel.Bind(wx.EVT_SIZE, self.onSize)
self.drawPanel.Bind(wx.EVT_PAINT, self.onPaint)
self.drawPanel.Bind(wx.EVT_ERASE_BACKGROUND, self.onEraseBackground)
self.drawPanel.Bind(wx.EVT_SCROLLWIN, self.onPanelScroll)
#self.Bind(wx.EVT_TIMER, self.onIdle)
# Position everything in the window.
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(self.toolPalette, 0)
topSizer.Add(self.drawPanel, proportion=2, flag=wx.EXPAND)
topSizer.Add(self.infoPanel, proportion=1, flag=wx.EXPAND)
self.topPanel.SetAutoLayout(True)
self.topPanel.SetSizer(topSizer)
self.SetSizeHints(250, 200)
self.SetSize(wx.Size(1800, 1200))
# Select an initial tool.
self.curToolName = None
self.curToolIcon = None
self.curTool = None
self.setCurrentTool("select")
# Set initial dc mode to fast
self.wrapDC = lambda dc: dc
# Setup our frame to hold the contents of a sketch document.
self.dirty = False
self.fileName = fileName
self.contents = [] # front-to-back ordered list of DrawingObjects.
self.selection = [] # List of selected DrawingObjects.
self.undoStack = [] # Stack of saved contents for undo.
self.redoStack = [] # Stack of saved contents for redo.
if self.fileName != None:
self.loadContents()
self._initBuffer()
self._adjustMenus()
# Finally, set our initial pen, fill and line options.
self._setPenColour(wx.BLACK)
self._setFillColour(wx.Colour(215,253,254))
self._setLineSize(2)
self.backgroundFillBrush = None # create on demand
# Start the background redraw timer
# This is optional, but it gives the double-buffered contents a
# chance to redraw even when idle events are disabled (like during
# resize and scrolling)
self.redrawTimer = wx.Timer(self, redraw_TIMER)
self.Bind(wx.EVT_TIMER, self.onIdle, self.redrawTimer)
self.redrawTimer.Start(800)
# ============================
# == Event Handling Methods ==
# ============================
def changeObjName(self, event):
"""Change the object from default number to what ever entered in the text field"""
objName = self.objName.GetValue()
        # Renaming only makes sense for exactly one selected object; skip
        # otherwise (an empty selection would raise an IndexError below).
        if len(self.selection) != 1:
            return
self.selection[0].setName(objName.encode())
self.requestRedraw()
def onNextFrame(self, event):
self.slider.SetValue(self.current_frame)
self.frameLabel.SetLabel(repr(self.current_frame))
if self.current_frame not in self.frame_data or self.edit:
self.frame_data[self.current_frame] = self._buildStoredState()
elif self.current_frame in self.frame_data and not self.edit:
state = self.frame_data[self.current_frame]
self._restoreStoredState(state)
self.current_frame += 1
def onEdit(self, event):
if not self.edit:
self.edit = True
else:
self.edit = False
def onPlay(self, event):
print 'PLAY'
if not self.playing:
self.playTimer.Start(100)
self.playing = True
def onStop(self, event):
print 'STOP'
if self.playing:
self.playTimer.Stop()
self.playing = False
def onSlider(self, event):
self.current_frame = self.slider.GetValue()
#print self.current_frame
if self.current_frame not in self.frame_data:
self.frame_data[self.current_frame] = self._buildStoredState()
else:
state = self.frame_data[self.current_frame]
self._restoreStoredState(state)
def onEditOptionIconClick(self, event):
""" Respond to the user clicking on the "Line Options" icon.
"""
if len(self.selection) == 1:
menu = self._buildObjTypePopup(self.selection[0].getObjType())
else:
menu = self._buildObjTypePopup(self.objType)
pos = self.editOptIcon.GetPosition()
pos.y = pos.y + self.lineOptIcon.GetSize().height
self.PopupMenu(menu, pos)
menu.Destroy()
def onPenOptionIconClick(self, event):
""" Respond to the user clicking on the "Pen Options" icon.
"""
data = wx.ColourData()
if len(self.selection) == 1:
data.SetColour(self.selection[0].getPenColour())
else:
data.SetColour(self.penColour)
dialog = wx.ColourDialog(self, data)
dialog.SetTitle('Choose line colour')
if dialog.ShowModal() == wx.ID_OK:
c = dialog.GetColourData().GetColour()
self._setPenColour(wx.Colour(c.Red(), c.Green(), c.Blue()))
dialog.Destroy()
def onFillOptionIconClick(self, event):
""" Respond to the user clicking on the "Fill Options" icon.
"""
data = wx.ColourData()
if len(self.selection) == 1:
data.SetColour(self.selection[0].getFillColour())
else:
data.SetColour(self.fillColour)
dialog = wx.ColourDialog(self, data)
dialog.SetTitle('Choose fill colour')
if dialog.ShowModal() == wx.ID_OK:
c = dialog.GetColourData().GetColour()
self._setFillColour(wx.Colour(c.Red(), c.Green(), c.Blue()))
dialog.Destroy()
def onLineOptionIconClick(self, event):
""" Respond to the user clicking on the "Line Options" icon.
"""
if len(self.selection) == 1:
menu = self._buildLineSizePopup(self.selection[0].getLineSize())
else:
menu = self._buildLineSizePopup(self.lineSize)
pos = self.lineOptIcon.GetPosition()
pos.y = pos.y + self.lineOptIcon.GetSize().height
self.PopupMenu(menu, pos)
menu.Destroy()
def onKeyEvent(self, event):
""" Respond to a keypress event.
We make the arrow keys move the selected object(s) by one pixel in
the given direction.
"""
step = 1
if event.ShiftDown():
step = 5
if event.GetKeyCode() == wx.WXK_UP:
self._moveObject(0, -step)
elif event.GetKeyCode() == wx.WXK_DOWN:
self._moveObject(0, step)
elif event.GetKeyCode() == wx.WXK_LEFT:
self._moveObject(-step, 0)
elif event.GetKeyCode() == wx.WXK_RIGHT:
self._moveObject(step, 0)
else:
event.Skip()
def onMouseEvent(self, event):
""" Respond to mouse events in the main drawing panel
How we respond depends on the currently selected tool.
"""
if self.curTool is None: return
# Translate event into canvas coordinates and pass to current tool
origx,origy = event.X, event.Y
pt = self._getEventCoordinates(event)
event.m_x = pt.x
event.m_y = pt.y
handled = self.curTool.onMouseEvent(self,event)
event.m_x = origx
event.m_y = origy
if handled: return
# otherwise handle it ourselves
if event.RightDown():
self.doPopupContextMenu(event)
def doPopupContextMenu(self, event):
""" Respond to the user right-clicking within our drawing panel.
We select the clicked-on item, if necessary, and display a pop-up
menu of available options which can be applied to the selected
item(s).
"""
mousePt = self._getEventCoordinates(event)
obj = self.getObjectAt(mousePt)
if obj == None: return # Nothing selected.
# Select the clicked-on object.
self.select(obj)
# Build our pop-up menu.
menu = wx.Menu()
menu.Append(menu_DUPLICATE, "Duplicate")
menu.Append(menu_GROUP, "Group")
menu.Append(menu_UNGROUP, "Ungroup")
menu.Append(menu_EDIT_PROPS,"Edit...")
menu.Append(wx.ID_CLEAR, "Delete")
menu.AppendSeparator()
menu.Append(menu_ORIENT_RIGHT, "Set orientation to RIGHT")
menu.Append(menu_ORIENT_LEFT, "Set orientation to LEFT")
menu.Append(menu_ORIENT_DOWN, "Set orientation to DOWN")
menu.Append(menu_ORIENT_UP, "Set orientation to UP")
menu.Append(menu_ORIENT_NONE, "Set orientation to None")
menu.Append(menu_SET_AS_REF_OBJ, "Set/Unset as Reference Obj")
menu.AppendSeparator()
menu.Append(menu_MOVE_FORWARD, "Move Forward")
menu.Append(menu_MOVE_TO_FRONT, "Move to Front")
menu.Append(menu_MOVE_BACKWARD, "Move Backward")
menu.Append(menu_MOVE_TO_BACK, "Move to Back")
menu.Enable(menu_EDIT_PROPS, obj.hasPropertyEditor())
menu.Enable(menu_MOVE_FORWARD, obj != self.contents[0])
menu.Enable(menu_MOVE_TO_FRONT, obj != self.contents[0])
menu.Enable(menu_MOVE_BACKWARD, obj != self.contents[-1])
menu.Enable(menu_MOVE_TO_BACK, obj != self.contents[-1])
menu.Enable(menu_ORIENT_RIGHT, obj.orientation != 'RIGHT')
menu.Enable(menu_ORIENT_LEFT, obj.orientation != 'LEFT')
menu.Enable(menu_ORIENT_DOWN, obj.orientation != 'DOWN')
menu.Enable(menu_ORIENT_UP, obj.orientation != 'UP')
menu.Enable(menu_ORIENT_NONE, obj.orientation != None)
self.Bind(wx.EVT_MENU, self.doDuplicate, id=menu_DUPLICATE)
self.Bind(wx.EVT_MENU, self.doGroup, id=menu_GROUP)
self.Bind(wx.EVT_MENU, self.doUngroup, id=menu_UNGROUP)
self.Bind(wx.EVT_MENU, self.doEditObject, id=menu_EDIT_PROPS)
self.Bind(wx.EVT_MENU, self.doDelete, id=wx.ID_CLEAR)
self.Bind(wx.EVT_MENU, self.doMoveForward, id=menu_MOVE_FORWARD)
self.Bind(wx.EVT_MENU, self.doMoveToFront, id=menu_MOVE_TO_FRONT)
self.Bind(wx.EVT_MENU, self.doMoveBackward,id=menu_MOVE_BACKWARD)
self.Bind(wx.EVT_MENU, self.doMoveToBack, id=menu_MOVE_TO_BACK)
self.Bind(wx.EVT_MENU, self.doOrientRight, id=menu_ORIENT_RIGHT)
self.Bind(wx.EVT_MENU, self.doOrientLeft, id=menu_ORIENT_LEFT)
self.Bind(wx.EVT_MENU, self.doOrientDown, id=menu_ORIENT_DOWN)
self.Bind(wx.EVT_MENU, self.doOrientUp, id=menu_ORIENT_UP)
self.Bind(wx.EVT_MENU, self.doOrientNone, id=menu_ORIENT_NONE)
self.Bind(wx.EVT_MENU, self.doSetRefObj, id=menu_SET_AS_REF_OBJ)
        # Show the pop-up menu at the clicked-on point (drawPanel coordinates).
        self.drawPanel.PopupMenu(menu, mousePt)
menu.Destroy()
def onSize(self, event):
"""
Called when the window is resized. We set a flag so the idle
handler will resize the buffer.
"""
self.requestRedraw()
def onIdle(self, event):
"""
If the size was changed then resize the bitmap used for double
buffering to match the window size. We do it in Idle time so
there is only one refresh after resizing is done, not lots while
it is happening.
"""
if self._reInitBuffer:
#print 'Compute the relations in onIdle'
self.compute_spatial_rels()
if self.IsShown():
self._initBuffer()
self.drawPanel.Refresh(False)
def compute_spatial_rels(self):
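        # (Re)compute the pairwise relations for every pair that involves at
        # least one currently selected object, then render them in the side
        # info panel.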
if len(self.contents) <= 1:
self.infoPanel.Clear()
return
#core9_rels = {}
for obj1 in self.contents:
if obj1 not in self.selection:
continue
for obj2 in self.contents:
if obj1.name == obj2.name:
continue
self.core9_rels[(obj1.name, obj2.name)] = self.compute_core9_rel(obj1, obj2)
self.core9_rels[(obj2.name, obj1.name)] = self.compute_core9_rel(obj2, obj1)
self.proj_rels[(obj1.name, obj2.name)] = self.compute_proj_rels(self.core9_rels[(obj1.name, obj2.name)], obj1.orientation)
self.proj_rels[(obj2.name, obj1.name)] = self.compute_proj_rels(self.core9_rels[(obj2.name, obj1.name)], obj2.orientation)
display_text = self.get_rels_pretty_text(self.core9_rels, self.proj_rels)
self.infoPanel.Clear()
self.infoPanel.Enable(1)
self.infoPanel.SetInsertionPoint(0)
self.infoPanel.WriteText(display_text)
self.infoPanel.Enable(0)
def get_rels_pretty_text(self, core9_rels_dict, proj_rels_dict):
pretty_text = 'CORE-9\n'
for key in core9_rels_dict:
            pretty_text += repr(key[0]) + ', ' + repr(key[1]) + ' : (' + \
                           core9_rels_dict[key][0] + ', ' + core9_rels_dict[key][1] + ')\n'
pretty_text += '\n-----------------------------------------------------------\n'
pretty_text += '\nProjective Relations\n'
for key in proj_rels_dict:
pretty_text += repr(key[0]) + ', ' + repr(key[1]) + ' : ' + proj_rels_dict[key] + '\n'
return pretty_text
def compute_proj_rels(self, (x_rel, y_rel), ref_obj_orientation):
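        # A hedged reading of this rule: the pair (reference, other) is labelled
        # 'in_front_of' when the other object is not strictly to one side of the
        # reference along the perpendicular axis and lies on the side the
        # reference faces. Canvas y grows downward, so for a DOWN-facing
        # reference the "front" is the larger-y side: y_rel 'before' or 'meets'
        # means the reference ends at or above where the other object starts.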
if ref_obj_orientation == 'DOWN' and x_rel != 'before' and x_rel != 'after' \
and (y_rel == 'before' or y_rel == 'meets'):
return 'in_front_of'
elif ref_obj_orientation == 'UP' and x_rel != 'before' and x_rel != 'after' \
and (y_rel == 'after' or y_rel == 'meets_i'):
return 'in_front_of'
elif ref_obj_orientation == 'RIGHT' and (x_rel == 'before' or x_rel == 'meets') \
and (y_rel != 'before' and y_rel != 'after'):
return 'in_front_of'
elif ref_obj_orientation == 'LEFT' and (x_rel == 'after' or x_rel == 'meets_i') \
and (y_rel != 'before' and y_rel != 'after'):
return 'in_front_of'
else:
return 'NOT in_front_of'
def compute_core9_rel(self, obj1, obj2):
o1_xs, o1_xe = obj1.position.x, obj1.position.x + obj1.size.width
o2_xs, o2_xe = obj2.position.x, obj2.position.x + obj2.size.width
o1_ys, o1_ye = obj1.position.y, obj1.position.y + obj1.size.height
o2_ys, o2_ye = obj2.position.y, obj2.position.y + obj2.size.height
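        # CORE-9 style relations: project both bounding boxes onto each axis and
        # compute an Allen-style interval relation (before, meets, overlaps,
        # starts, during, finishes, equal, plus their '_i' inverses) per axis.
        # For example, x-spans [0, 5] and [5, 9] give x_rel = 'meets', while
        # [2, 6] against [0, 10] gives x_rel = 'during'.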
# In X-axis
if o1_xe < o2_xs:
x_rel = 'before'
elif o2_xe < o1_xs:
x_rel = 'after'
elif o1_xe == o2_xs:
x_rel = 'meets'
elif o2_xe == o1_xs:
x_rel = 'meets_i'
elif o1_xs == o2_xs and o1_xe < o2_xe:
x_rel = 'starts'
elif o1_xs == o2_xs and o1_xe > o2_xe:
x_rel = 'starts_i'
elif o1_xe == o2_xe and o1_xs > o2_xs:
x_rel = 'finishes'
elif o1_xe == o2_xe and o1_xs < o2_xs:
x_rel = 'finishes_i'
elif o1_xe == o2_xe and o1_xs == o2_xs:
x_rel = 'equal'
elif o1_xs > o2_xs and o1_xe < o2_xe:
x_rel = 'during'
elif o1_xs < o2_xs and o1_xe > o2_xe:
x_rel = 'during_i'
elif o1_xs < o2_xs and o1_xe < o2_xe:
x_rel = 'overlaps'
elif o1_xs > o2_xs and o1_xe > o2_xe:
x_rel = 'overlaps_i'
# In Y-axis
if o1_ye < o2_ys:
y_rel = 'before'
elif o2_ye < o1_ys:
y_rel = 'after'
elif o1_ye == o2_ys:
y_rel = 'meets'
elif o2_ye == o1_ys:
y_rel = 'meets_i'
elif o1_ys == o2_ys and o1_ye < o2_ye:
y_rel = 'starts'
elif o1_ys == o2_ys and o1_ye > o2_ye:
y_rel = 'starts_i'
elif o1_ye == o2_ye and o1_ys > o2_ys:
y_rel = 'finishes'
elif o1_ye == o2_ye and o1_ys < o2_ys:
y_rel = 'finishes_i'
elif o1_ye == o2_ye and o1_ys == o2_ys:
y_rel = 'equal'
elif o1_ys > o2_ys and o1_ye < o2_ye:
y_rel = 'during'
elif o1_ys < o2_ys and o1_ye > o2_ye:
y_rel = 'during_i'
elif o1_ys < o2_ys and o1_ye < o2_ye:
y_rel = 'overlaps'
elif o1_ys > o2_ys and o1_ye > o2_ye:
y_rel = 'overlaps_i'
return(x_rel, y_rel)
def requestRedraw(self):
"""Requests a redraw of the drawing panel contents.
The actual redrawing doesn't happen until the next idle time.
"""
self._reInitBuffer = True
#print 'redrawing'
def onPaint(self, event):
"""
Called when the window is exposed.
"""
# Create a buffered paint DC. It will create the real
# wx.PaintDC and then blit the bitmap to it when dc is
# deleted.
dc = wx.BufferedPaintDC(self.drawPanel, self.buffer)
# On Windows, if that's all we do things look a little rough
# So in order to make scrolling more polished-looking
# we iterate over the exposed regions and fill in unknown
# areas with a fall-back pattern.
if wx.Platform != '__WXMSW__':
return
# First get the update rects and subtract off the part that
# self.buffer has correct already
region = self.drawPanel.GetUpdateRegion()
panelRect = self.drawPanel.GetClientRect()
offset = list(self.drawPanel.CalcUnscrolledPosition(0,0))
offset[0] -= self.saved_offset[0]
offset[1] -= self.saved_offset[1]
        region.Subtract(-offset[0], -offset[1], panelRect.Width, panelRect.Height)
# Now iterate over the remaining region rects and fill in with a pattern
rgn_iter = wx.RegionIterator(region)
if rgn_iter.HaveRects():
self.setBackgroundMissingFillStyle(dc)
offset = self.drawPanel.CalcUnscrolledPosition(0,0)
while rgn_iter:
r = rgn_iter.GetRect()
if r.Size != self.drawPanel.ClientSize:
dc.DrawRectangleRect(r)
rgn_iter.Next()
def setBackgroundMissingFillStyle(self, dc):
if self.backgroundFillBrush is None:
# Win95 can only handle a 8x8 stipple bitmaps max
#stippleBitmap = wx.BitmapFromBits("\xf0"*4 + "\x0f"*4,8,8)
# ...but who uses Win95?
stippleBitmap = wx.BitmapFromBits("\x06",2,2)
stippleBitmap.SetMask(wx.Mask(stippleBitmap))
bgbrush = wx.Brush(wx.WHITE, wx.STIPPLE_MASK_OPAQUE)
bgbrush.SetStipple(stippleBitmap)
self.backgroundFillBrush = bgbrush
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(self.backgroundFillBrush)
dc.SetTextForeground(wx.LIGHT_GREY)
dc.SetTextBackground(wx.WHITE)
def onEraseBackground(self, event):
"""
Overridden to do nothing to prevent flicker
"""
pass
def onPanelScroll(self, event):
"""
Called when the user changes scrolls the drawPanel
"""
# make a note to ourselves to redraw when we get a chance
self.requestRedraw()
event.Skip()
def drawContents(self, dc):
"""
Does the actual drawing of all drawing contents with the specified dc
"""
# PrepareDC sets the device origin according to current scrolling
self.drawPanel.PrepareDC(dc)
gdc = self.wrapDC(dc)
# First pass draws objects
ordered_selection = []
for obj in self.contents[::-1]:
if obj in self.selection:
obj.draw(gdc, True)
ordered_selection.append(obj)
else:
obj.draw(gdc, False)
        # Then draw the current tool's in-progress feedback (e.g. a rubber-band rectangle)
if self.curTool is not None:
self.curTool.draw(gdc)
# Second pass draws selection handles so they're always on top
for obj in ordered_selection:
obj.drawHandles(gdc)
# ==========================
# == Menu Command Methods ==
# ==========================
def doNew(self, event):
""" Respond to the "New" menu command.
"""
global _docList
newFrame = DrawingFrame(None, -1, "Untitled")
newFrame.Show(True)
_docList.append(newFrame)
def doOpen(self, event):
""" Respond to the "Open" menu command.
"""
global _docList
curDir = os.getcwd()
fileName = wx.FileSelector("Open File", default_extension="psk",
flags = wx.OPEN | wx.FILE_MUST_EXIST)
if fileName == "": return
fileName = os.path.join(os.getcwd(), fileName)
os.chdir(curDir)
title = os.path.basename(fileName)
if (self.fileName == None) and (len(self.contents) == 0):
# Load contents into current (empty) document.
self.fileName = fileName
self.SetTitle(os.path.basename(fileName))
self.loadContents()
else:
# Open a new frame for this document.
newFrame = DrawingFrame(None, -1, os.path.basename(fileName),
fileName=fileName)
newFrame.Show(True)
_docList.append(newFrame)
def doClose(self, event):
""" Respond to the "Close" menu command.
"""
global _docList
if self.dirty:
if not self.askIfUserWantsToSave("closing"): return
_docList.remove(self)
self.Destroy()
def doSave(self, event):
""" Respond to the "Save" menu command.
"""
        if self.fileName != None:
            self.saveContents()
        else:
            # No file name yet, so fall back to "Save As".
            self.doSaveAs(event)
def doSaveAs(self, event):
""" Respond to the "Save As" menu command.
"""
if self.fileName == None:
default = ""
else:
default = self.fileName
curDir = os.getcwd()
fileName = wx.FileSelector("Save File As", "Saving",
default_filename=default,
default_extension="psk",
wildcard="*.psk",
flags = wx.SAVE | wx.OVERWRITE_PROMPT)
if fileName == "": return # User cancelled.
fileName = os.path.join(os.getcwd(), fileName)
os.chdir(curDir)
title = os.path.basename(fileName)
self.SetTitle(title)
self.fileName = fileName
self.saveContents()
def doRevert(self, event):
""" Respond to the "Revert" menu command.
"""
if not self.dirty: return
if wx.MessageBox("Discard changes made to this document?", "Confirm",
style = wx.OK | wx.CANCEL | wx.ICON_QUESTION,
parent=self) == wx.CANCEL: return
self.loadContents()
def doExit(self, event):
""" Respond to the "Quit" menu command.
"""
global _docList, _app
for doc in _docList:
if not doc.dirty: continue
doc.Raise()
if not doc.askIfUserWantsToSave("quitting"): return
_docList.remove(doc)
doc.Destroy()
_app.ExitMainLoop()
def doUndo(self, event):
""" Respond to the "Undo" menu command.
"""
if not self.undoStack: return
state = self._buildStoredState()
self.redoStack.append(state)
state = self.undoStack.pop()
self._restoreStoredState(state)
def doRedo(self, event):
""" Respond to the "Redo" menu.
"""
if not self.redoStack: return
state = self._buildStoredState()
self.undoStack.append(state)
state = self.redoStack.pop()
self._restoreStoredState(state)
def doSelectAll(self, event):
""" Respond to the "Select All" menu command.
"""
self.selectAll()
def doDuplicate(self, event):
""" Respond to the "Duplicate" menu command.
"""
self.saveUndoInfo()
objs = []
for obj in self.contents:
if obj in self.selection:
newObj = copy.deepcopy(obj)
pos = obj.getPosition()
newObj.setPosition(wx.Point(pos.x + 10, pos.y + 10))
newObj.name = repr(self.obj_counter)
self.obj_counter += 1
objs.append(newObj)
self.contents = objs + self.contents
self.selectMany(objs)
def doGroup(self, event):
""" Respond to the "Group" menu command.
"""
self.saveUndoInfo()
if len(self.group_selection) > 1:
self.groups[self.group_counter] = []
print 'Grouping group: ' + repr(self.group_counter)
for obj in self.group_selection:
self.groups[self.group_counter].append(obj)
print 'Grouping obj: ' + repr(obj.getName())
self.group_counter += 1
def doUngroup(self, event):
""" Respond to the "Ungroup" menu command.
"""
self.saveUndoInfo()
if len(self.group_selection) > 1:
for group_num in self.groups:
if len(self.group_selection) == len(self.groups[group_num]):
if len(set(self.group_selection).intersection(set(self.groups[group_num]))) == len(self.groups[group_num]):
self.groups.pop(group_num)
print 'Ungrouping group: ' + repr(group_num)
break
def doOrientRight(self, event):
""" Respond to the "Orient Right" menu command.
"""
self.saveUndoInfo()
objs = []
for obj in self.contents:
if obj in self.selection:
obj.orientation = 'RIGHT'
self.requestRedraw()
break
def doOrientLeft(self, event):
""" Respond to the "Orient Left" menu command.
"""
self.saveUndoInfo()
objs = []
for obj in self.contents:
if obj in self.selection:
obj.orientation = 'LEFT'
self.requestRedraw()
break
def doOrientDown(self, event):
""" Respond to the "Orient Down" menu command.
"""
self.saveUndoInfo()
objs = []
for obj in self.contents:
if obj in self.selection:
obj.orientation = 'DOWN'
self.requestRedraw()
break
def doOrientUp(self, event):
""" Respond to the "Orient Up" menu command.
"""
self.saveUndoInfo()
objs = []
for obj in self.contents:
if obj in self.selection:
obj.orientation = 'UP'
self.requestRedraw()
break
def doOrientNone(self, event):
""" Respond to the "Orient None" menu command.
"""
self.saveUndoInfo()
objs = []
for obj in self.contents:
if obj in self.selection:
obj.orientation = None
self.requestRedraw()
break
def doSetRefObj(self, event):
""" Respond to the "Set/Unset as Ref Obj" menu command.
"""
self.saveUndoInfo()
objs = []
for obj in self.contents:
if obj in self.selection:
if obj.ref_obj == False:
obj.ref_obj = True
else:
obj.ref_obj = False
self.requestRedraw()
break
def doEditObject(self, event):
""" Respond to the "Edit..." menu command.
"""
if len(self.selection) != 1: return
obj = self.selection[0]
if not obj.hasPropertyEditor():
assert False, "doEditObject called on non-editable"
ret = obj.doPropertyEdit(self)
if ret:
self.dirty = True
self.requestRedraw()
self._adjustMenus()
def doDelete(self, event):
""" Respond to the "Delete" menu command.
"""
self.saveUndoInfo()
for obj in self.selection:
self.contents.remove(obj)
# Remove the corresponding object pairs from core9_rels
objs = self.core9_rels.keys()
for key in objs:
if obj.name == key[0] or obj.name == key[1]:
self.core9_rels.pop(key)
del obj
self.deselectAll()
def onChooseTool(self, event):
""" Respond to tool selection menu and tool palette selections
"""
        print 'choosing tool'
obj = event.GetEventObject()
id2name = { id_SELECT: "select",
id_RECT: "rect",
}
toolID = event.GetId()
name = id2name.get( toolID )
if name:
self.setCurrentTool(name)
def updChooseTool(self, event):
"""UI update event that keeps tool menu in sync with the PaletteIcons"""
obj = event.GetEventObject()
id2name = { id_SELECT: "select",
id_RECT: "rect",
}
toolID = event.GetId()
event.Check( toolID == self.curToolIcon.GetId() )
def doChooseQuality(self, event):
"""Respond to the render quality menu commands
"""
if event.GetId() == menu_DC:
self.wrapDC = lambda dc: dc
else:
self.wrapDC = lambda dc: wx.GCDC(dc)
self._adjustMenus()
self.requestRedraw()
def doMoveForward(self, event):
""" Respond to the "Move Forward" menu command.
"""
if len(self.selection) != 1: return
self.saveUndoInfo()
obj = self.selection[0]
index = self.contents.index(obj)
if index == 0: return
del self.contents[index]
self.contents.insert(index-1, obj)
self.requestRedraw()
self._adjustMenus()
def doMoveToFront(self, event):
""" Respond to the "Move to Front" menu command.
"""
if len(self.selection) != 1: return
self.saveUndoInfo()
obj = self.selection[0]
self.contents.remove(obj)
self.contents.insert(0, obj)
self.requestRedraw()
self._adjustMenus()
def doMoveBackward(self, event):
""" Respond to the "Move Backward" menu command.
"""
if len(self.selection) != 1: return
self.saveUndoInfo()
obj = self.selection[0]
index = self.contents.index(obj)
if index == len(self.contents) - 1: return
del self.contents[index]
self.contents.insert(index+1, obj)
self.requestRedraw()
self._adjustMenus()
def doMoveToBack(self, event):
""" Respond to the "Move to Back" menu command.
"""
if len(self.selection) != 1: return
self.saveUndoInfo()
obj = self.selection[0]
self.contents.remove(obj)
self.contents.append(obj)
self.requestRedraw()
self._adjustMenus()
def doShowAbout(self, event):
""" Respond to the "About pySketch" menu command.
"""
dialog = wx.Dialog(self, -1, "About pySketch") # ,
#style=wx.DIALOG_MODAL | wx.STAY_ON_TOP)
dialog.SetBackgroundColour(wx.WHITE)
panel = wx.Panel(dialog, -1)
panel.SetBackgroundColour(wx.WHITE)
panelSizer = wx.BoxSizer(wx.VERTICAL)
boldFont = wx.Font(panel.GetFont().GetPointSize(),
panel.GetFont().GetFamily(),
wx.NORMAL, wx.BOLD)
logo = wx.StaticBitmap(panel, -1, wx.Bitmap("images/logo.bmp",
wx.BITMAP_TYPE_BMP))
lab1 = wx.StaticText(panel, -1, "sp_rels_gui")
lab1.SetFont(wx.Font(36, boldFont.GetFamily(), wx.ITALIC, wx.BOLD))
lab1.SetSize(lab1.GetBestSize())
imageSizer = wx.BoxSizer(wx.HORIZONTAL)
imageSizer.Add(logo, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)
imageSizer.Add(lab1, 0, wx.ALL | wx.ALIGN_CENTRE_VERTICAL, 5)
lab2 = wx.StaticText(panel, -1, "A simple object-oriented sp_rels gui program.")
lab2.SetFont(boldFont)
lab2.SetSize(lab2.GetBestSize())
lab3 = wx.StaticText(panel, -1, "sp_rels_gui is completely free " + \
"software; please")
lab3.SetFont(boldFont)
lab3.SetSize(lab3.GetBestSize())
lab4 = wx.StaticText(panel, -1, "feel free to adapt or use this " + \
"in any way you like.")
lab4.SetFont(boldFont)
lab4.SetSize(lab4.GetBestSize())
lab5 = wx.StaticText(panel, -1,
"Author: Krishna Dubba " + \
"(scksrd@leeds.ac.uk)\n"
)
lab5.SetFont(boldFont)
lab5.SetSize(lab5.GetBestSize())
btnOK = wx.Button(panel, wx.ID_OK, "OK")
panelSizer.Add(imageSizer, 0, wx.ALIGN_CENTRE)
panelSizer.Add((10, 10)) # Spacer.
panelSizer.Add(lab2, 0, wx.ALIGN_CENTRE)
panelSizer.Add((10, 10)) # Spacer.
panelSizer.Add(lab3, 0, wx.ALIGN_CENTRE)
panelSizer.Add(lab4, 0, wx.ALIGN_CENTRE)
panelSizer.Add((10, 10)) # Spacer.
panelSizer.Add(lab5, 0, wx.ALIGN_CENTRE)
panelSizer.Add((10, 10)) # Spacer.
panelSizer.Add(btnOK, 0, wx.ALL | wx.ALIGN_CENTRE, 5)
panel.SetAutoLayout(True)
panel.SetSizer(panelSizer)
panelSizer.Fit(panel)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(panel, 0, wx.ALL, 10)
dialog.SetAutoLayout(True)
dialog.SetSizer(topSizer)
topSizer.Fit(dialog)
dialog.Centre()
btn = dialog.ShowModal()
dialog.Destroy()
def getTextEditor(self):
if not hasattr(self,'textEditor') or not self.textEditor:
self.textEditor = EditTextObjectDialog(self, "Edit Text Object")
return self.textEditor
# =============================
# == Object Creation Methods ==
# =============================
def addObject(self, obj, select=True):
"""Add a new drawing object to the canvas.
If select is True then also select the object
"""
self.saveUndoInfo()
self.contents.insert(0, obj)
self.dirty = True
if select:
self.select(obj)
#self.setCurrentTool('select')
def saveUndoInfo(self):
""" Remember the current state of the document, to allow for undo.^^egnath
We make a copy of the document's contents, so that we can return to
the previous contents if the user does something and then wants to
undo the operation.
This should be called only for a new modification to the document
since it erases the redo history.
"""
state = self._buildStoredState()
self.undoStack.append(state)
self.redoStack = []
self.dirty = True
self._adjustMenus()
# =======================
# == Selection Methods ==
# =======================
def setCurrentTool(self, toolName):
""" Set the currently selected tool.
"""
toolIcon, tool = self.tools[toolName]
if self.curToolIcon is not None:
self.curToolIcon.SetValue(False)
toolIcon.SetValue(True)
self.curToolName = toolName
self.curToolIcon = toolIcon
self.curTool = tool
self.drawPanel.SetCursor(tool.getDefaultCursor())
def selectAll(self):
""" Select every DrawingObject in our document.
"""
self.selection = []
for obj in self.contents:
self.selection.append(obj)
self.requestRedraw()
self._adjustMenus()
def deselectAll(self):
""" Deselect every DrawingObject in our document.
"""
self.selection = []
self.requestRedraw()
self._adjustMenus()
def select(self, obj, add=False):
""" Select the given DrawingObject within our document.
If 'add' is True obj is added onto the current selection
"""
if not add:
self.selection = []
if obj not in self.selection:
self.selection += [obj]
self.objSize.SetLabel(repr(obj.size.GetWidth()) + ' x ' + repr(obj.size.GetHeight()))
self.objPos.SetLabel(repr(obj.position.x) + ', ' + repr(obj.position.y))
self.objName.SetValue(obj.name)
self.requestRedraw()
self._adjustMenus()
def selectMany(self, objs):
""" Select the given list of DrawingObjects.
"""
self.selection = objs
self.requestRedraw()
self._adjustMenus()
def selectByRectangle(self, x, y, width, height):
""" Select every DrawingObject in the given rectangular region.
"""
self.selection = []
for obj in self.contents:
if obj.objectWithinRect(x, y, width, height):
self.selection.append(obj)
# This is in case we want to group the selected objects
self.group_selection = self.selection
self.requestRedraw()
self._adjustMenus()
def getObjectAndSelectionHandleAt(self, pt):
""" Return the object and selection handle at the given point.
We draw selection handles (small rectangles) around the currently
selected object(s). If the given point is within one of the
selection handle rectangles, we return the associated object and a
code indicating which selection handle the point is in. If the
point isn't within any selection handle at all, we return the tuple
(None, None).
"""
for obj in self.selection:
handle = obj.getSelectionHandleContainingPoint(pt.x, pt.y)
if handle is not None:
return obj, handle
return None, None
def getObjectAt(self, pt):
""" Return the first object found which is at the given point.
"""
for obj in self.contents:
if obj.objectContainsPoint(pt.x, pt.y):
return obj
return None
# ======================
# == File I/O Methods ==
# ======================
def loadContents(self):
""" Load the contents of our document into memory.
"""
try:
f = open(self.fileName, "rb")
self.frame_data = cPickle.load(f)
f.close()
        except:
            wx.MessageBox("Unable to load " + self.fileName + ".",
                          "Error", wx.OK|wx.ICON_ERROR, self)
            # Fall back to an empty animation so the rest of the setup still works.
            self.frame_data = {}
self.edit = False
self.slider.SetValue(INITIAL_FRAME)
self.dirty = False
self.selection = []
self.undoStack = []
self.redoStack = []
        try:
            state = self.frame_data[self.current_frame]
        except KeyError:
            pass
        else:
            self._restoreStoredState(state)
self._adjustMenus()
def saveContents(self):
""" Save the contents of our document to disk.
"""
# SWIG-wrapped native wx contents cannot be pickled, so
# we have to convert our data to something pickle-friendly.
self.frame_data[self.current_frame] = self._buildStoredState()
try:
f = open(self.fileName, "wb")
cPickle.dump(self.frame_data, f)
f.close()
except:
response = wx.MessageBox("Unable to load " + self.fileName + ".",
"Error", wx.OK|wx.ICON_ERROR, self)
self.dirty = False
self._adjustMenus()
def askIfUserWantsToSave(self, action):
""" Give the user the opportunity to save the current document.
'action' is a string describing the action about to be taken. If
the user wants to save the document, it is saved immediately. If
the user cancels, we return False.
"""
if not self.dirty: return True # Nothing to do.
response = wx.MessageBox("Save changes before " + action + "?",
"Confirm", wx.YES_NO | wx.CANCEL, self)
if response == wx.YES:
if self.fileName == None:
fileName = wx.FileSelector("Save File As", "Saving",
default_extension="psk",
wildcard="*.psk",
flags = wx.SAVE | wx.OVERWRITE_PROMPT)
if fileName == "": return False # User cancelled.
self.fileName = fileName
self.saveContents()
return True
elif response == wx.NO:
return True # User doesn't want changes saved.
elif response == wx.CANCEL:
return False # User cancelled.
# =====================
# == Private Methods ==
# =====================
def _initBuffer(self):
"""Initialize the bitmap used for buffering the display."""
size = self.drawPanel.GetSize()
self.buffer = wx.EmptyBitmap(max(1,size.width),max(1,size.height))
dc = wx.BufferedDC(None, self.buffer)
dc.SetBackground(wx.Brush(self.drawPanel.GetBackgroundColour()))
dc.Clear()
self.drawContents(dc)
del dc # commits all drawing to the buffer
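        # Remember the scroll offset the buffer was drawn at; onPaint compares it
        # with the current offset to decide which exposed areas the buffer already
        # covers and which still need the fallback stipple fill.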
self.saved_offset = self.drawPanel.CalcUnscrolledPosition(0,0)
self._reInitBuffer = False
def _adjustMenus(self):
""" Adjust our menus and toolbar to reflect the current state of the
world.
Doing this manually rather than using an EVT_UPDATE_UI is a bit
more efficient (since it's only done when it's really needed),
but it means we have to remember to call _adjustMenus any time
menus may need adjusting.
"""
canSave = (self.fileName != None) and self.dirty
canRevert = (self.fileName != None) and self.dirty
canUndo = self.undoStack!=[]
canRedo = self.redoStack!=[]
selection = len(self.selection) > 0
onlyOne = len(self.selection) == 1
hasEditor = onlyOne and self.selection[0].hasPropertyEditor()
front = onlyOne and (self.selection[0] == self.contents[0])
back = onlyOne and (self.selection[0] == self.contents[-1])
# Enable/disable our menu items.
self.fileMenu.Enable(wx.ID_SAVE, canSave)
self.fileMenu.Enable(wx.ID_REVERT, canRevert)
self.editMenu.Enable(wx.ID_UNDO, canUndo)
self.editMenu.Enable(wx.ID_REDO, canRedo)
self.editMenu.Enable(menu_DUPLICATE, selection)
self.editMenu.Enable(menu_EDIT_PROPS,hasEditor)
self.editMenu.Enable(wx.ID_CLEAR, selection)
self.objectMenu.Enable(menu_MOVE_FORWARD, onlyOne and not front)
self.objectMenu.Enable(menu_MOVE_TO_FRONT, onlyOne and not front)
self.objectMenu.Enable(menu_MOVE_BACKWARD, onlyOne and not back)
self.objectMenu.Enable(menu_MOVE_TO_BACK, onlyOne and not back)
# Enable/disable our toolbar icons.
self.toolbar.EnableTool(wx.ID_NEW, True)
self.toolbar.EnableTool(wx.ID_OPEN, True)
self.toolbar.EnableTool(wx.ID_SAVE, canSave)
self.toolbar.EnableTool(wx.ID_UNDO, canUndo)
self.toolbar.EnableTool(wx.ID_REDO, canRedo)
self.toolbar.EnableTool(menu_DUPLICATE, selection)
self.toolbar.EnableTool(menu_MOVE_FORWARD, onlyOne and not front)
self.toolbar.EnableTool(menu_MOVE_BACKWARD, onlyOne and not back)
def _setPenColour(self, colour):
""" Set the default or selected object's pen colour.
"""
if len(self.selection) > 0:
self.saveUndoInfo()
for obj in self.selection:
obj.setPenColour(colour)
self.requestRedraw()
self.penColour = colour
self.optionIndicator.setPenColour(colour)
def _setFillColour(self, colour):
""" Set the default or selected object's fill colour.
"""
if len(self.selection) > 0:
self.saveUndoInfo()
for obj in self.selection:
obj.setFillColour(colour)
self.requestRedraw()
self.fillColour = colour
self.optionIndicator.setFillColour(colour)
def _setLineSize(self, size):
""" Set the default or selected object's line size.
"""
if len(self.selection) > 0:
self.saveUndoInfo()
for obj in self.selection:
obj.setLineSize(size)
self.requestRedraw()
self.lineSize = size
self.optionIndicator.setLineSize(size)
def _setObjType(self, objType):
""" Set the default or selected object's type.
"""
if len(self.selection) > 0:
self.saveUndoInfo()
for obj in self.selection:
obj.setObjType(objType)
self.requestRedraw()
        self.objType = objType
        self.optionIndicator.setObjType(objType)
def _buildStoredState(self):
""" Remember the current state of the document, to allow for undo.
We make a copy of the document's contents, so that we can return to
the previous contents if the user does something and then wants to
undo the operation.
Returns an object representing the current document state.
"""
savedContents = []
for obj in self.contents:
savedContents.append([obj.__class__, obj.getData()])
savedSelection = []
for i in range(len(self.contents)):
if self.contents[i] in self.selection:
savedSelection.append(i)
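        # The selection is saved as indices into contents rather than object
        # references, because _restoreStoredState rebuilds fresh objects from
        # their saved data and re-resolves the selection by position.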
info = {"contents" : savedContents,
"selection" : savedSelection,
"groups" : self.groups}
return info
def _restoreStoredState(self, savedState):
"""Restore the state of the document to a previous point for undo/redo.
Takes a stored state object and recreates the document from it.
Used by undo/redo implementation.
"""
self.contents = []
for draw_class, obj_data in savedState["contents"]:
obj_name = obj_data[-1]
# Get initial values, these are temporary
(name, position, size, penColour, fillColour, lineSize) = (obj_name, wx.Point(0, 0), \
wx.Size(0, 0), wx.BLACK, wx.WHITE, 1)
obj = draw_class(name, position, size, penColour, fillColour, lineSize)
# Now set the real values
obj.setData(obj_data)
self.contents.append(obj)
self.selection = []
for i in savedState["selection"]:
self.selection.append(self.contents[i])
self.groups = savedState['groups']
self.dirty = True
self._adjustMenus()
self.requestRedraw()
def _resizeObject(self, obj, anchorPt, oldPt, newPt):
""" Resize the given object.
'anchorPt' is the unchanging corner of the object, while the
opposite corner has been resized. 'oldPt' are the current
coordinates for this corner, while 'newPt' are the new coordinates.
The object should fit within the given dimensions, though if the
new point is less than the anchor point the object will need to be
moved as well as resized, to avoid giving it a negative size.
"""
if isinstance(obj, TextDrawingObject):
# Not allowed to resize text objects -- they're sized to fit text.
wx.Bell()
return
self.saveUndoInfo()
topLeft = wx.Point(min(anchorPt.x, newPt.x),
min(anchorPt.y, newPt.y))
botRight = wx.Point(max(anchorPt.x, newPt.x),
max(anchorPt.y, newPt.y))
newWidth = botRight.x - topLeft.x
newHeight = botRight.y - topLeft.y
if isinstance(obj, LineDrawingObject):
# Adjust the line so that its start and end points match the new
# overall object size.
startPt = obj.getStartPt()
endPt = obj.getEndPt()
slopesDown = ((startPt.x < endPt.x) and (startPt.y < endPt.y)) or \
((startPt.x > endPt.x) and (startPt.y > endPt.y))
# Handle the user flipping the line.
hFlip = ((anchorPt.x < oldPt.x) and (anchorPt.x > newPt.x)) or \
((anchorPt.x > oldPt.x) and (anchorPt.x < newPt.x))
vFlip = ((anchorPt.y < oldPt.y) and (anchorPt.y > newPt.y)) or \
((anchorPt.y > oldPt.y) and (anchorPt.y < newPt.y))
if (hFlip and not vFlip) or (vFlip and not hFlip):
slopesDown = not slopesDown # Line flipped.
if slopesDown:
obj.setStartPt(wx.Point(0, 0))
obj.setEndPt(wx.Point(newWidth, newHeight))
else:
obj.setStartPt(wx.Point(0, newHeight))
obj.setEndPt(wx.Point(newWidth, 0))
# Finally, adjust the bounds of the object to match the new dimensions.
obj.setPosition(topLeft)
obj.setSize(wx.Size(botRight.x - topLeft.x, botRight.y - topLeft.y))
print 'resized'
self.requestRedraw()
def _moveObject(self, offsetX, offsetY):
""" Move the currently selected object(s) by the given offset using arrow keys.
"""
self.saveUndoInfo()
# Use this only to move group of objects
#for obj in self.selection:
for obj in self.group_selection:
pos = obj.getPosition()
pos.x = pos.x + offsetX
pos.y = pos.y + offsetY
obj.setPosition(pos)
self.requestRedraw()
def _buildLineSizePopup(self, lineSize):
""" Build the pop-up menu used to set the line size.
'lineSize' is the current line size value. The corresponding item
is checked in the pop-up menu.
"""
menu = wx.Menu()
menu.Append(id_LINESIZE_0, "no line", kind=wx.ITEM_CHECK)
menu.Append(id_LINESIZE_1, "1-pixel line", kind=wx.ITEM_CHECK)
menu.Append(id_LINESIZE_2, "2-pixel line", kind=wx.ITEM_CHECK)
menu.Append(id_LINESIZE_3, "3-pixel line", kind=wx.ITEM_CHECK)
menu.Append(id_LINESIZE_4, "4-pixel line", kind=wx.ITEM_CHECK)
menu.Append(id_LINESIZE_5, "5-pixel line", kind=wx.ITEM_CHECK)
if lineSize == 0: menu.Check(id_LINESIZE_0, True)
elif lineSize == 1: menu.Check(id_LINESIZE_1, True)
elif lineSize == 2: menu.Check(id_LINESIZE_2, True)
elif lineSize == 3: menu.Check(id_LINESIZE_3, True)
elif lineSize == 4: menu.Check(id_LINESIZE_4, True)
elif lineSize == 5: menu.Check(id_LINESIZE_5, True)
self.Bind(wx.EVT_MENU, self._lineSizePopupSelected, id=id_LINESIZE_0, id2=id_LINESIZE_5)
return menu
def _lineSizePopupSelected(self, event):
""" Respond to the user selecting an item from the line size popup menu
"""
id = event.GetId()
if id == id_LINESIZE_0: self._setLineSize(0)
elif id == id_LINESIZE_1: self._setLineSize(1)
elif id == id_LINESIZE_2: self._setLineSize(2)
elif id == id_LINESIZE_3: self._setLineSize(3)
elif id == id_LINESIZE_4: self._setLineSize(4)
elif id == id_LINESIZE_5: self._setLineSize(5)
else:
wx.Bell()
return
self.optionIndicator.setLineSize(self.lineSize)
def _buildObjTypePopup(self, objType):
""" Build the pop-up menu used to set the object type.
            'objType' is the current object type. The corresponding item
            is checked in the pop-up menu.
"""
menu = wx.Menu()
menu.Append(id_OBJTYPE_0, "robot", kind=wx.ITEM_CHECK)
menu.Append(id_OBJTYPE_1, "guest", kind=wx.ITEM_CHECK)
menu.Append(id_OBJTYPE_2, "counter", kind=wx.ITEM_CHECK)
menu.Append(id_OBJTYPE_3, "table", kind=wx.ITEM_CHECK)
menu.Append(id_OBJTYPE_4, "chair", kind=wx.ITEM_CHECK)
menu.Append(id_OBJTYPE_5, "mug", kind=wx.ITEM_CHECK)
menu.Append(id_OBJTYPE_6, "spoon", kind=wx.ITEM_CHECK)
menu.Append(id_OBJTYPE_7, "region", kind=wx.ITEM_CHECK)
if objType == 0: menu.Check(id_OBJTYPE_0, True)
elif objType == 1: menu.Check(id_OBJTYPE_1, True)
elif objType == 2: menu.Check(id_OBJTYPE_2, True)
elif objType == 3: menu.Check(id_OBJTYPE_3, True)
elif objType == 4: menu.Check(id_OBJTYPE_4, True)
elif objType == 5: menu.Check(id_OBJTYPE_5, True)
elif objType == 6: menu.Check(id_OBJTYPE_6, True)
elif objType == 7: menu.Check(id_OBJTYPE_7, True)
self.Bind(wx.EVT_MENU, self._objTypePopupSelected, id=id_OBJTYPE_0, id2=id_OBJTYPE_7)
return menu
def _objTypePopupSelected(self, event):
""" Respond to the user selecting an item from the line size popup menu
"""
id = event.GetId()
if id == id_OBJTYPE_0: self._setObjType("robot")
elif id == id_OBJTYPE_1: self._setObjType("guest")
elif id == id_OBJTYPE_2: self._setObjType("counter")
elif id == id_OBJTYPE_3: self._setObjType("table")
elif id == id_OBJTYPE_4: self._setObjType("chair")
elif id == id_OBJTYPE_5: self._setObjType("mug")
elif id == id_OBJTYPE_6: self._setObjType("spoon")
elif id == id_OBJTYPE_7: self._setObjType("region")
else:
wx.Bell()
return
        self.optionIndicator.setObjType(self.objType)
def _getEventCoordinates(self, event):
""" Return the coordinates associated with the given mouse event.
The coordinates have to be adjusted to allow for the current scroll
position.
"""
originX, originY = self.drawPanel.GetViewStart()
unitX, unitY = self.drawPanel.GetScrollPixelsPerUnit()
return wx.Point(event.GetX() + (originX * unitX),
event.GetY() + (originY * unitY))
def _drawObjectOutline(self, offsetX, offsetY):
""" Draw an outline of the currently selected object.
The selected object's outline is drawn at the object's position
plus the given offset.
Note that the outline is drawn by *inverting* the window's
contents, so calling _drawObjectOutline twice in succession will
restore the window's contents back to what they were previously.
"""
if len(self.selection) != 1: return
position = self.selection[0].getPosition()
size = self.selection[0].getSize()
dc = wx.ClientDC(self.drawPanel)
self.drawPanel.PrepareDC(dc)
dc.BeginDrawing()
dc.SetPen(wx.BLACK_DASHED_PEN)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetLogicalFunction(wx.INVERT)
dc.DrawRectangle(position.x + offsetX, position.y + offsetY,
size.width, size.height)
dc.EndDrawing()
#============================================================================
class DrawingTool(object):
"""Base class for drawing tools"""
def __init__(self):
pass
def getDefaultCursor(self):
"""Return the cursor to use by default which this drawing tool is selected"""
return wx.STANDARD_CURSOR
def draw(self,dc):
pass
def onMouseEvent(self,parent, event):
"""Mouse events passed in from the parent.
Returns True if the event is handled by the tool,
False if the canvas can try to use it.
"""
event.Skip()
return False
#----------------------------------------------------------------------------
class SelectDrawingTool(DrawingTool):
"""Represents the tool for selecting things"""
def __init__(self):
self.curHandle = None
self.curObject = None
self.objModified = False
self.startPt = None
self.curPt = None
def getDefaultCursor(self):
"""Return the cursor to use by default which this drawing tool is selected"""
return wx.STANDARD_CURSOR
def draw(self, dc):
if self._doingRectSelection():
dc.SetPen(wx.BLACK_DASHED_PEN)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
x = [self.startPt.x, self.curPt.x]; x.sort()
y = [self.startPt.y, self.curPt.y]; y.sort()
dc.DrawRectangle(x[0],y[0], x[1]-x[0],y[1]-y[0])
def onMouseEvent(self,parent, event):
handlers = { wx.EVT_LEFT_DOWN.evtType[0]: self.onMouseLeftDown,
wx.EVT_MOTION.evtType[0]: self.onMouseMotion,
wx.EVT_LEFT_UP.evtType[0]: self.onMouseLeftUp,
wx.EVT_LEFT_DCLICK.evtType[0]: self.onMouseLeftDClick }
handler = handlers.get(event.GetEventType())
if handler is not None:
return handler(parent,event)
else:
event.Skip()
return False
def onMouseLeftDown(self,parent,event):
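        # Priority: a selection handle under the cursor wins (starts a resize);
        # otherwise any object under the cursor is selected and dragDelta records
        # the offset from the click point to the object's origin for dragging.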
mousePt = wx.Point(event.X,event.Y)
obj, handle = parent.getObjectAndSelectionHandleAt(mousePt)
self.startPt = mousePt
self.curPt = mousePt
if obj is not None and handle is not None:
self.curObject = obj
self.curHandle = handle
else:
self.curObject = None
self.curHandle = None
obj = parent.getObjectAt(mousePt)
if self.curObject is None and obj is not None:
self.curObject = obj
self.dragDelta = obj.position-mousePt
self.curHandle = None
parent.select(obj, event.ShiftDown())
return True
def onMouseMotion(self,parent,event):
if not event.LeftIsDown(): return
self.curPt = wx.Point(event.X,event.Y)
obj,handle = self.curObject,self.curHandle
if self._doingDragHandle():
self._prepareToModify(parent)
obj.moveHandle(handle,event.X,event.Y)
#parent.requestRedraw()
elif self._doingDragObject():
self._prepareToModify(parent)
obj.position = self.curPt + self.dragDelta
#parent.requestRedraw()
#elif self._doingRectSelection():
# parent.requestRedraw()
if self.curObject != None:
parent.objSize.SetLabel(repr(self.curObject.size.GetWidth()) + ' x ' + repr(self.curObject.size.GetHeight()))
parent.requestRedraw()
return True
def onMouseLeftUp(self,parent,event):
obj,handle = self.curObject,self.curHandle
if self._doingDragHandle():
obj.moveHandle(handle,event.X,event.Y)
obj.finalizeHandle(handle,event.X,event.Y)
elif self._doingDragObject():
curPt = wx.Point(event.X,event.Y)
obj.position = curPt + self.dragDelta
elif self._doingRectSelection():
x = [event.X, self.startPt.x]
y = [event.Y, self.startPt.y]
x.sort()
y.sort()
parent.selectByRectangle(x[0],y[0],x[1]-x[0],y[1]-y[0])
self.curObject = None
self.curHandle = None
self.curPt = None
self.startPt = None
self.objModified = False
parent.requestRedraw()
return True
def onMouseLeftDClick(self,parent,event):
event.Skip()
mousePt = wx.Point(event.X,event.Y)
obj = parent.getObjectAt(mousePt)
if obj and obj.hasPropertyEditor():
if obj.doPropertyEdit(parent):
parent.requestRedraw()
return True
return False
def _prepareToModify(self,parent):
if not self.objModified:
parent.saveUndoInfo()
self.objModified = True
def _doingRectSelection(self):
return self.curObject is None \
and self.startPt is not None \
and self.curPt is not None
def _doingDragObject(self):
return self.curObject is not None and self.curHandle is None
def _doingDragHandle(self):
return self.curObject is not None and self.curHandle is not None
#----------------------------------------------------------------------------
class RectDrawingTool(DrawingTool):
"""Represents the tool for drawing rectangles"""
def __init__(self):
self.newObject = None
def getDefaultCursor(self):
"""Return the cursor to use by default which this drawing tool is selected"""
return wx.CROSS_CURSOR
def draw(self, dc):
if self.newObject is None: return
self.newObject.draw(dc,True)
def onMouseEvent(self,parent, event):
handlers = { wx.EVT_LEFT_DOWN.evtType[0]: self.onMouseLeftDown,
wx.EVT_MOTION.evtType[0]: self.onMouseMotion,
wx.EVT_LEFT_UP.evtType[0]: self.onMouseLeftUp }
handler = handlers.get(event.GetEventType())
if handler is not None:
return handler(parent,event)
else:
event.Skip()
return False
def onMouseLeftDown(self,parent, event):
self.startPt = wx.Point(event.GetX(), event.GetY())
self.newObject = None
event.Skip()
return True
def onMouseMotion(self,parent, event):
if not event.Dragging(): return
if self.newObject is None:
obj = RectDrawingObject(penColour=parent.penColour,
fillColour=parent.fillColour,
lineSize=parent.lineSize,
name=parent.obj_counter)
parent.obj_counter += 1
self.newObject = obj
self._updateObjFromEvent(self.newObject, event)
parent.objSize.SetLabel(repr(self.newObject.size.GetWidth()) + ' x ' + repr(self.newObject.size.GetHeight()))
parent.objPos.SetLabel(repr(self.newObject.position.x) + ', ' + repr(self.newObject.position.y))
parent.requestRedraw()
event.Skip()
return True
def onMouseLeftUp(self,parent, event):
if self.newObject is None:
return
self._updateObjFromEvent(self.newObject,event)
parent.addObject(self.newObject)
self.newObject = None
event.Skip()
return True
def _updateObjFromEvent(self,obj,event):
x = [event.X, self.startPt.x]
y = [event.Y, self.startPt.y]
x.sort()
y.sort()
width = x[1]-x[0]
height = y[1]-y[0]
obj.setPosition(wx.Point(x[0],y[0]))
obj.setSize(wx.Size(width,height))
#============================================================================
class DrawingObject(object):
""" Base class for objects within the drawing panel.
A pySketch document consists of a front-to-back ordered list of
DrawingObjects. Each DrawingObject has the following properties:
'position' The position of the object within the document.
'size' The size of the object within the document.
'penColour' The colour to use for drawing the object's outline.
'fillColour' Colour to use for drawing object's interior.
'lineSize' Line width (in pixels) to use for object's outline.
"""
# ==================
# == Constructors ==
# ==================
def __init__(self, name, position=wx.Point(0, 0), size=wx.Size(0, 0),
penColour=wx.BLACK, fillColour=wx.WHITE, lineSize=1, objType='obj'
):
""" Standard constructor.
The remaining parameters let you set various options for the newly
created DrawingObject.
"""
# One must take great care with constructed default arguments
# like wx.Point(0,0) above. *EVERY* caller that uses the
# default will get the same instance. Thus, below we make a
# deep copy of those arguments with object defaults.
self.position = wx.Point(position.x,position.y)
self.size = wx.Size(size.x,size.y)
self.penColour = penColour
self.fillColour = fillColour
self.lineSize = lineSize
self.objType = objType
self.name = repr(name)
# =============================
# == Object Property Methods ==
# =============================
def getData(self):
""" Return a copy of the object's internal data.
This is used to save this DrawingObject to disk.
"""
return [self.position.x, self.position.y,
self.size.width, self.size.height,
self.orientation,
self.penColour.Red(),
self.penColour.Green(),
self.penColour.Blue(),
self.fillColour.Red(),
self.fillColour.Green(),
self.fillColour.Blue(),
self.lineSize,
self.ref_obj,
self.objType,
self.name]
def setData(self, data):
""" Set the object's internal data.
'data' is a copy of the object's saved data, as returned by
getData() above. This is used to restore a previously saved
DrawingObject.
Returns an iterator to any remaining data not consumed by
this base class method.
"""
#data = copy.deepcopy(data) # Needed?
d = iter(data)
try:
self.position = wx.Point(d.next(), d.next())
self.size = wx.Size(d.next(), d.next())
self.orientation = d.next()
self.penColour = wx.Colour(red=d.next(),
green=d.next(),
blue=d.next())
self.fillColour = wx.Colour(red=d.next(),
green=d.next(),
blue=d.next())
self.lineSize = d.next()
self.ref_obj = d.next()
self.objType = d.next()
self.name = d.next()
except StopIteration:
raise ValueError('Not enough data in setData call')
return d
def hasPropertyEditor(self):
#return False
return True
def doPropertyEdit(self, parent):
        # Subclasses that report hasPropertyEditor() == True are expected to
        # override this; the base implementation reports "no change".
        return False
def setPosition(self, position):
""" Set the origin (top-left corner) for this DrawingObject.
"""
self.position = position
def getPosition(self):
""" Return this DrawingObject's position.
"""
return self.position
def setSize(self, size):
""" Set the size for this DrawingObject.
"""
self.size = size
def getSize(self):
""" Return this DrawingObject's size.
"""
return self.size
def setObjType(self, objType):
""" Set the obj type for this DrawingObject.
"""
self.objType = objType
def getObjType(self):
""" Get the obj type for this DrawingObject.
"""
return self.objType
def getName(self):
""" Return this DrawingObject's name.
"""
return self.name
def setName(self, name):
""" Set this DrawingObject's name.
"""
self.name = name
def setPenColour(self, colour):
""" Set the pen colour used for this DrawingObject.
"""
self.penColour = colour
def getPenColour(self):
""" Return this DrawingObject's pen colour.
"""
return self.penColour
def setFillColour(self, colour):
""" Set the fill colour used for this DrawingObject.
"""
self.fillColour = colour
def getFillColour(self):
""" Return this DrawingObject's fill colour.
"""
return self.fillColour
def setLineSize(self, lineSize):
""" Set the linesize used for this DrawingObject.
"""
self.lineSize = lineSize
def getLineSize(self):
""" Return this DrawingObject's line size.
"""
return self.lineSize
# ============================
# == Object Drawing Methods ==
# ============================
def draw(self, dc, selected):
""" Draw this DrawingObject into our window.
'dc' is the device context to use for drawing.
If 'selected' is True, the object is currently selected.
Drawing objects can use this to change the way selected objects
are drawn, however the actual drawing of selection handles
should be done in the 'drawHandles' method
"""
if self.lineSize == 0:
dc.SetPen(wx.Pen(self.penColour, self.lineSize, wx.TRANSPARENT))
else:
dc.SetPen(wx.Pen(self.penColour, self.lineSize, wx.SOLID))
dc.SetBrush(wx.Brush(self.fillColour, wx.SOLID))
self._privateDraw(dc, self.position, selected)
def drawHandles(self, dc):
"""Draw selection handles for this DrawingObject"""
# Default is to draw selection handles at all four corners.
dc.SetPen(wx.BLACK_PEN)
dc.SetBrush(wx.BLACK_BRUSH)
x,y = self.position
self._drawSelHandle(dc, x, y)
self._drawSelHandle(dc, x + self.size.width, y)
self._drawSelHandle(dc, x, y + self.size.height)
self._drawSelHandle(dc, x + self.size.width, y + self.size.height)
# =======================
# == Selection Methods ==
# =======================
def objectContainsPoint(self, x, y):
""" Returns True iff this object contains the given point.
This is used to determine if the user clicked on the object.
"""
# Firstly, ignore any points outside of the object's bounds.
if x < self.position.x: return False
if x > self.position.x + self.size.x: return False
if y < self.position.y: return False
if y > self.position.y + self.size.y: return False
# Now things get tricky. There's no straightforward way of
# knowing whether the point is within an arbitrary object's
# bounds...to get around this, we draw the object into a
# memory-based bitmap and see if the given point was drawn.
# This could no doubt be done more efficiently by some tricky
# maths, but this approach works and is simple enough.
# Subclasses can implement smarter faster versions of this.
bitmap = wx.EmptyBitmap(self.size.x + 10, self.size.y + 10)
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
dc.BeginDrawing()
dc.SetBackground(wx.WHITE_BRUSH)
dc.Clear()
dc.SetPen(wx.Pen(wx.BLACK, self.lineSize + 5, wx.SOLID))
dc.SetBrush(wx.BLACK_BRUSH)
self._privateDraw(dc, wx.Point(5, 5), True)
dc.EndDrawing()
pixel = dc.GetPixel(x - self.position.x + 5, y - self.position.y + 5)
if (pixel.Red() == 0) and (pixel.Green() == 0) and (pixel.Blue() == 0):
return True
else:
return False
handle_TOP = 0
handle_BOTTOM = 1
handle_LEFT = 0
handle_RIGHT = 1
def getSelectionHandleContainingPoint(self, x, y):
""" Return the selection handle containing the given point, if any.
We return one of the predefined selection handle ID codes.
"""
# Default implementation assumes selection handles at all four bbox corners.
# Return a list so we can modify the contents later in moveHandle()
if self._pointInSelRect(x, y, self.position.x, self.position.y):
return [self.handle_TOP, self.handle_LEFT]
elif self._pointInSelRect(x, y, self.position.x + self.size.width,
self.position.y):
return [self.handle_TOP, self.handle_RIGHT]
elif self._pointInSelRect(x, y, self.position.x,
self.position.y + self.size.height):
return [self.handle_BOTTOM, self.handle_LEFT]
elif self._pointInSelRect(x, y, self.position.x + self.size.width,
self.position.y + self.size.height):
return [self.handle_BOTTOM, self.handle_RIGHT]
else:
return None
def moveHandle(self, handle, x, y):
""" Move the specified selection handle to given canvas location.
"""
assert handle is not None
# Default implementation assumes selection handles at all four bbox corners.
pt = wx.Point(x,y)
x,y = self.position
w,h = self.size
if handle[0] == self.handle_TOP:
if handle[1] == self.handle_LEFT:
dpos = pt - self.position
self.position = pt
self.size.width -= dpos.x
self.size.height -= dpos.y
else:
dx = pt.x - ( x + w )
dy = pt.y - ( y )
self.position.y = pt.y
self.size.width += dx
self.size.height -= dy
else: # BOTTOM
if handle[1] == self.handle_LEFT:
dx = pt.x - ( x )
dy = pt.y - ( y + h )
self.position.x = pt.x
self.size.width -= dx
self.size.height += dy
else:
dpos = pt - self.position
dpos.x -= w
dpos.y -= h
self.size.width += dpos.x
self.size.height += dpos.y
# Finally, normalize so no negative widths or heights.
# And update the handle variable accordingly.
if self.size.height<0:
self.position.y += self.size.height
self.size.height = -self.size.height
handle[0] = 1-handle[0]
if self.size.width<0:
self.position.x += self.size.width
self.size.width = -self.size.width
handle[1] = 1-handle[1]
def finalizeHandle(self, handle, x, y):
pass
def objectWithinRect(self, x, y, width, height):
""" Return True iff this object falls completely within the given rect.
"""
if x > self.position.x: return False
if x + width < self.position.x + self.size.width: return False
if y > self.position.y: return False
if y + height < self.position.y + self.size.height: return False
return True
# =====================
# == Private Methods ==
# =====================
def _privateDraw(self, dc, position, selected):
""" Private routine to draw this DrawingObject.
'dc' is the device context to use for drawing, while 'position' is
the position in which to draw the object.
"""
pass
def _drawSelHandle(self, dc, x, y):
""" Draw a selection handle around this DrawingObject.
'dc' is the device context to draw the selection handle within,
while 'x' and 'y' are the coordinates to use for the centre of the
selection handle.
"""
dc.DrawRectangle(x - 3, y - 3, 6, 6)
def _pointInSelRect(self, x, y, rX, rY):
""" Return True iff (x, y) is within the selection handle at (rX, ry).
"""
if x < rX - 3: return False
elif x > rX + 3: return False
elif y < rY - 3: return False
elif y > rY + 3: return False
else: return True
#----------------------------------------------------------------------------
class RectDrawingObject(DrawingObject):
""" DrawingObject subclass that represents an axis-aligned rectangle.
"""
def __init__(self, *varg, **kwarg):
DrawingObject.__init__(self, *varg, **kwarg)
self.orientation = 'DOWN'
self.ref_obj = False
def objectContainsPoint(self, x, y):
""" Returns True iff this object contains the given point.
This is used to determine if the user clicked on the object.
"""
# Firstly, ignore any points outside of the object's bounds.
if x < self.position.x: return False
if x > self.position.x + self.size.x: return False
if y < self.position.y: return False
if y > self.position.y + self.size.y: return False
# Rectangles are easy -- they're always selected if the
# point is within their bounds.
return True
# =====================
# == Private Methods ==
# =====================
def draw_contour_lines(self, dc, position, orientation='RIGHT'):
if self.ref_obj:
if self.orientation == 'UP' or self.orientation == 'DOWN':
fig_w = self.size.width
fig_h = self.size.width
x_left_lim = self.position.x
x_right_lim = self.position.x + self.size.width
if self.orientation == 'UP':
y_left_lim = self.position.y
y_right_lim = self.position.y + fig_h
x1, y1 = self.position.x, self.position.y
x2, y2 = self.position.x + self.size.width, self.position.y
if self.orientation == 'DOWN':
x1, y1 = self.position.x, self.position.y + self.size.height
x2, y2 = self.position.x + self.size.width, self.position.y + self.size.height
y_left_lim = self.position.y + self.size.height
y_right_lim = self.position.y + self.size.height + fig_h
if self.orientation == 'LEFT' or self.orientation == 'RIGHT':
fig_w = self.size.height
fig_h = self.size.height
y_left_lim = self.position.y
y_right_lim = self.position.y + fig_h
if self.orientation == 'LEFT':
x_left_lim = self.position.x
x_right_lim = self.position.x + fig_h
x1, y1 = self.position.x, self.position.y
x2, y2 = self.position.x, self.position.y + self.size.height
if self.orientation == 'RIGHT':
x_left_lim = self.position.x + self.size.width
x_right_lim = self.position.x + self.size.width + fig_w
x1, y1 = self.position.x + self.size.width, self.position.y
x2, y2 = self.position.x + self.size.width, self.position.y + self.size.height
fig = Figure(figsize=(fig_w,fig_h), dpi=120,frameon=False)
plot = fig.add_subplot(111)
A = 0
K = 1
B = 2
Q = 0.01
M = 0.01
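            # A, K, B, Q and M are passed to the helper gen_logistic below;
            # judging from the call they look like the parameters of a
            # generalised logistic (Richards-type) curve, but gen_logistic is
            # defined elsewhere in this file, so treat that as an assumption.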
#x1, y1 = self.position.x, self.position.y
#x2, y2 = self.position.x, self.position.y
#x_left_lim = 0
#x_right_lim = 11
#y_left_lim = 0
#y_right_lim = 4
x = linspace(x_left_lim, x_right_lim, x_right_lim - x_left_lim)
y = linspace(y_left_lim, y_right_lim, y_right_lim - y_left_lim)
X, Y = meshgrid(x, y)
Z = np.zeros((X.shape[0],X.shape[1]))
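            # The nested loops below build a scalar field Z over the grid: each
            # grid point gets a logistic transform (gen_logistic) of the angle
            # returned by find_angle for the reference points (x1, y1)/(x2, y2),
            # plus the reciprocal of its mean distance to those two points.
            # matplotlib's contour() then extracts the level curves of Z.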
for i in range(y.shape[0]):
for j in range(x.shape[0]):
ang = find_angle(x1,y1,x2,y2,x[j],y[i])
dist1 = distance(x1, y1, x[j], y[i])
dist2 = distance(x2, y2, x[j], y[i])
avg_dist = (dist1+dist2)/2
if avg_dist == 0:
avg_dist = 0.0000001
Z[i,j] = 2*gen_logistic(A, K, B, Q, M, ang) + 1/avg_dist
contour = plot.contour(X, Y, Z)
for collection in contour.collections:
for path in collection.get_paths():
points_list = []
for point in path.vertices:
#dc.DrawPoint(point[0], point[1])
points_list.append(wx.RealPoint(point[0], point[1]))
#dc.DrawSpline(points_list)
def old_draw_contour(self, dc, position, orientation='RIGHT'):
"""
Can I use this function in the future?
DrawSpline(self, points)
Draws a spline between all given control points, (a list of wx.Point objects) using the current pen.
The spline is drawn using a series of lines, using an algorithm taken from the X drawing program 'XFIG'.
Parameters:
points
(type=List)
TODO:
implement orientation!
"""
if self.ref_obj:
#delta = 0.025
#x = np.arange(-3.0, 3.0, delta)
#y = np.arange(-2.0, 2.0, delta)
#X, Y = np.meshgrid(x, y)
#Z = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
#matplotlib.rc('figure.subplot', left=.01, right=.01, bottom=.01, top=.01)
targets = dict(left=0, right=0, top=0, bottom=0, hspace=30, wspace=30)
fig_w = self.size.width/96.0
fig_h = self.size.width/100.0
fig = Figure(figsize=(fig_w,fig_h), dpi=120,frameon=False)
plot = fig.add_subplot(111)
adjust_borders(fig, targets)
canvas = FigureCanvasAgg(fig)
xlist = linspace(-3.0, 3.0, 4)
ylist = linspace(-3.0, 0, 3)
X, Y = meshgrid(xlist, ylist)
Z = sqrt(X**2 + Y**2)
levels = [0.0, 0.5, 1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0]
#CP3 = plot.contour(X, Y, Z, levels, colors='k')
#plot.clabel(CP3, colors = 'k', fmt = '%2.1f', fontsize=14)
contour = plot.contour(X, Y, Z, levels,alpha=0.8)
splines = []
for collection in contour.collections:
for path in collection.get_paths():
points_list = []
for point in path.vertices:
points_list.append(wx.RealPoint(point[0], point[1]))
dc.DrawSpline(points_list)
plot.set_xlim( (-3, 3))
plot.set_ylim( (-3, 0))
#plot.set_frame_on(False)
#adjust_spines(plot,[])
plot.axis('off')
canvas.draw()
s = canvas.tostring_rgb() # save this and convert to bitmap as needed
l,b,w,h = fig.bbox.bounds
w, h = int(w), int(h)
# convert to a numpy array
X = np.fromstring(s, np.uint8)
X.shape = h, w, 3
image = wx.EmptyImage(w,h)
image.SetData(X.tostring())
wxBitmap = image.ConvertToBitmap()
contour_offset = self.size.width/100.0 * 10
dc.DrawBitmap(wxBitmap, position.x - contour_offset, position.y+self.size.height)
#dc.DrawBitmap(wxBitmap, position.x, position.y+self.size.height)
def draw_contour(self, dc, position, orientation='RIGHT'):
if self.ref_obj:
A = 0
K = 1
B = 2
Q = 0.01
M = 0.01
x1, y1 = 2,0
x2, y2 = 8,0
x_left_lim = 0
x_right_lim = 11
y_left_lim = 0
y_right_lim = 4
#delta = 1
#x = np.arange(x_left_lim, x_right_lim, delta)
#y = np.arange(y_left_lim, y_right_lim, delta)
#X, Y = np.meshgrid(x, y)
#Z = np.zeros((X.shape[0],X.shape[1]))
x = linspace(x_left_lim, x_right_lim, 11)
y = linspace(y_left_lim, y_right_lim, 4)
X, Y = meshgrid(x, y)
Z = np.zeros((X.shape[0],X.shape[1]))
for i in range(y.shape[0]):
for j in range(x.shape[0]):
ang = find_angle(x1,y1,x2,y2,x[j],y[i])
dist1 = distance(x1, y1, x[j], y[i])
dist2 = distance(x2, y2, x[j], y[i])
avg_dist = (dist1+dist2)/2
if avg_dist == 0:
avg_dist = 0.0000001
Z[i,j] = 4*gen_logistic(A, K, B, Q, M, ang) + 1/avg_dist
targets = dict(left=0, right=0, top=0, bottom=0, hspace=30, wspace=30)
if self.orientation == 'UP' or self.orientation == 'DOWN':
fig_w = self.size.width/96.0
fig_h = self.size.width/100.0
contour_offset = self.size.width/100.0 * 10
if self.orientation == 'LEFT' or self.orientation == 'RIGHT':
fig_w = self.size.height/96.0
fig_h = self.size.height/100.0
contour_offset = self.size.height/100.0 * 10
fig = Figure(figsize=(fig_w,fig_h), dpi=120,frameon=False)
plot = fig.add_subplot(111)
adjust_borders(fig, targets)
canvas = FigureCanvasAgg(fig)
if self.orientation == 'UP':
#plot.contourf(X, Y, Z, levels,alpha=0.8)
plot.contourf(X, Y, Z, alpha=0.8)
plot.set_xlim( (x_left_lim, x_right_lim) )
plot.set_ylim( (y_left_lim, y_right_lim) )
elif self.orientation == 'DOWN':
#plot.contourf(X, Y, Z, levels,alpha=0.8)
plot.contourf(X, Y, Z, alpha=0.8)
plot.set_xlim( (x_left_lim, x_right_lim) )
plot.set_ylim( (y_right_lim, y_left_lim) )
elif self.orientation == 'RIGHT':
#plot.contourf(Y, X, Z, levels,alpha=0.8)
plot.contourf(Y, X, Z, alpha=0.8)
plot.set_xlim( (y_left_lim, y_right_lim) )
plot.set_ylim( (x_right_lim, x_left_lim) )
elif self.orientation == 'LEFT':
#plot.contourf(Y, X, Z, levels,alpha=0.8)
plot.contourf(Y, X, Z, alpha=0.8)
plot.set_xlim( (y_right_lim, y_left_lim) )
plot.set_ylim( (x_left_lim, x_right_lim) )
plot.axis('off')
canvas.draw()
# save this and convert to bitmap as needed
s = canvas.tostring_rgb()
l,b,w,h = fig.bbox.bounds
w, h = int(w), int(h)
# convert to a numpy array
X = np.fromstring(s, np.uint8)
X.shape = h, w, 3
image = wx.EmptyImage(w,h)
image.SetData(X.tostring())
wxBitmap = image.ConvertToBitmap()
if self.orientation == 'UP':
dc.DrawBitmap(wxBitmap, position.x - contour_offset, position.y - h)
elif self.orientation == 'DOWN':
dc.DrawBitmap(wxBitmap, position.x - contour_offset, position.y+self.size.height)
elif self.orientation == 'RIGHT':
dc.DrawBitmap(wxBitmap, position.x + self.size.width, position.y - contour_offset)
elif self.orientation == 'LEFT':
dc.DrawBitmap(wxBitmap, position.x - w, position.y - contour_offset)
def doPropertyEdit(self, parent):
getTextEditor()
def _privateDraw(self, dc, position, selected):
""" Private routine to draw this DrawingObject.
'dc' is the device context to use for drawing, while 'position' is
the position in which to draw the object. If 'selected' is True,
the object is drawn with selection handles. This private drawing
routine assumes that the pen and brush have already been set by the
caller.
"""
dc.DrawRectangle(position.x, position.y,
self.size.width, self.size.height)
#dc.DrawText(self.objType + '_' + self.name, position.x+3, position.y+3)
dc.DrawText(self.name, position.x+3, position.y+3)
# Draw arrow to show the orientation
if self.orientation == 'DOWN':
arrow_start = (position.x + self.size.width/2, position.y + self.size.height)
arrow_end = (arrow_start[0], arrow_start[1] + self.size.height/6)
left_wing_start = (arrow_start[0] + self.size.width/10, arrow_start[1] + self.size.height/10)
right_wing_start = (arrow_start[0] - self.size.width/10, arrow_start[1] + self.size.height/10)
if self.orientation == 'UP':
arrow_start = (position.x + self.size.width/2, position.y)
arrow_end = (arrow_start[0], arrow_start[1] - self.size.height/6)
left_wing_start = (arrow_start[0] + self.size.width/10, arrow_start[1] - self.size.height/10)
right_wing_start = (arrow_start[0] - self.size.width/10, arrow_start[1] - self.size.height/10)
if self.orientation == 'RIGHT':
arrow_start = (position.x + self.size.width, position.y + self.size.height/2)
arrow_end = (arrow_start[0] + self.size.width/6, arrow_start[1])
left_wing_start = (arrow_start[0] + self.size.width/10, arrow_start[1] + self.size.height/10)
right_wing_start = (arrow_start[0] + self.size.width/10, arrow_start[1] - self.size.height/10)
if self.orientation == 'LEFT':
arrow_start = (position.x, position.y + self.size.height/2)
arrow_end = (arrow_start[0] - self.size.width/6, arrow_start[1])
left_wing_start = (arrow_start[0] - self.size.width/10, arrow_start[1] + self.size.height/10)
right_wing_start = (arrow_start[0] - self.size.width/10, arrow_start[1] - self.size.height/10)
        if self.orientation is not None:
dc.DrawLine(arrow_start[0], arrow_start[1], arrow_end[0], arrow_end[1])
dc.DrawLine(left_wing_start[0], left_wing_start[1], arrow_end[0], arrow_end[1])
dc.DrawLine(right_wing_start[0], right_wing_start[1], arrow_end[0], arrow_end[1])
if self.ref_obj:
self.draw_contour(dc, position, self.orientation)
#self.draw_contour_lines(dc, position, self.orientation)
#----------------------------------------------------------------------------
class ToolPaletteToggleX(wx.ToggleButton):
""" An icon appearing in the tool palette area of our sketching window.
Note that this is actually implemented as a wx.Bitmap rather
than as a wx.Icon. wx.Icon has a very specific meaning, and isn't
appropriate for this more general use.
"""
def __init__(self, parent, iconID, iconName, toolTip, mode = wx.ITEM_NORMAL):
""" Standard constructor.
'parent' is the parent window this icon will be part of.
'iconID' is the internal ID used for this icon.
'iconName' is the name used for this icon.
'toolTip' is the tool tip text to show for this icon.
'mode' is one of wx.ITEM_NORMAL, wx.ITEM_CHECK, wx.ITEM_RADIO
The icon name is used to get the appropriate bitmap for this icon.
"""
bmp = wx.Bitmap("images/" + iconName + "Icon.bmp", wx.BITMAP_TYPE_BMP)
bmpsel = wx.Bitmap("images/" + iconName + "IconSel.bmp", wx.BITMAP_TYPE_BMP)
wx.ToggleButton.__init__(self, parent, iconID,
size=(bmp.GetWidth()+1, bmp.GetHeight()+1)
)
self.SetLabel( iconName )
self.SetToolTip(wx.ToolTip(toolTip))
#self.SetBitmapLabel(bmp)
#self.SetBitmapSelected(bmpsel)
self.iconID = iconID
self.iconName = iconName
class ToolPaletteToggle(GenBitmapToggleButton):
""" An icon appearing in the tool palette area of our sketching window.
Note that this is actually implemented as a wx.Bitmap rather
than as a wx.Icon. wx.Icon has a very specific meaning, and isn't
appropriate for this more general use.
"""
def __init__(self, parent, iconID, iconName, toolTip, mode = wx.ITEM_NORMAL):
""" Standard constructor.
'parent' is the parent window this icon will be part of.
'iconID' is the internal ID used for this icon.
'iconName' is the name used for this icon.
'toolTip' is the tool tip text to show for this icon.
'mode' is one of wx.ITEM_NORMAL, wx.ITEM_CHECK, wx.ITEM_RADIO
The icon name is used to get the appropriate bitmap for this icon.
"""
bmp = wx.Bitmap("images/" + iconName + "Icon.bmp", wx.BITMAP_TYPE_BMP)
bmpsel = wx.Bitmap("images/" + iconName + "IconSel.bmp", wx.BITMAP_TYPE_BMP)
GenBitmapToggleButton.__init__(self, parent, iconID, bitmap=bmp,
size=(bmp.GetWidth()+1, bmp.GetHeight()+1),
style=wx.BORDER_NONE)
self.SetToolTip(wx.ToolTip(toolTip))
self.SetBitmapLabel(bmp)
self.SetBitmapSelected(bmpsel)
self.iconID = iconID
self.iconName = iconName
class ToolPaletteButton(GenBitmapButton):
""" An icon appearing in the tool palette area of our sketching window.
Note that this is actually implemented as a wx.Bitmap rather
than as a wx.Icon. wx.Icon has a very specific meaning, and isn't
appropriate for this more general use.
"""
def __init__(self, parent, iconID, iconName, toolTip):
""" Standard constructor.
'parent' is the parent window this icon will be part of.
'iconID' is the internal ID used for this icon.
'iconName' is the name used for this icon.
'toolTip' is the tool tip text to show for this icon.
The icon name is used to get the appropriate bitmap for this icon.
"""
bmp = wx.Bitmap("images/" + iconName + "Icon.bmp", wx.BITMAP_TYPE_BMP)
GenBitmapButton.__init__(self, parent, iconID, bitmap=bmp,
size=(bmp.GetWidth()+1, bmp.GetHeight()+1),
style=wx.BORDER_NONE)
self.SetToolTip(wx.ToolTip(toolTip))
self.SetBitmapLabel(bmp)
self.iconID = iconID
self.iconName = iconName
#----------------------------------------------------------------------------
class ToolOptionIndicator(wx.Window):
""" A visual indicator which shows the current tool options.
"""
def __init__(self, parent):
""" Standard constructor.
"""
wx.Window.__init__(self, parent, -1, wx.DefaultPosition, wx.Size(52, 32))
self.penColour = wx.BLACK
self.fillColour = wx.WHITE
self.lineSize = 1
self.objType = None
# Win95 can only handle a 8x8 stipple bitmaps max
#self.stippleBitmap = wx.BitmapFromBits("\xf0"*4 + "\x0f"*4,8,8)
# ...but who uses Win95?
self.stippleBitmap = wx.BitmapFromBits("\xff\x00"*8+"\x00\xff"*8,16,16)
self.stippleBitmap.SetMask(wx.Mask(self.stippleBitmap))
self.Bind(wx.EVT_PAINT, self.onPaint)
def setPenColour(self, penColour):
""" Set the indicator's current pen colour.
"""
self.penColour = penColour
self.Refresh()
def setFillColour(self, fillColour):
""" Set the indicator's current fill colour.
"""
self.fillColour = fillColour
self.Refresh()
def setLineSize(self, lineSize):
""" Set the indicator's current pen colour.
"""
self.lineSize = lineSize
self.Refresh()
def setObjType(self, objType):
""" Set the indicator's current obj type.
"""
self.objType = objType
self.Refresh()
def onPaint(self, event):
""" Paint our tool option indicator.
"""
dc = wx.PaintDC(self)
dc.BeginDrawing()
dc.SetPen(wx.BLACK_PEN)
bgbrush = wx.Brush(wx.WHITE, wx.STIPPLE_MASK_OPAQUE)
bgbrush.SetStipple(self.stippleBitmap)
dc.SetTextForeground(wx.LIGHT_GREY)
dc.SetTextBackground(wx.WHITE)
dc.SetBrush(bgbrush)
dc.DrawRectangle(0, 0, self.GetSize().width,self.GetSize().height)
if self.lineSize == 0:
dc.SetPen(wx.Pen(self.penColour, self.lineSize, wx.TRANSPARENT))
else:
dc.SetPen(wx.Pen(self.penColour, self.lineSize, wx.SOLID))
dc.SetBrush(wx.Brush(self.fillColour, wx.SOLID))
size = self.GetSize()
ctrx = size.x/2
ctry = size.y/2
radius = min(size)//2 - 5
dc.DrawCircle(ctrx, ctry, radius)
dc.EndDrawing()
#----------------------------------------------------------------------------
class ExceptionHandler:
""" A simple error-handling class to write exceptions to a text file.
Under MS Windows, the standard DOS console window doesn't scroll and
closes as soon as the application exits, making it hard to find and
view Python exceptions. This utility class allows you to handle Python
exceptions in a more friendly manner.
"""
def __init__(self):
""" Standard constructor.
"""
self._buff = ""
if os.path.exists("errors.txt"):
os.remove("errors.txt") # Delete previous error log, if any.
def write(self, s):
""" Write the given error message to a text file.
Note that if the error message doesn't end in a carriage return, we
have to buffer up the inputs until a carriage return is received.
"""
if (s[-1] != "\n") and (s[-1] != "\r"):
self._buff = self._buff + s
return
try:
s = self._buff + s
self._buff = ""
f = open("errors.txt", "a")
f.write(s)
print s
f.close()
if s[:9] == "Traceback":
                # Tell the user that an exception occurred.
wx.MessageBox("An internal error has occurred.\nPlease " + \
"refer to the 'errors.txt' file for details.",
"Error", wx.OK | wx.CENTRE | wx.ICON_EXCLAMATION)
sys.exit()
except:
pass # Don't recursively crash on errors.
#----------------------------------------------------------------------------
class SketchApp(wx.PySimpleApp):
""" The main pySketch application object.
"""
def OnInit(self):
""" Initialise the application.
"""
global _docList
_docList = []
if len(sys.argv) == 1:
# No file name was specified on the command line -> start with a
# blank document.
frame = DrawingFrame(None, -1, "Untitled")
frame.Centre()
frame.Show(True)
_docList.append(frame)
else:
# Load the file(s) specified on the command line.
for arg in sys.argv[1:]:
fileName = os.path.join(os.getcwd(), arg)
if os.path.isfile(fileName):
frame = DrawingFrame(None, -1,
os.path.basename(fileName),
fileName=fileName)
frame.Show(True)
_docList.append(frame)
return True
#----------------------------------------------------------------------------
def main():
""" Start up the pySketch application.
"""
global _app
# Redirect python exceptions to a log file.
sys.stderr = ExceptionHandler()
# Create and start the pySketch application.
_app = SketchApp(0)
_app.MainLoop()
if __name__ == "__main__":
main()
| mit | -1,873,411,600,984,155,400 | 35.474932 | 139 | 0.550997 | false |
bert9bert/statsmodels | statsmodels/genmod/tests/test_glm.py | 2 | 72739 | """
Test functions for models.GLM
"""
from __future__ import division
from statsmodels.compat import range
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
assert_allclose, assert_, assert_array_less, dec)
from scipy import stats
import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from statsmodels.discrete import discrete_model as discrete
from nose import SkipTest
import warnings
# Test Precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except:
have_matplotlib = False
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_glm.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
plt.close(fig)
def teardown_module():
if have_matplotlib:
plt.close('all')
if pdf_output:
pdf.close()
class CheckModelResultsMixin(object):
'''
res2 should be either the results from RModelWrap
or the results as defined in model_results_data
'''
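    # Concrete subclasses attach ``self.res1`` (a fitted statsmodels GLM
    # results instance) and ``self.res2`` (reference results); the methods
    # below compare the two attribute by attribute.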
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_bse = DECIMAL_4
def test_standard_errors(self):
assert_almost_equal(self.res1.bse, self.res2.bse, self.decimal_bse)
decimal_resids = DECIMAL_4
def test_residuals(self):
# fix incorrect numbers in resid_working results
# residuals for Poisson are also tested in test_glm_weights.py
import copy
# new numpy would have copy method
resid2 = copy.copy(self.res2.resids)
resid2[:, 2] *= self.res1.family.link.deriv(self.res1.mu)**2
atol = 10**(-self.decimal_resids)
resids = np.column_stack((self.res1.resid_pearson,
self.res1.resid_deviance, self.res1.resid_working,
self.res1.resid_anscombe, self.res1.resid_response))
assert_allclose(resids, resid2, rtol=1e-6, atol=atol)
decimal_aic_R = DECIMAL_4
def test_aic_R(self):
# R includes the estimation of the scale as a lost dof
# Doesn't with Gamma though
if self.res1.scale != 1:
dof = 2
else:
dof = 0
assert_almost_equal(self.res1.aic+dof, self.res2.aic_R,
self.decimal_aic_R)
decimal_aic_Stata = DECIMAL_4
def test_aic_Stata(self):
# Stata uses the below llf for aic definition for these families
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu, self.res1.model.freq_weights, scale=1)
aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
else:
aic = self.res1.aic/self.res1.nobs
assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)
decimal_deviance = DECIMAL_4
def test_deviance(self):
assert_almost_equal(self.res1.deviance, self.res2.deviance,
self.decimal_deviance)
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_loglike = DECIMAL_4
def test_loglike(self):
# Stata uses the below llf for these families
# We differ with R for them
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu, self.res1.model.freq_weights, scale=1)
else:
llf = self.res1.llf
assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)
decimal_null_deviance = DECIMAL_4
def test_null_deviance(self):
assert_almost_equal(self.res1.null_deviance, self.res2.null_deviance,
self.decimal_null_deviance)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic_Stata,
self.decimal_bic)
def test_degrees(self):
assert_equal(self.res1.model.df_resid,self.res2.df_resid)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
self.decimal_fittedvalues)
def test_tpvalues(self):
# test comparing tvalues and pvalues with normal implementation
# make sure they use normal distribution (inherited in results class)
params = self.res1.params
tvalues = params / self.res1.bse
pvalues = stats.norm.sf(np.abs(tvalues)) * 2
half_width = stats.norm.isf(0.025) * self.res1.bse
conf_int = np.column_stack((params - half_width, params + half_width))
assert_almost_equal(self.res1.tvalues, tvalues)
assert_almost_equal(self.res1.pvalues, pvalues)
assert_almost_equal(self.res1.conf_int(), conf_int)
def test_summary(self):
#SMOKE test
self.res1.summary()
self.res1.summary2()
class CheckComparisonMixin(object):
def test_compare_discrete(self):
res1 = self.res1
resd = self.resd
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params)
score_obsd = resd.model.score_obs(resd.params)
assert_allclose(score_obs1, score_obsd, rtol=1e-10)
# score
score1 = res1.model.score(res1.params)
assert_allclose(score1, score_obs1.sum(0), atol=1e-20)
assert_allclose(score1, np.zeros(score_obs1.shape[1]), atol=1e-7)
hessian1 = res1.model.hessian(res1.params, observed=False)
hessiand = resd.model.hessian(resd.params)
assert_allclose(hessian1, hessiand, rtol=1e-10)
hessian1 = res1.model.hessian(res1.params, observed=True)
hessiand = resd.model.hessian(resd.params)
assert_allclose(hessian1, hessiand, rtol=1e-9)
def test_score_test(self):
res1 = self.res1
# fake example, should be zero, k_constraint should be 0
st, pv, df = res1.model.score_test(res1.params, k_constraints=1)
assert_allclose(st, 0, atol=1e-20)
assert_allclose(pv, 1, atol=1e-10)
assert_equal(df, 1)
st, pv, df = res1.model.score_test(res1.params, k_constraints=0)
assert_allclose(st, 0, atol=1e-20)
assert_(np.isnan(pv), msg=repr(pv))
assert_equal(df, 0)
# TODO: no verified numbers largely SMOKE test
exog_extra = res1.model.exog[:,1]**2
st, pv, df = res1.model.score_test(res1.params, exog_extra=exog_extra)
assert_array_less(0.1, st)
assert_array_less(0.1, pv)
assert_equal(df, 1)
class TestGlmGaussian(CheckModelResultsMixin):
def __init__(self):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
self.decimal_resids = DECIMAL_3
self.decimal_params = DECIMAL_2
self.decimal_bic = DECIMAL_0
self.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
self.data = load()
self.data.exog = add_constant(self.data.exog, prepend=False)
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.Gaussian()).fit()
from .results.results_glm import Longley
self.res2 = Longley()
def test_compare_OLS(self):
res1 = self.res1
# OLS doesn't define score_obs
from statsmodels.regression.linear_model import OLS
resd = OLS(self.data.endog, self.data.exog).fit()
self.resd = resd # attach to access from the outside
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params, scale=None)
score_obsd = resd.resid[:, None] / resd.scale * resd.model.exog
# low precision because of badly scaled exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
score_obs1 = res1.model.score_obs(res1.params, scale=1)
score_obsd = resd.resid[:, None] * resd.model.exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
hess_obs1 = res1.model.hessian(res1.params, scale=None)
hess_obsd = -1. / resd.scale * resd.model.exog.T.dot(resd.model.exog)
# low precision because of badly scaled exog
assert_allclose(hess_obs1, hess_obsd, rtol=1e-8)
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# Gauss = r.gaussian
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
# self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
# self.res2.null_deviance = 185008826 # taken from R. Rpy bug?
class TestGaussianLog(CheckModelResultsMixin):
def __init__(self):
# Test Precision
self.decimal_aic_R = DECIMAL_0
self.decimal_aic_Stata = DECIMAL_2
self.decimal_loglike = DECIMAL_0
self.decimal_null_deviance = DECIMAL_1
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
# y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
self.X = np.c_[np.ones((nobs,1)),x,x**2]
self.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
0.001 * np.random.randn(nobs)
GaussLog_Model = GLM(self.lny, self.X, \
family=sm.families.Gaussian(sm.families.links.log()))
self.res1 = GaussLog_Model.fit()
from .results.results_glm import GaussianLog
self.res2 = GaussianLog()
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed"
# GaussLogLink = r.gaussian(link = "log")
# GaussLog_Res_R = RModel(self.lny, self.X, r.glm, family=GaussLogLink)
# self.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_bic = DECIMAL_1
self.decimal_aic_R = DECIMAL_1
self.decimal_aic_Stata = DECIMAL_3
self.decimal_loglike = DECIMAL_1
self.decimal_resids = DECIMAL_3
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
self.X = np.c_[np.ones((nobs,1)),x,x**2]
self.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
InverseLink_Model = GLM(self.y_inv, self.X,
family=sm.families.Gaussian(sm.families.links.inverse_power()))
InverseLink_Res = InverseLink_Model.fit()
self.res1 = InverseLink_Res
from .results.results_glm import GaussianInverse
self.res2 = GaussianInverse()
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# InverseLink = r.gaussian(link = "inverse")
# InverseLink_Res_R = RModel(self.y_inv, self.X, r.glm, family=InverseLink)
# self.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResultsMixin):
def __init__(self):
'''
Test Binomial family with canonical logit link using star98 dataset.
'''
self.decimal_resids = DECIMAL_1
self.decimal_bic = DECIMAL_2
from statsmodels.datasets.star98 import load
from .results.results_glm import Star98
data = load()
data.exog = add_constant(data.exog, prepend=False)
self.res1 = GLM(data.endog, data.exog, \
family=sm.families.Binomial()).fit()
#NOTE: if you want to replicate with RModel
#res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
# family=r.binomial, weights=trials)
self.res2 = Star98()
#TODO:
#Non-Canonical Links for the Binomial family require the algorithm to be
#slightly changed
#class TestGlmBinomialLog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLogit(CheckModelResultsMixin):
# pass
#class TestGlmBinomialProbit(CheckModelResultsMixin):
# pass
#class TestGlmBinomialCloglog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialPower(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLoglog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLogc(CheckModelResultsMixin):
#TODO: need include logc link
# pass
class TestGlmBernoulli(CheckModelResultsMixin, CheckComparisonMixin):
def __init__(self):
from .results.results_glm import Lbw
self.res2 = Lbw()
self.res1 = GLM(self.res2.endog, self.res2.exog,
family=sm.families.Binomial()).fit()
modd = discrete.Logit(self.res2.endog, self.res2.exog)
self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)
def score_test_r(self):
res1 = self.res1
res2 = self.res2
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 1]**2)
st_res = 0.2837680293459376 # (-0.5326988167303712)**2
assert_allclose(st, st_res, rtol=1e-4)
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 0]**2)
st_res = 0.6713492821514992 # (-0.8193590679009413)**2
assert_allclose(st, st_res, rtol=1e-4)
select = list(range(9))
select.pop(7)
res1b = GLM(res2.endog, res2.exog[:, select],
family=sm.families.Binomial()).fit()
tres = res1b.model.score_test(res1b.params,
exog_extra=res1.model.exog[:, -2])
tres = np.asarray(tres[:2]).ravel()
tres_r = (2.7864148487452, 0.0950667)
assert_allclose(tres, tres_r, rtol=1e-4)
cmd_r = """\
data = read.csv("...statsmodels\\statsmodels\\genmod\\tests\\results\\stata_lbw_glm.csv")
data["race_black"] = data["race"] == "black"
data["race_other"] = data["race"] == "other"
mod = glm(low ~ age + lwt + race_black + race_other + smoke + ptl + ht + ui, family=binomial, data=data)
options(digits=16)
anova(mod, test="Rao")
library(statmod)
s = glm.scoretest(mod, data["age"]**2)
s**2
s = glm.scoretest(mod, data["lwt"]**2)
s**2
"""
#class TestGlmBernoulliIdentity(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliLog(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliProbit(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliCloglog(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliPower(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliLoglog(CheckModelResultsMixin):
# pass
#class test_glm_bernoulli_logc(CheckModelResultsMixin):
# pass
class TestGlmGamma(CheckModelResultsMixin):
def __init__(self):
'''
Tests Gamma family with canonical inverse link (power -1)
'''
# Test Precisions
self.decimal_aic_R = -1 #TODO: off by about 1, we are right with Stata
self.decimal_resids = DECIMAL_2
from statsmodels.datasets.scotland import load
from .results.results_glm import Scotvote
data = load()
data.exog = add_constant(data.exog, prepend=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = GLM(data.endog, data.exog,
family=sm.families.Gamma()).fit()
self.res1 = res1
# res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
res2 = Scotvote()
res2.aic_R += 2 # R doesn't count degree of freedom for scale with gamma
self.res2 = res2
class TestGlmGammaLog(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_resids = DECIMAL_3
self.decimal_aic_R = DECIMAL_0
self.decimal_fittedvalues = DECIMAL_3
from .results.results_glm import CancerLog
res2 = CancerLog()
self.res1 = GLM(res2.endog, res2.exog,
family=sm.families.Gamma(link=sm.families.links.log())).fit()
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.Gamma(link="log"))
# self.res2.null_deviance = 27.92207137420696 # From R (bug in rpy)
# self.res2.bic = -154.1582089453923 # from Stata
class TestGlmGammaIdentity(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_resids = -100 #TODO Very off from Stata?
self.decimal_params = DECIMAL_2
self.decimal_aic_R = DECIMAL_0
self.decimal_loglike = DECIMAL_1
from .results.results_glm import CancerIdentity
res2 = CancerIdentity()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.res1 = GLM(res2.endog, res2.exog,
family=sm.families.Gamma(
link=sm.families.links.identity())
).fit()
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.Gamma(link="identity"))
# self.res2.null_deviance = 27.92207137420696 # from R, Rpy bug
class TestGlmPoisson(CheckModelResultsMixin, CheckComparisonMixin):
def __init__(self):
'''
Tests Poisson family with canonical log link.
Test results were obtained by R.
'''
from .results.results_glm import Cpunish
from statsmodels.datasets.cpunish import load
self.data = load()
self.data.exog[:,3] = np.log(self.data.exog[:,3])
self.data.exog = add_constant(self.data.exog, prepend=False)
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.Poisson()).fit()
self.res2 = Cpunish()
# compare with discrete, start close to save time
modd = discrete.Poisson(self.data.endog, self.data.exog)
self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)
#class TestGlmPoissonIdentity(CheckModelResultsMixin):
# pass
#class TestGlmPoissonPower(CheckModelResultsMixin):
# pass
class TestGlmInvgauss(CheckModelResultsMixin):
def __init__(self):
'''
Tests the Inverse Gaussian family in GLM.
Notes
-----
Used the rndivgx.ado file provided by Hardin and Hilbe to
generate the data. Results are read from model_results, which
were obtained by running R_ig.s
'''
# Test Precisions
self.decimal_aic_R = DECIMAL_0
self.decimal_loglike = DECIMAL_0
from .results.results_glm import InvGauss
res2 = InvGauss()
res1 = GLM(res2.endog, res2.exog, \
family=sm.families.InverseGaussian()).fit()
self.res1 = res1
self.res2 = res2
class TestGlmInvgaussLog(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_aic_R = -10 # Big difference vs R.
self.decimal_resids = DECIMAL_3
from .results.results_glm import InvGaussLog
res2 = InvGaussLog()
self.res1 = GLM(res2.endog, res2.exog,
family=sm.families.InverseGaussian(
link=sm.families.links.log())).fit()
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.inverse_gaussian(link="log"))
# self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# self.res2.llf = -12162.72308 # from Stata, R's has big rounding diff
class TestGlmInvgaussIdentity(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_aic_R = -10 #TODO: Big difference vs R
self.decimal_fittedvalues = DECIMAL_3
self.decimal_params = DECIMAL_3
from .results.results_glm import Medpar1
data = Medpar1()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.res1 = GLM(data.endog, data.exog,
family=sm.families.InverseGaussian(
link=sm.families.links.identity())).fit()
from .results.results_glm import InvGaussIdentity
self.res2 = InvGaussIdentity()
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.inverse_gaussian(link="identity"))
# self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# self.res2.llf = -12163.25545 # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResultsMixin):
def __init__(self):
'''
Test Negative Binomial family with log link
'''
# Test Precision
self.decimal_resid = DECIMAL_1
self.decimal_params = DECIMAL_3
self.decimal_resids = -1 # 1 % mismatch at 0
self.decimal_fittedvalues = DECIMAL_1
from statsmodels.datasets.committee import load
self.data = load()
self.data.exog[:,2] = np.log(self.data.exog[:,2])
interaction = self.data.exog[:,2]*self.data.exog[:,1]
self.data.exog = np.column_stack((self.data.exog,interaction))
self.data.exog = add_constant(self.data.exog, prepend=False)
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.NegativeBinomial()).fit()
from .results.results_glm import Committee
res2 = Committee()
res2.aic_R += 2 # They don't count a degree of freedom for the scale
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed"
# r.library('MASS') # this doesn't work when done in rmodelwrap?
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.negative_binomial(1))
# self.res2.null_deviance = 27.8110469364343
#class TestGlmNegbinomial_log(CheckModelResultsMixin):
# pass
#class TestGlmNegbinomial_power(CheckModelResultsMixin):
# pass
#class TestGlmNegbinomial_nbinom(CheckModelResultsMixin):
# pass
class TestGlmPoissonOffset(CheckModelResultsMixin):
@classmethod
def setupClass(cls):
from .results.results_glm import Cpunish_offset
from statsmodels.datasets.cpunish import load
cls.decimal_params = DECIMAL_4
cls.decimal_bse = DECIMAL_4
cls.decimal_aic_R = 3
data = load()
data.exog[:,3] = np.log(data.exog[:,3])
data.exog = add_constant(data.exog, prepend=True)
exposure = [100] * len(data.endog)
cls.data = data
cls.exposure = exposure
cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
exposure=exposure).fit()
cls.res2 = Cpunish_offset()
def test_missing(self):
        # make sure exposure is dropped correctly along with the missing rows
endog = self.data.endog.copy()
endog[[2,4,6,8]] = np.nan
mod = GLM(endog, self.data.exog, family=sm.families.Poisson(),
exposure=self.exposure, missing='drop')
assert_equal(mod.exposure.shape[0], 13)
def test_offset_exposure(self):
# exposure=x and offset=log(x) should have the same effect
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
offset = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset, exposure=exposure).fit()
offset2 = offset + np.log(exposure)
mod2 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset2).fit()
assert_almost_equal(mod1.params, mod2.params)
# test recreating model
mod1_ = mod1.model
kwds = mod1_._get_init_kwds()
assert_allclose(kwds['exposure'], exposure, rtol=1e-14)
assert_allclose(kwds['offset'], mod1_.offset, rtol=1e-14)
mod3 = mod1_.__class__(mod1_.endog, mod1_.exog, **kwds)
assert_allclose(mod3.exposure, mod1_.exposure, rtol=1e-14)
assert_allclose(mod3.offset, mod1_.offset, rtol=1e-14)
def test_predict(self):
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
exposure=exposure).fit()
exog1 = np.random.normal(size=(10,3))
exposure1 = np.random.uniform(1, 2, 10)
# Doubling exposure time should double expected response
pred1 = mod1.predict(exog=exog1, exposure=exposure1)
pred2 = mod1.predict(exog=exog1, exposure=2*exposure1)
assert_almost_equal(pred2, 2*pred1)
# Check exposure defaults
pred3 = mod1.predict()
pred4 = mod1.predict(exposure=exposure)
pred5 = mod1.predict(exog=exog, exposure=exposure)
assert_almost_equal(pred3, pred4)
assert_almost_equal(pred4, pred5)
# Check offset defaults
offset = np.random.uniform(1, 2, 100)
mod2 = GLM(endog, exog, offset=offset, family=sm.families.Poisson()).fit()
pred1 = mod2.predict()
pred2 = mod2.predict(offset=offset)
pred3 = mod2.predict(exog=exog, offset=offset)
assert_almost_equal(pred1, pred2)
assert_almost_equal(pred2, pred3)
# Check that offset shifts the linear predictor
mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit()
offset = np.random.uniform(1, 2, 10)
pred1 = mod3.predict(exog=exog1, offset=offset, linear=True)
pred2 = mod3.predict(exog=exog1, offset=2*offset, linear=True)
assert_almost_equal(pred2, pred1+offset)
def test_perfect_pred():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris = np.genfromtxt(os.path.join(cur_dir, 'results', 'iris.csv'),
delimiter=",", skip_header=1)
y = iris[:, -1]
X = iris[:, :-1]
X = X[y != 2]
y = y[y != 2]
X = add_constant(X, prepend=True)
glm = GLM(y, X, family=sm.families.Binomial())
assert_raises(PerfectSeparationError, glm.fit)
def test_score_test_OLS():
# nicer example than Longley
from statsmodels.regression.linear_model import OLS
np.random.seed(5)
nobs = 100
sige = 0.5
x = np.random.uniform(0, 1, size=(nobs, 5))
x[:, 0] = 1
beta = 1. / np.arange(1., x.shape[1] + 1)
y = x.dot(beta) + sige * np.random.randn(nobs)
res_ols = OLS(y, x).fit()
res_olsc = OLS(y, x[:, :-2]).fit()
co = res_ols.compare_lm_test(res_olsc, demean=False)
res_glm = GLM(y, x[:, :-2], family=sm.families.Gaussian()).fit()
co2 = res_glm.model.score_test(res_glm.params, exog_extra=x[:, -2:])
    # difference comes from df_resid versus nobs used in the scale, see #1786
assert_allclose(co[0] * 97 / 100., co2[0], rtol=1e-13)
def test_attribute_writable_resettable():
# Regression test for mutables and class constructors.
data = sm.datasets.longley.load()
endog, exog = data.endog, data.exog
glm_model = sm.GLM(endog, exog)
assert_equal(glm_model.family.link.power, 1.0)
glm_model.family.link.power = 2.
assert_equal(glm_model.family.link.power, 2.0)
glm_model2 = sm.GLM(endog, exog)
assert_equal(glm_model2.family.link.power, 1.0)
class Test_start_params(CheckModelResultsMixin):
def __init__(self):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
self.decimal_resids = DECIMAL_3
self.decimal_params = DECIMAL_2
self.decimal_bic = DECIMAL_0
self.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
self.data = load()
self.data.exog = add_constant(self.data.exog, prepend=False)
params = sm.OLS(self.data.endog, self.data.exog).fit().params
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.Gaussian()).fit(start_params=params)
from .results.results_glm import Longley
self.res2 = Longley()
def test_glm_start_params():
# see 1604
y2 = np.array('0 1 0 0 0 1'.split(), int)
wt = np.array([50,1,50,1,5,10])
y2 = np.repeat(y2, wt)
x2 = np.repeat([0,0,0.001,100,-1,-1], wt)
mod = sm.GLM(y2, sm.add_constant(x2), family=sm.families.Binomial())
res = mod.fit(start_params=[-4, -5])
np.testing.assert_almost_equal(res.params, [-4.60305022, -5.29634545], 6)
def test_loglike_no_opt():
# see 1728
y = np.asarray([0, 1, 0, 0, 1, 1, 0, 1, 1, 1])
x = np.arange(10, dtype=np.float64)
def llf(params):
lin_pred = params[0] + params[1]*x
pr = 1 / (1 + np.exp(-lin_pred))
return np.sum(y*np.log(pr) + (1-y)*np.log(1-pr))
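    # With maxiter=0 the fit keeps the supplied start_params unchanged, so
    # GLM's reported llf should equal the Bernoulli log-likelihood computed
    # directly by llf() above.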
for params in [0,0], [0,1], [0.5,0.5]:
mod = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial())
res = mod.fit(start_params=params, maxiter=0)
like = llf(params)
assert_almost_equal(like, res.llf)
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
import pandas as pd
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
family = sm.families.Gaussian(link=sm.families.links.log())
mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure,
family=family)
assert_(type(mod.exposure) is np.ndarray, msg='Exposure is not ndarray')
exposure = pd.Series(np.random.uniform(size=5))
assert_raises(ValueError, smf.glm, "Foo ~ Bar", data=df,
exposure=exposure, family=family)
assert_raises(ValueError, GLM, df.Foo, df[['constant', 'Bar']],
exposure=exposure, family=family)
@dec.skipif(not have_matplotlib)
def test_plots():
np.random.seed(378)
n = 200
exog = np.random.normal(size=(n, 2))
lin_pred = exog[:, 0] + exog[:, 1]**2
prob = 1 / (1 + np.exp(-lin_pred))
endog = 1 * (np.random.uniform(size=n) < prob)
model = sm.GLM(endog, exog, family=sm.families.Binomial())
result = model.fit()
import matplotlib.pyplot as plt
import pandas as pd
from statsmodels.graphics.regressionplots import add_lowess
# array interface
for j in 0,1:
fig = result.plot_added_variable(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
# formula interface
data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
model = sm.GLM.from_formula("y ~ x1 + x2", data, family=sm.families.Binomial())
result = model.fit()
for j in 0,1:
xname = ["x1", "x2"][j]
fig = result.plot_added_variable(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
def gen_endog(lin_pred, family_class, link, binom_version=0):
np.random.seed(872)
fam = sm.families
mu = link().inverse(lin_pred)
if family_class == fam.Binomial:
if binom_version == 0:
endog = 1*(np.random.uniform(size=len(lin_pred)) < mu)
else:
endog = np.empty((len(lin_pred), 2))
n = 10
endog[:, 0] = (np.random.uniform(size=(len(lin_pred), n)) < mu[:, None]).sum(1)
endog[:, 1] = n - endog[:, 0]
elif family_class == fam.Poisson:
endog = np.random.poisson(mu)
elif family_class == fam.Gamma:
endog = np.random.gamma(2, mu)
elif family_class == fam.Gaussian:
endog = mu + np.random.normal(size=len(lin_pred))
elif family_class == fam.NegativeBinomial:
from scipy.stats.distributions import nbinom
endog = nbinom.rvs(mu, 0.5)
elif family_class == fam.InverseGaussian:
from scipy.stats.distributions import invgauss
endog = invgauss.rvs(mu)
else:
raise ValueError
return endog
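# Example (hypothetical) call: simulate Poisson counts for a log link with
#   endog = gen_endog(lin_pred, sm.families.Poisson, sm.families.links.log)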
def test_summary():
"""
Smoke test for summary.
"""
np.random.seed(4323)
n = 100
exog = np.random.normal(size=(n, 2))
exog[:, 0] = 1
endog = np.random.normal(size=n)
for method in "irls", "cg":
fa = sm.families.Gaussian()
model = sm.GLM(endog, exog, family=fa)
rslt = model.fit(method=method)
s = rslt.summary()
def test_gradient_irls():
# Compare the results when using gradient optimization and IRLS.
# TODO: Find working examples for inverse_squared link
np.random.seed(87342)
fam = sm.families
lnk = sm.families.links
families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, lnk.cauchy]),
(fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
(fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
(fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
(fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]),
(fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])]
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
skip_one = False
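    # skip_one is used below to skip the max_start_irls > 0 configuration for
    # combinations with known convergence failures.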
for family_class, family_links in families:
for link in family_links:
for binom_version in 0,1:
if family_class != fam.Binomial and binom_version == 1:
continue
if (family_class, link) == (fam.Poisson, lnk.identity):
lin_pred = 20 + exog.sum(1)
elif (family_class, link) == (fam.Binomial, lnk.log):
lin_pred = -1 + exog.sum(1) / 8
elif (family_class, link) == (fam.Poisson, lnk.sqrt):
lin_pred = 2 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.log):
#skip_zero = True
lin_pred = -1 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared):
lin_pred = 0.5 + exog.sum(1) / 5
continue # skip due to non-convergence
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.NegativeBinomial, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared):
lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
continue # skip due to non-convergence
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
# adding skip because of convergence failure
skip_one = True
else:
lin_pred = np.random.uniform(size=exog.shape[0])
endog = gen_endog(lin_pred, family_class, link, binom_version)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_irls = sm.GLM(endog, exog, family=family_class(link=link()))
rslt_irls = mod_irls.fit(method="IRLS")
# Try with and without starting values.
for max_start_irls, start_params in (0, rslt_irls.params), (3, None):
# TODO: skip convergence failures for now
if max_start_irls > 0 and skip_one:
continue
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_gradient = sm.GLM(endog, exog, family=family_class(link=link()))
rslt_gradient = mod_gradient.fit(max_start_irls=max_start_irls,
start_params=start_params,
method="newton")
assert_allclose(rslt_gradient.params,
rslt_irls.params, rtol=1e-6, atol=5e-5)
assert_allclose(rslt_gradient.llf, rslt_irls.llf,
rtol=1e-6, atol=1e-6)
assert_allclose(rslt_gradient.scale, rslt_irls.scale,
rtol=1e-6, atol=1e-6)
# Get the standard errors using expected information.
gradient_bse = rslt_gradient.bse
ehess = mod_gradient.hessian(rslt_gradient.params, observed=False)
gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6, atol=5e-5)
class CheckWtdDuplicationMixin(object):
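    # Fitting with freq_weights (res1) should reproduce the fit obtained by
    # literally repeating each observation 'weight' times (res2); the tests
    # below check that the two fits agree.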
decimal_params = DECIMAL_4
def __init__(self):
from statsmodels.datasets.cpunish import load
self.data = load()
self.endog = self.data.endog
self.exog = self.data.exog
np.random.seed(1234)
self.weight = np.random.randint(5, 100, len(self.endog))
self.endog_big = np.repeat(self.endog, self.weight)
self.exog_big = np.repeat(self.exog, self.weight, axis=0)
def test_params(self):
assert_allclose(self.res1.params, self.res2.params, atol=1e-6,
rtol=1e-6)
decimal_bse = DECIMAL_4
def test_standard_errors(self):
assert_allclose(self.res1.bse, self.res2.bse, rtol=1e-5, atol=1e-6)
decimal_resids = DECIMAL_4
# TODO: This doesn't work... Arrays are of different shape.
# Perhaps we use self.res1.model.family.resid_XXX()?
"""
def test_residuals(self):
resids1 = np.column_stack((self.res1.resid_pearson,
self.res1.resid_deviance,
self.res1.resid_working,
self.res1.resid_anscombe,
self.res1.resid_response))
resids2 = np.column_stack((self.res1.resid_pearson,
self.res2.resid_deviance,
self.res2.resid_working,
self.res2.resid_anscombe,
self.res2.resid_response))
assert_allclose(resids1, resids2, self.decimal_resids)
"""
def test_aic(self):
# R includes the estimation of the scale as a lost dof
# Doesn't with Gamma though
assert_allclose(self.res1.aic, self.res2.aic, atol=1e-6, rtol=1e-6)
def test_deviance(self):
assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
rtol=1e-6)
def test_scale(self):
assert_allclose(self.res1.scale, self.res2.scale, atol=1e-6, rtol=1e-6)
def test_loglike(self):
# Stata uses the below llf for these families
# We differ with R for them
assert_allclose(self.res1.llf, self.res2.llf, 1e-6)
decimal_null_deviance = DECIMAL_4
def test_null_deviance(self):
assert_allclose(self.res1.null_deviance, self.res2.null_deviance,
atol=1e-6, rtol=1e-6)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_allclose(self.res1.bic, self.res2.bic, atol=1e-6, rtol=1e-6)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
res2_fitted = self.res2.predict(self.res1.model.exog)
assert_allclose(self.res1.fittedvalues, res2_fitted, atol=1e-5,
rtol=1e-5)
decimal_tpvalues = DECIMAL_4
def test_tpvalues(self):
# test comparing tvalues and pvalues with normal implementation
# make sure they use normal distribution (inherited in results class)
assert_allclose(self.res1.tvalues, self.res2.tvalues, atol=1e-6,
rtol=2e-4)
assert_allclose(self.res1.pvalues, self.res2.pvalues, atol=1e-6,
rtol=1e-6)
assert_allclose(self.res1.conf_int(), self.res2.conf_int(), atol=1e-6,
rtol=1e-6)
class TestWtdGlmPoisson(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoisson, self).__init__()
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=sm.families.Poisson()).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=sm.families.Poisson()).fit()
class TestWtdGlmPoissonNewton(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonNewton, self).__init__()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
fit_kwds = dict(method='newton')
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
fit_kwds = dict(method='newton', start_params=start_params)
self.res2 = GLM(self.endog_big, self.exog_big,
family=sm.families.Poisson()).fit(**fit_kwds)
class TestWtdGlmPoissonHC0(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonHC0, self).__init__()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
fit_kwds = dict(cov_type='HC0')
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
fit_kwds = dict(cov_type='HC0', start_params=start_params)
self.res2 = GLM(self.endog_big, self.exog_big,
family=sm.families.Poisson()).fit(**fit_kwds)
class TestWtdGlmPoissonClu(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Poisson family with canonical log link.
'''
super(TestWtdGlmPoissonClu, self).__init__()
start_params = np.array([1.82794424e-04, -4.76785037e-02,
-9.48249717e-02, -2.92293226e-04,
2.63728909e+00, -2.05934384e+01])
gid = np.arange(1, len(self.endog) + 1) // 2
fit_kwds = dict(cov_type='cluster', cov_kwds={'groups': gid, 'use_correction':False})
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=sm.families.Poisson()).fit(**fit_kwds)
gidr = np.repeat(gid, self.weight)
fit_kwds = dict(cov_type='cluster', cov_kwds={'groups': gidr, 'use_correction':False})
self.res2 = GLM(self.endog_big, self.exog_big,
family=sm.families.Poisson()).fit(start_params=start_params,
**fit_kwds)
class TestWtdGlmBinomial(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Binomial family with canonical logit link.
'''
super(TestWtdGlmBinomial, self).__init__()
self.endog = self.endog / 100
self.endog_big = self.endog_big / 100
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=sm.families.Binomial()).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=sm.families.Binomial()).fit()
class TestWtdGlmNegativeBinomial(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Negative Binomial family with canonical link
g(p) = log(p/(p + 1/alpha))
'''
super(TestWtdGlmNegativeBinomial, self).__init__()
alpha=1.
family_link = sm.families.NegativeBinomial(
link=sm.families.links.nbinom(alpha=alpha),
alpha=alpha)
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link).fit()
class TestWtdGlmGamma(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGamma, self).__init__()
family_link = sm.families.Gamma(sm.families.links.log())
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link).fit()
class TestWtdGlmGaussian(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Gaussian family with log link.
'''
super(TestWtdGlmGaussian, self).__init__()
family_link = sm.families.Gaussian(sm.families.links.log())
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link).fit()
class TestWtdGlmInverseGaussian(CheckWtdDuplicationMixin):
def __init__(self):
'''
        Tests InverseGaussian family with log link.
'''
super(TestWtdGlmInverseGaussian, self).__init__()
family_link = sm.families.InverseGaussian(sm.families.links.log())
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link).fit()
class TestWtdGlmGammaNewton(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaNewton, self).__init__()
family_link = sm.families.Gamma(sm.families.links.log())
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link,
method='newton').fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link,
method='newton').fit()
class TestWtdGlmGammaScale_X2(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaScale_X2, self).__init__()
family_link = sm.families.Gamma(sm.families.links.log())
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link,
scale='X2').fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link,
scale='X2').fit()
class TestWtdGlmGammaScale_dev(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Gamma family with log link.
'''
super(TestWtdGlmGammaScale_dev, self).__init__()
family_link = sm.families.Gamma(sm.families.links.log())
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link,
scale='dev').fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link,
scale='dev').fit()
def test_missing(self):
endog = self.data.endog.copy()
exog = self.data.exog.copy()
exog[0, 0] = np.nan
endog[[2, 4, 6, 8]] = np.nan
freq_weights = self.weight
        mod_missing = GLM(endog, exog, family=self.res1.model.family,
                          freq_weights=freq_weights, missing='drop')
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.endog.shape[0])
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.exog.shape[0])
keep_idx = np.array([1, 3, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16])
        assert_equal(mod_missing.freq_weights, self.weight[keep_idx])
class TestWtdTweedieLog(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Tweedie family with log link and var_power=1.
'''
super(TestWtdTweedieLog, self).__init__()
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1)
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link).fit()
class TestWtdTweediePower2(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Tweedie family with Power(1) link and var_power=2.
'''
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.endog = self.data.endog
self.exog = self.data.exog[['INCOME', 'SOUTH']]
np.random.seed(1234)
self.weight = np.random.randint(5, 100, len(self.endog))
self.endog_big = np.repeat(self.endog.values, self.weight)
self.exog_big = np.repeat(self.exog.values, self.weight, axis=0)
link = sm.families.links.Power(1)
family_link = sm.families.Tweedie(link=link, var_power=2)
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link).fit()
class TestWtdTweediePower15(CheckWtdDuplicationMixin):
def __init__(self):
'''
Tests Tweedie family with Power(0.5) link and var_power=1.5.
'''
super(TestWtdTweediePower15, self).__init__()
family_link = sm.families.Tweedie(link=sm.families.links.Power(0.5),
var_power=1.5)
self.res1 = GLM(self.endog, self.exog,
freq_weights=self.weight,
family=family_link).fit()
self.res2 = GLM(self.endog_big, self.exog_big,
family=family_link).fit()
def test_wtd_patsy_missing():
from statsmodels.datasets.cpunish import load
import pandas as pd
data = load()
data.exog[0, 0] = np.nan
data.endog[[2, 4, 6, 8]] = np.nan
data.pandas = pd.DataFrame(data.exog, columns=data.exog_name)
data.pandas['EXECUTIONS'] = data.endog
weights = np.arange(1, len(data.endog)+1)
formula = """EXECUTIONS ~ INCOME + PERPOVERTY + PERBLACK + VC100k96 +
SOUTH + DEGREE"""
    mod_missing = GLM.from_formula(formula, data=data.pandas,
                                   freq_weights=weights)
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.endog.shape[0])
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.exog.shape[0])
    assert_equal(mod_missing.freq_weights.shape[0], 12)
    keep_weights = np.array([2, 4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17])
    assert_equal(mod_missing.freq_weights, keep_weights)
class CheckTweedie(object):
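    # Shared checks comparing a fitted model (res1) against reference results
    # (res2): residuals, bse, params, deviance, df and fitted values.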
def test_resid(self):
l = len(self.res1.resid_response) - 1
l2 = len(self.res2.resid_response) - 1
assert_allclose(np.concatenate((self.res1.resid_response[:17],
[self.res1.resid_response[l]])),
np.concatenate((self.res2.resid_response[:17],
[self.res2.resid_response[l2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_pearson[:17],
[self.res1.resid_pearson[l]])),
np.concatenate((self.res2.resid_pearson[:17],
[self.res2.resid_pearson[l2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_deviance[:17],
[self.res1.resid_deviance[l]])),
np.concatenate((self.res2.resid_deviance[:17],
[self.res2.resid_deviance[l2]])),
rtol=1e-5, atol=1e-5)
assert_allclose(np.concatenate((self.res1.resid_working[:17],
[self.res1.resid_working[l]])),
np.concatenate((self.res2.resid_working[:17],
[self.res2.resid_working[l2]])),
rtol=1e-5, atol=1e-5)
def test_bse(self):
assert_allclose(self.res1.bse, self.res2.bse, atol=1e-6, rtol=1e6)
def test_params(self):
assert_allclose(self.res1.params, self.res2.params, atol=1e-5,
rtol=1e-5)
def test_deviance(self):
assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
rtol=1e-6)
def test_df(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_fittedvalues(self):
l = len(self.res1.fittedvalues) - 1
l2 = len(self.res2.resid_response) - 1
assert_allclose(np.concatenate((self.res1.fittedvalues[:17],
[self.res1.fittedvalues[l]])),
np.concatenate((self.res2.fittedvalues[:17],
[self.res2.fittedvalues[l2]])),
atol=1e-4, rtol=1e-4)
def test_summary(self):
self.res1.summary()
self.res1.summary2()
class TestTweediePower15(CheckTweedie):
@classmethod
def setupClass(self):
from .results.results_glm import CpunishTweediePower15
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.exog = self.data.exog[['INCOME', 'SOUTH']]
self.endog = self.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.Power(1),
var_power=1.5)
self.res1 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
self.res2 = CpunishTweediePower15()
class TestTweediePower2(CheckTweedie):
@classmethod
def setupClass(self):
from .results.results_glm import CpunishTweediePower2
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.exog = self.data.exog[['INCOME', 'SOUTH']]
self.endog = self.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.Power(1),
var_power=2.)
self.res1 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
self.res2 = CpunishTweediePower2()
class TestTweedieLog1(CheckTweedie):
@classmethod
def setupClass(self):
from .results.results_glm import CpunishTweedieLog1
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.exog = self.data.exog[['INCOME', 'SOUTH']]
self.endog = self.data.endog
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.)
self.res1 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family_link).fit()
self.res2 = CpunishTweedieLog1()
class TestTweedieLog15Fair(CheckTweedie):
@classmethod
def setupClass(self):
from .results.results_glm import FairTweedieLog15
from statsmodels.datasets.fair import load_pandas
data = load_pandas()
family_link = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.5)
self.res1 = sm.GLM(endog=data.endog,
exog=data.exog[['rate_marriage', 'age',
'yrs_married']],
family=family_link).fit()
self.res2 = FairTweedieLog15()
class CheckTweedieSpecial(object):
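    # Shared checks verifying that a Tweedie family with an integer var_power
    # reproduces the corresponding classical family (see the subclasses below).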
def test_mu(self):
assert_allclose(self.res1.mu, self.res2.mu, rtol=1e-5, atol=1e-5)
def test_resid(self):
assert_allclose(self.res1.resid_response, self.res2.resid_response,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_pearson, self.res2.resid_pearson,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_deviance, self.res2.resid_deviance,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_working, self.res2.resid_working,
rtol=1e-5, atol=1e-5)
assert_allclose(self.res1.resid_anscombe, self.res2.resid_anscombe,
rtol=1e-5, atol=1e-5)
class TestTweedieSpecialLog0(CheckTweedieSpecial):
@classmethod
def setupClass(self):
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.exog = self.data.exog[['INCOME', 'SOUTH']]
self.endog = self.data.endog
family1 = sm.families.Gaussian(link=sm.families.links.log())
self.res1 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=0)
self.res2 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog1(CheckTweedieSpecial):
@classmethod
def setupClass(self):
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.exog = self.data.exog[['INCOME', 'SOUTH']]
self.endog = self.data.endog
family1 = sm.families.Poisson(link=sm.families.links.log())
self.res1 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=1)
self.res2 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog2(CheckTweedieSpecial):
@classmethod
def setupClass(self):
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.exog = self.data.exog[['INCOME', 'SOUTH']]
self.endog = self.data.endog
family1 = sm.families.Gamma(link=sm.families.links.log())
self.res1 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=2)
self.res2 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
class TestTweedieSpecialLog3(CheckTweedieSpecial):
@classmethod
def setupClass(self):
from statsmodels.datasets.cpunish import load_pandas
self.data = load_pandas()
self.exog = self.data.exog[['INCOME', 'SOUTH']]
self.endog = self.data.endog
family1 = sm.families.InverseGaussian(link=sm.families.links.log())
self.res1 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family1).fit()
family2 = sm.families.Tweedie(link=sm.families.links.log(),
var_power=3)
self.res2 = sm.GLM(endog=self.data.endog,
exog=self.data.exog[['INCOME', 'SOUTH']],
family=family2).fit()
def testTweediePowerEstimate():
"""
Test the Pearson estimate of the Tweedie variance and scale parameters.
Ideally, this would match the following R code, but I can't make it work...
setwd('c:/workspace')
data <- read.csv('cpunish.csv', sep=",")
library(tweedie)
y <- c(1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00)
data$NewY <- y
out <- tweedie.profile( NewY ~ INCOME + SOUTH - 1,
p.vec=c(1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9), link.power=0,
data=data,do.plot = TRUE)
"""
data = sm.datasets.cpunish.load_pandas()
y = [1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00]
model1 = sm.GLM(y, data.exog[['INCOME', 'SOUTH']],
family=sm.families.Tweedie(link=sm.families.links.log(),
var_power=1.5))
res1 = model1.fit()
model2 = sm.GLM((y - res1.mu) ** 2,
np.column_stack((np.ones(len(res1.mu)), np.log(res1.mu))),
family=sm.families.Gamma(sm.families.links.log()))
res2 = model2.fit()
# Sample may be too small for this...
# assert_allclose(res1.scale, np.exp(res2.params[0]), rtol=0.25)
p = model1.estimate_tweedie_power(res1.mu)
assert_allclose(p, res2.params[1], rtol=0.25)
class TestRegularized(object):
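    # Compares elastic-net regularized GLM fits against reference results from
    # the R glmnet package (results/enet_*.csv and glmnet_r_results).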
def test_regularized(self):
import os
from . import glmnet_r_results
for dtype in "binomial", "poisson":
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(cur_dir, "results", "enet_%s.csv" % dtype),
delimiter=",")
endog = data[:, 0]
exog = data[:, 1:]
fam = {"binomial" : sm.families.Binomial,
"poisson" : sm.families.Poisson}[dtype]
for j in range(9):
vn = "rslt_%s_%d" % (dtype, j)
r_result = getattr(glmnet_r_results, vn)
L1_wt = r_result[0]
alpha = r_result[1]
params = r_result[2:]
model = GLM(endog, exog, family=fam())
sm_result = model.fit_regularized(L1_wt=L1_wt, alpha=alpha)
# Agreement is OK, see below for further check
assert_allclose(params, sm_result.params, atol=1e-2, rtol=0.3)
# The penalized log-likelihood that we are maximizing.
def plf(params):
llf = model.loglike(params) / len(endog)
llf = llf - alpha * ((1 - L1_wt)*np.sum(params**2) / 2 + L1_wt*np.sum(np.abs(params)))
return llf
# Confirm that we are doing better than glmnet.
from numpy.testing import assert_equal
llf_r = plf(params)
llf_sm = plf(sm_result.params)
assert_equal(np.sign(llf_sm - llf_r), 1)
class TestConvergence(object):
def __init__(self):
'''
Test Binomial family with canonical logit link using star98 dataset.
'''
from statsmodels.datasets.star98 import load
data = load()
data.exog = add_constant(data.exog, prepend=False)
self.model = GLM(data.endog, data.exog,
family=sm.families.Binomial())
def _when_converged(self, atol=1e-8, rtol=0, tol_criterion='deviance'):
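        # Return the index of the first iteration at which consecutive values
        # of the tolerance criterion agree to within (atol, rtol).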
for i, dev in enumerate(self.res.fit_history[tol_criterion]):
orig = self.res.fit_history[tol_criterion][i]
new = self.res.fit_history[tol_criterion][i + 1]
if np.allclose(orig, new, atol=atol, rtol=rtol):
return i
        raise ValueError('CONVERGENCE CHECK: It seems this doesn\'t converge!')
def test_convergence_atol_only(self):
atol = 1e-8
rtol = 0
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_rtol_only(self):
atol = 0
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_rtol(self):
atol = 1e-8
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol)
expected_iterations = self._when_converged(atol=atol, rtol=rtol)
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_only_params(self):
atol = 1e-8
rtol = 0
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_rtol_only_params(self):
atol = 0
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_convergence_atol_rtol_params(self):
atol = 1e-8
rtol = 1e-8
self.res = self.model.fit(atol=atol, rtol=rtol, tol_criterion='params')
expected_iterations = self._when_converged(atol=atol, rtol=rtol,
tol_criterion='params')
actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
# is the initial guess based off of start_params or the
# estimate thereof. The third value (index = 2) is the actual "first
# iteration"
assert_equal(expected_iterations, actual_iterations)
assert_equal(len(self.res.fit_history['deviance']) - 2,
actual_iterations)
def test_poisson_deviance():
# see #3355 missing term in deviance if resid_response.sum() != 0
np.random.seed(123987)
nobs, k_vars = 50, 3-1
x = sm.add_constant(np.random.randn(nobs, k_vars))
mu_true = np.exp(x.sum(1))
y = np.random.poisson(mu_true, size=nobs)
mod = sm.GLM(y, x[:, :], family=sm.genmod.families.Poisson())
res = mod.fit()
d_i = res.resid_deviance
d = res.deviance
lr = (mod.family.loglike(y, y+1e-20) -
mod.family.loglike(y, res.fittedvalues)) * 2
assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
assert_allclose(d, lr, rtol=1e-12)
# case without constant, resid_response.sum() != 0
mod_nc = sm.GLM(y, x[:, 1:], family=sm.genmod.families.Poisson())
res_nc = mod_nc.fit()
d_i = res_nc.resid_deviance
d = res_nc.deviance
lr = (mod.family.loglike(y, y+1e-20) -
mod.family.loglike(y, res_nc.fittedvalues)) * 2
assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
assert_allclose(d, lr, rtol=1e-12)
if __name__ == "__main__":
# run_module_suite()
# taken from Fernando Perez:
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
exit=False)
| bsd-3-clause | 8,514,530,532,170,439,000 | 37.960364 | 112 | 0.5794 | false |
TinyOS-Camp/DDEA-DEV | Archive/[14_10_11] Dr_Jung_Update/df_data_analysis_gsbc.py | 1 | 17880 | # coding: utf-8
"""
======================================================================
Learning and Visualizing the BMS sensor-time-weather data structure
======================================================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently use H (Hour) as a fundamental timelet, need to change later **
The following analysis steps are designed to be executed.
Data Pre-processing
--------------------------
- Data Retrieval and Standardization
- Outlier Detection
- Interpolation
Data Summarization
--------------------------
- Data Transformation
- Sensor Clustering
Model Discovery Bayesian Network
--------------------------
- Automatic State Classification
- Structure Discovery and Analysis
"""
#print(__doc__)
# Author: Deokwoo Jung <deokwoo.jung@gmail.com>
##################################################################
# General Modules
from __future__ import division # To force floating point division
import os
import sys
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
import uuid
import pylab as pl
from scipy import signal
from scipy import stats
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from multiprocessing import Pool
#from datetime import datetime
import datetime as dt
from dateutil import tz
import shlex, subprocess
import mytool as mt
import time
import retrieve_weather as rw
import itertools
import calendar
import random
from matplotlib.collections import LineCollection
#from stackedBarGraph import StackedBarGrapher
import pprint
#import radar_chart
# Custom library
from data_tools import *
from data_retrieval import *
from pack_cluster import *
from data_preprocess import *
from shared_constants import *
from pre_bn_state_processing import *
from data_summerization import *
##################################################################
# Interactive mode for plotting
plt.ion()
##################################################################
# Processing Configuration Settings
##################################################################
# Analysis buildings set
# Main building x where x is 1-16
# Conference bldg
# Machine Room
# All Power Measurements
IS_USING_SAVED_DICT=-1
print 'Extract a common time range...'
##################################################################
# List buildings and substation names
gsbc_bgid_dict=mt.loadObjectBinaryFast('gsbc_bgid_dict.bin')
PRE_BN_STAGE=1
if PRE_BN_STAGE==0:
bldg_key_set=[]
print 'skip PRE_BN_STAGE....'
else:
bldg_key_set=gsbc_bgid_dict.keys()
#########################################
# 1. Electricity Room and Machine Room - 'elec_machine_room_bldg'
#########################################
#########################################
# 2. Conference Building - 'conference_bldg'
#########################################
#########################################
# 3. Main Building - 'main_bldg_x'
#########################################
for bldg_key in bldg_key_set:
print '###############################################################################'
print '###############################################################################'
print 'Processing '+ bldg_key+'.....'
print '###############################################################################'
print '###############################################################################'
bldg_id=[key_val[1] for key_val in gsbc_bgid_dict.items() if key_val[0]==bldg_key][0]
temp=''
for bldg_id_temp in bldg_id:
temp=temp+subprocess.check_output('ls '+DATA_DIR+'*'+bldg_id_temp+'*.bin', shell=True)
input_files_temp =shlex.split(temp)
# Get rid of duplicated files
input_files_temp=list(set(input_files_temp))
input_files=input_files_temp
#input_files=['../gvalley/Binfiles/'+temp for temp in input_files_temp]
IS_USING_SAVED_DICT=-1
print 'Extract a common time range...'
# Analysis period
ANS_START_T=dt.datetime(2013,6,1,0)
ANS_END_T=dt.datetime(2013,11,15,0)
# Interval of timelet, currently set to 1 Hour
TIMELET_INV=dt.timedelta(minutes=30)
print TIMELET_INV, 'time slot interval is set for this data set !!'
print '-------------------------------------------------------------------'
PROC_AVG=True
PROC_DIFF=False
###############################################################################
# This directly searches files from bin file name
print '###############################################################################'
print '# Data Pre-Processing'
print '###############################################################################'
# define input_files to be read
if IS_USING_SAVED_DICT==0:
ANS_START_T,ANS_END_T,input_file_to_be_included=\
time_range_check(input_files,ANS_START_T,ANS_END_T,TIMELET_INV)
print 'time range readjusted to (' ,ANS_START_T, ', ', ANS_END_T,')'
start__dictproc_t=time.time()
if IS_SAVING_INDIVIDUAL==True:
data_dict=construct_data_dict_2\
(input_files,ANS_START_T,ANS_END_T,TIMELET_INV,binfilename='data_dict', \
IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
else:
data_dict,purge_list=\
construct_data_dict(input_file_to_be_included,ANS_START_T,ANS_END_T,TIMELET_INV,\
binfilename='data_dict',IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
end__dictproc_t=time.time()
print 'the time of construct data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
elif IS_USING_SAVED_DICT==1:
print 'Loading data dictionary......'
start__dictproc_t=time.time()
data_dict = mt.loadObjectBinaryFast('data_dict.bin')
end__dictproc_t=time.time()
print 'the time of loading data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
else:
print 'Skip data dict'
CHECK_DATA_FORMAT=0
if CHECK_DATA_FORMAT==1:
if IS_SAVING_INDIVIDUAL==True:
list_of_wrong_data_format=verify_data_format_2(data_used,data_dict,time_slots)
else:
list_of_wrong_data_format=verify_data_format(data_used,data_dict,time_slots)
if len(list_of_wrong_data_format)>0:
print 'Measurement list below'
print '----------------------------------------'
print list_of_wrong_data_format
raise NameError('Errors in data format')
Data_Summarization=0
if Data_Summarization==1:
bldg_out=data_summerization(bldg_key,data_dict,PROC_AVG=True,PROC_DIFF=False)
print '###############################################################################'
print '# Model_Discovery'
print '###############################################################################'
gsbc_key_dict=mt.loadObjectBinaryFast('./gsbc_key_dict_all.bin')
# Analysis of BN network result
def convert_gsbc_name(id_labels):
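        # Map GSBC sensor labels (first two characters stripped) to readable
        # names via gsbc_key_dict; labels without a match are returned unchanged.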
if isinstance(id_labels,list)==False:
id_labels=[id_labels]
out_name=[gsbc_key_dict[key_label_[2:]] if key_label_[2:] \
in gsbc_key_dict else key_label_ for key_label_ in id_labels ]
return out_name
Model_Discovery=0
if Model_Discovery==1:
pwr_key='30......$';dict_dir='./GSBC/'
LOAD_BLDG_OBJ=0
if LOAD_BLDG_OBJ==1:
print 'not yet ready'
bldg_=mt.loadObjectBinaryFast(PROC_OUT_DIR+'gsbc_bldg_obj.bin')
else:
bldg_dict={}
for bldg_load_key in gsbc_bgid_dict.keys():
print 'Building for ',bldg_load_key, '....'
try:
bldg_tag='gsbc_'+bldg_load_key
bldg_load_out=mt.loadObjectBinaryFast(dict_dir+bldg_load_key+'_out.bin')
except:
print 'not found, skip....'
pass
mt.saveObjectBinaryFast(bldg_load_out['data_dict'],dict_dir+'data_dict.bin')
if 'avgdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],dict_dir+'avgdata_dict.bin')
if 'diffdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],dict_dir+'diffdata_dict.bin')
pname_key= pwr_key
bldg_dict.update({bldg_tag:create_bldg_obj(dict_dir,bldg_tag,pname_key)})
bldg_=obj(bldg_dict)
# Commented out to avoid memory error
#cmd_str='bldg_.'+bldg_tag+'.data_out=obj(bldg_load_out)'
#exec(cmd_str)
cmd_str='bldg_obj=bldg_.'+bldg_tag
exec(cmd_str)
anal_out={}
if 'avgdata_dict' in bldg_load_out.keys():
anal_out.update({'avg':bn_prob_analysis(bldg_obj,sig_tag_='avg')})
if 'diffdata_dict' in bldg_load_out.keys():
anal_out.update({'diff':bn_prob_analysis(bldg_obj,sig_tag_='diff')})
cmd_str='bldg_.'+bldg_tag+'.anal_out=obj(anal_out)'
exec(cmd_str)
break
cmd_str='bldg_.'+'convert_name=convert_gsbc_name'
exec(cmd_str)
mt.saveObjectBinaryFast(bldg_ ,PROC_OUT_DIR+'gsbc_bldg_obj.bin')
mt.saveObjectBinaryFast('LOAD_BLDG_OBJ' ,PROC_OUT_DIR+'gsbc_bldg_obj_is_done.txt')
#######################################################################################
# Analysis For GSBC
#######################################################################################
# Analysis of BN network result
BN_ANAL=1
if BN_ANAL==1:
bldg_ = mt.loadObjectBinaryFast(PROC_OUT_DIR+'gsbc_bldg_obj.bin')
# Plotting individual LHs
PLOTTING_LH=1
if PLOTTING_LH==1:
#plotting_bldg_lh(bldg_,attr_class='sensor',num_picks=30)
#plotting_bldg_lh(bldg_,attr_class='time',num_picks=30)
#plotting_bldg_lh(bldg_,attr_class='weather',num_picks=30)
printing_bldg_lh(bldg_,attr_class='sensor',num_picks=30)
printing_bldg_lh(bldg_,attr_class='time',num_picks=30)
printing_bldg_lh(bldg_,attr_class='weather',num_picks=30)
PLOTTING_BN=1
if PLOTTING_BN==1:
#plotting_bldg_bn(bldg_)
printing_bldg_bn(bldg_)
More_BN_ANAL=0
if More_BN_ANAL==1:
#######################################################################################
# Analysis For GSBC
#######################################################################################
#bldg_obj=bldg_.GSBC_main_bldg_power_machine_room
bldg_obj=bldg_.GSBC_main_bldg_power_machine_room
bldg_.GSBC_main_bldg_power_machine_room.anal_out=bn_prob_analysis(bldg_obj,sig_tag_='avg')
bldg_obj=bldg_.GSBC_main_bldg_1
bldg_.GSBC_main_bldg_1.anal_out=bn_prob_analysis(bldg_obj,sig_tag_='avg')
import pdb;pdb.set_trace()
#--------------------------------------------------------------------------
# Analysis Display
#--------------------------------------------------------------------------
# Data set 1 - GSBC_main_bldg_power_machine_room
p_name_sets_1=bldg_.GSBC_main_bldg_power_machine_room.anal_out.__dict__.keys()
bn_out_sets_1=bldg_.GSBC_main_bldg_power_machine_room.anal_out.__dict__
# Data set 2 - GSBC_main_bldg_1
p_name_sets_2=bldg_.GSBC_main_bldg_1.anal_out.__dict__.keys()
bn_out_sets_2=bldg_.GSBC_main_bldg_1.anal_out.__dict__
# Data set 2 Analysis
print 'List power meters for analysis'
print '------------------------------------'
pprint.pprint(np.array([p_name_sets_1,convert_gsbc_name(p_name_sets_1)]).T)
print '------------------------------------'
p_name=p_name_sets_1[3]
bn_out=bn_out_sets_1[p_name]
fig_name='BN for Sensors '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
col_name=[str(np.array([[lab1],[remove_dot(lab2)]])) \
for lab1,lab2 in zip(bn_out.s_labels, convert_gsbc_name(bn_out.s_labels))]
rbn.nx_plot(bn_out.s_hc,col_name,graph_layout='spring',node_text_size=15)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Time '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.t_hc,convert_gsbc_name(bn_out.t_labels),graph_layout='spring',node_text_size=12)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.w_hc,convert_gsbc_name(bn_out.w_labels),graph_layout='spring',node_text_size=12)
png_name=fig_name+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Sensor-Time-Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.all_hc,convert_gsbc_name(bn_out.all_labels),graph_layout='spring',node_text_size=20)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN PEAK LH Analysis for Sensor-Time-Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name, figsize=(30.0,30.0))
subplot(2,1,1)
plot(bn_out.all_cause_symbol_xtick,bn_out.high_peak_prob,'-^')
plot(bn_out.all_cause_symbol_xtick,bn_out.low_peak_prob,'-v')
plt.ylabel('Likelihood',fontsize='large')
plt.xticks(bn_out.all_cause_symbol_xtick,bn_out.all_cause_symbol_xlabel,rotation=270, fontsize=10)
plt.tick_params(labelsize='large')
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.grid();plt.ylim([-0.05,1.05])
plt.title('Likelihood of '+ str(remove_dot(convert_gsbc_name(p_name)))+\
' given '+'\n'+str(remove_dot(convert_gsbc_name(bn_out.all_cause_label))))
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Compare with the raw data
#-------------------------------------------
start_t=datetime.datetime(2013, 8, 9, 0, 0, 0)
end_t=datetime.datetime(2013, 8, 13, 0, 0, 0)
data_x=get_data_set([label_[2:] for label_ in bn_out.all_cause_label]+[p_name[2:]],start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^')
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
name_list_out=[[p_name]+bn_out.all_cause_label,convert_gsbc_name([p_name]+bn_out.all_cause_label)]
pprint.pprint(np.array(name_list_out).T)
pprint.pprint(name_list_out)
start_t=datetime.datetime(2013, 7, 1, 0, 0, 0)
end_t=datetime.datetime(2013, 12, 31, 0, 0, 0)
data_x=get_data_set([label_[2:] for label_ in bn_out.s_labels],start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^',fontsize='small',xpos=0.00)
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
"""
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
print '----------------------------------------'
print 'Likelihoods '
print '----------------------------------------'
print cause_label+['Low Peak','High Peak']
print '----------------------------------------'
print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
print '----------------------------------------'
s_val_set=set(peak_state[:,0])
m_val_set=set(peak_state[:,1])
Z_peak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((peak_state[:,0]==s_val)&(peak_state[:,1]==m_val))[0][0]
Z_peak[i,j]=peak_prob[idx]
s_val_set=set(lowpeak_state[:,0])
m_val_set=set(lowpeak_state[:,1])
Z_lowpeak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((lowpeak_state[:,0]==s_val)&(lowpeak_state[:,1]==m_val))[0][0]
Z_lowpeak[i,j]=lowpeak_prob[idx]
Z_lowpeak=lowpeak_prob.reshape((len(s_val_set),len(m_val_set)))
Z_peak=peak_prob.reshape((len(s_val_set),len(m_val_set)))
fig1=figure()
im = plt.imshow(Z_peak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of High-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig2=figure()
im = plt.imshow(Z_lowpeak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of Low-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
"""
print '**************************** End of Program ****************************'
| gpl-2.0 | -4,262,225,546,452,412,000 | 41.877698 | 107 | 0.552349 | false |
sebastian-nagel/cc-crawl-statistics | plot/tld.py | 1 | 11639 | import sys
from collections import defaultdict
import pandas
from crawlplot import CrawlPlot, PLOTDIR
from crawlstats import CST, MonthlyCrawl, MultiCount
from top_level_domain import TopLevelDomain
from stats.tld_alexa_top_1m import alexa_top_1m_tlds
from stats.tld_cisco_umbrella_top_1m import cisco_umbrella_top_1m_tlds
from stats.tld_majestic_top_1m import majestic_top_1m_tlds
# min. share of URLs for a TLD to be shown in metrics
min_urls_percentage = .05
field_percentage_formatter = '{0:,.2f}'.format
class TldStats(CrawlPlot):
def __init__(self):
self.tlds = defaultdict(dict)
self.tld_stats = defaultdict(dict)
self.N = 0
def add(self, key, val):
cst = CST[key[0]]
if cst != CST.tld:
return
tld = key[1]
crawl = key[2]
self.tlds[tld][crawl] = val
def transform_data(self):
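        # Flatten the nested {tld: {crawl: counts}} mapping into a pandas
        # DataFrame, splitting the multi-count values into pages/urls/hosts/
        # domains and dropping host/domain counts for crawls that lack them.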
crawl_has_host_domain_counts = {}
for tld in self.tlds:
tld_repr = tld
tld_obj = None
if tld in ('', '(ip address)'):
continue
else:
try:
tld_obj = TopLevelDomain(tld)
tld_repr = tld_obj.tld
except:
print('error', tld)
continue
for crawl in self.tlds[tld]:
self.tld_stats['suffix'][self.N] = tld_repr
self.tld_stats['crawl'][self.N] = crawl
date = pandas.Timestamp(MonthlyCrawl.date_of(crawl))
self.tld_stats['date'][self.N] = date
if tld_obj:
self.tld_stats['type'][self.N] \
= TopLevelDomain.short_type(tld_obj.tld_type)
self.tld_stats['subtype'][self.N] = tld_obj.sub_type
self.tld_stats['tld'][self.N] = tld_obj.first_level
else:
self.tld_stats['type'][self.N] = ''
self.tld_stats['subtype'][self.N] = ''
self.tld_stats['tld'][self.N] = ''
value = self.tlds[tld][crawl]
n_pages = MultiCount.get_count(0, value)
self.tld_stats['pages'][self.N] = n_pages
n_urls = MultiCount.get_count(1, value)
self.tld_stats['urls'][self.N] = n_urls
n_hosts = MultiCount.get_count(2, value)
self.tld_stats['hosts'][self.N] = n_hosts
n_domains = MultiCount.get_count(3, value)
self.tld_stats['domains'][self.N] = n_domains
if n_urls != n_hosts:
# multi counts including host counts are not (yet)
# available for all crawls
crawl_has_host_domain_counts[crawl] = True
elif crawl not in crawl_has_host_domain_counts:
crawl_has_host_domain_counts[crawl] = False
self.N += 1
for crawl in crawl_has_host_domain_counts:
if not crawl_has_host_domain_counts[crawl]:
print('No host and domain counts for', crawl)
for n in self.tld_stats['crawl']:
if self.tld_stats['crawl'][n] == crawl:
del(self.tld_stats['hosts'][n])
del(self.tld_stats['domains'][n])
self.tld_stats = pandas.DataFrame(self.tld_stats)
def save_data(self):
self.tld_stats.to_csv('data/tlds.csv')
def percent_agg(self, data, columns, index, values, aggregate):
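        # Aggregate `values` grouped by (columns, index) and express each
        # group as a percentage of its per-column total.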
data = data[[columns, index, values]]
data = data.groupby([columns, index]).agg(aggregate)
data = data.groupby(level=0).apply(lambda x: 100.0*x/float(x.sum()))
# print("\n-----\n")
# print(data.to_string(formatters={'urls': field_percentage_formatter}))
return data
def pivot_percentage(self, data, columns, index, values, aggregate):
data = self.percent_agg(data, columns, index, values, aggregate)
return data.reset_index().pivot(index=index,
columns=columns, values=values)
def plot_groups(self):
title = 'Groups of Top-Level Domains'
ylabel = 'URLs %'
clabel = ''
img_file = 'tld/groups.png'
data = self.pivot_percentage(self.tld_stats, 'crawl', 'type',
'urls', {'urls': 'sum'})
data = data.transpose()
print("\n-----\n")
types = set(self.tld_stats['type'].tolist())
formatters = {c: field_percentage_formatter for c in types}
print(data.to_string(formatters=formatters))
data.to_html('{}/tld/groups-percentage.html'.format(PLOTDIR),
formatters=formatters,
classes=['tablesorter', 'tablepercentage'])
data = self.percent_agg(self.tld_stats, 'date', 'type',
'urls', {'urls': 'sum'}).reset_index()
return self.line_plot(data, title, ylabel, img_file,
x='date', y='urls', c='type', clabel=clabel)
def plot(self, crawls, latest_crawl):
field_formatters = {c: '{:,.0f}'.format
for c in ['pages', 'urls', 'hosts', 'domains']}
for c in ['%urls', '%hosts', '%domains']:
field_formatters[c] = field_percentage_formatter
data = self.tld_stats
data = data[data['crawl'].isin(crawls)]
crawl_data = data
top_tlds = []
# stats per crawl
for crawl in crawls:
print("\n-----\n{}\n".format(crawl))
for aggr_type in ('type', 'tld'):
data = crawl_data
data = data[data['crawl'].isin([crawl])]
data = data.set_index([aggr_type], drop=False)
data = data.sum(level=aggr_type).sort_values(
by=['urls'], ascending=False)
for count in ('urls', 'hosts', 'domains'):
data['%'+count] = 100.0 * data[count] / data[count].sum()
if aggr_type == 'tld':
# skip less frequent TLDs
data = data[data['%urls'] >= min_urls_percentage]
for tld in data.index.values:
top_tlds.append(tld)
print(data.to_string(formatters=field_formatters))
print()
if crawl == latest_crawl:
# latest crawl by convention
type_name = aggr_type
if aggr_type == 'type':
type_name = 'group'
path = '{}/tld/latest-crawl-{}s.html'.format(
PLOTDIR, type_name)
data.to_html(path,
formatters=field_formatters,
classes=['tablesorter'])
# stats comparison for selected crawls
for aggr_type in ('type', 'tld'):
data = crawl_data
if aggr_type == 'tld':
data = data[data['tld'].isin(top_tlds)]
data = self.pivot_percentage(data, 'crawl', aggr_type,
'urls', {'urls': 'sum'})
print("\n----- {}\n".format(aggr_type))
print(data.to_string(formatters={c: field_percentage_formatter
for c in crawls}))
if aggr_type == 'tld':
# save as HTML table
path = '{}/tld/selected-crawls-percentage.html'.format(
PLOTDIR, len(crawls))
data.to_html(path,
formatters={c: '{0:,.4f}'.format
for c in crawls},
classes=['tablesorter', 'tablepercentage'])
def plot_comparison(self, crawl, name, topNlimit=None, method='spearman'):
print()
print('Comparison for', crawl, '-', name, '-', method)
data = self.tld_stats
data = data[data['crawl'].isin([crawl])]
data = data[data['urls'] >= topNlimit]
data = data.set_index(['tld'], drop=False)
data = data.sum(level='tld')
print(data)
data['alexa'] = pandas.Series(alexa_top_1m_tlds)
data['cisco'] = pandas.Series(cisco_umbrella_top_1m_tlds)
data['majestic'] = pandas.Series(majestic_top_1m_tlds)
fields = ('pages', 'urls', 'hosts', 'domains',
'alexa', 'cisco', 'majestic')
formatters = {c: '{0:,.3f}'.format for c in fields}
# relative frequency (percent)
for count in fields:
data[count] = 100.0 * data[count] / data[count].sum()
# Spearman's rank correlation for all TLDs
corr = data.corr(method=method, min_periods=1)
print(corr.to_string(formatters=formatters))
corr.to_html('{}/tld/{}-comparison-{}-all-tlds.html'
.format(PLOTDIR, name, method),
formatters=formatters,
classes=['matrix'])
if topNlimit is None:
return
# Spearman's rank correlation for TLDs covering
# at least topNlimit % of urls
data = data[data['urls'] >= topNlimit]
print()
print('Top', len(data), 'TLDs (>= ', topNlimit, '%)')
print(data)
data.to_html('{}/tld/{}-comparison.html'.format(PLOTDIR, name),
formatters=formatters,
classes=['tablesorter', 'tablepercentage'])
print()
corr = data.corr(method=method, min_periods=1)
print(corr.to_string(formatters=formatters))
corr.to_html('{}/tld/{}-comparison-{}-frequent-tlds.html'
.format(PLOTDIR, name, method),
formatters=formatters,
classes=['matrix'])
print()
def plot_comparison_groups(self):
# Alexa and Cisco types/groups:
for (name, data) in [('Alexa', alexa_top_1m_tlds),
('Cisco', cisco_umbrella_top_1m_tlds),
('Majestic', majestic_top_1m_tlds)]:
compare_types = defaultdict(int)
for tld in data:
compare_types[TopLevelDomain(tld).tld_type] += data[tld]
print(name, 'TLD groups:')
for tld in compare_types:
c = compare_types[tld]
print(' {:6d}\t{:4.1f}\t{}'.format(c, (100.0*c/1000000), tld))
print()
if __name__ == '__main__':
plot_crawls = sys.argv[1:]
latest_crawl = plot_crawls[-1]
if len(plot_crawls) == 0:
print(sys.argv[0], 'crawl-id...')
print()
print('Distribution of top-level domains for (selected) monthly crawls')
print()
print('Example:')
print('', sys.argv[0], '[options]', 'CC-MAIN-2014-52', 'CC-MAIN-2016-50')
print()
print('Last argument is considered to be the latest crawl')
print()
print('Options:')
print()
sys.exit(1)
plot = TldStats()
plot.read_data(sys.stdin)
plot.transform_data()
plot.save_data()
plot.plot_groups()
plot.plot(plot_crawls, latest_crawl)
if latest_crawl == 'CC-MAIN-2018-22':
# plot comparison only for crawl of similar date as benchmark data
plot.plot_comparison(latest_crawl, 'selected-crawl',
min_urls_percentage)
# plot.plot_comparison(latest_crawl, 'selected-crawl',
# min_urls_percentage, 'pearson')
plot.plot_comparison_groups()
| apache-2.0 | 5,796,425,409,925,910,000 | 42.429104 | 81 | 0.511642 | false |
liufuyang/deep_learning_tutorial | char-based-classification/repeat-py-crepe-news-classification/data_helpers.py | 1 | 1790 | import string
import numpy as np
import pandas as pd
from keras.utils.np_utils import to_categorical
def load_ag_data():
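    # Load the AG News train/test CSVs: inputs are title + description,
    # labels are shifted to start at 0 and one-hot encoded.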
train = pd.read_csv('data/ag_news_csv/train.csv', header=None)
train = train.dropna()
x_train = train[1] + train[2]
x_train = np.array(x_train)
y_train = train[0] - 1
y_train = to_categorical(y_train)
test = pd.read_csv('data/ag_news_csv/test.csv', header=None)
x_test = test[1] + test[2]
x_test = np.array(x_test)
y_test = test[0] - 1
y_test = to_categorical(y_test)
return (x_train, y_train), (x_test, y_test)
def encode_data(x, maxlen, vocab):
# Iterate over the loaded data and create a matrix of size (len(x), maxlen)
# Each character is encoded into a one-hot array later at the lambda layer.
# Chars not in the vocab are encoded as -1, into an all zero vector.
input_data = np.zeros((len(x), maxlen), dtype=np.int)
for dix, sent in enumerate(x):
counter = 0
for c in sent:
if counter >= maxlen:
pass
else:
ix = vocab.get(c, -1) # get index from vocab dictionary, if not in vocab, return -1
input_data[dix, counter] = ix
counter += 1
return input_data
def create_vocab_set():
# This alphabet is 69 chars vs. 70 reported in the paper since they include two
# '-' characters. See https://github.com/zhangxiangxiao/Crepe#issues.
alphabet = set(list(string.ascii_lowercase) + list(string.digits) +
list(string.punctuation) + ['\n'])
vocab_size = len(alphabet)
vocab = {}
reverse_vocab = {}
for ix, t in enumerate(alphabet):
vocab[t] = ix
reverse_vocab[ix] = t
return vocab, reverse_vocab, vocab_size, alphabet | mit | -6,646,422,103,499,365,000 | 29.87931 | 100 | 0.60838 | false |
ericsomdahl/compfiOne | HW3/analyze/analyze.py | 1 | 3441 | __author__ = 'eric'
from port_input import PortfolioInput
import numpy as np
import matplotlib.pyplot as plt
def look_at(portfolio_input, s_study_pdf):
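    # Compare the portfolio against its benchmark: Sharpe ratio, total return,
    # standard deviation and average daily return, plus a cumulative-return
    # plot saved to the given PDF.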
i_trading_days = 252
dt_start, dt_end = portfolio_input.get_start_end_dates()
s_date_format = "%Y-%m-%d %H:%M:%S"
na_benchmark_returns = portfolio_input.df_analysis_data['benchmark_returns']
f_benchmark_stddev = np.std(na_benchmark_returns)
f_benchmark_avg_daily_return = np.mean(na_benchmark_returns)
f_benchmark_sharpe_ratio = (np.sqrt(i_trading_days) * f_benchmark_avg_daily_return) / f_benchmark_stddev
na_portfolio_returns = portfolio_input.df_analysis_data['portfolio_returns']
f_portfolio_stddev = np.std(na_portfolio_returns)
f_portfolio_avg_daily_return = np.mean(na_portfolio_returns)
f_portfolio_sharpe_ratio = (np.sqrt(i_trading_days) * f_portfolio_avg_daily_return) / f_portfolio_stddev
na_benchmark_total_returns = portfolio_input.df_analysis_data['benchmark_normalized']
f_benchmark_total_return = na_benchmark_total_returns[-1]
na_portfolio_total_returns = portfolio_input.df_analysis_data['portfolio_normalized']
f_portfolio_total_return = na_portfolio_total_returns[-1]
# we only want to graph the cumulative returns
plt.clf()
fig = plt.figure()
fig.add_subplot(111)
plt.plot(portfolio_input.df_analysis_data['portfolio_values'])
plt.plot(portfolio_input.df_analysis_data['benchmark_values'], alpha=0.7)
ls_names = ['Portfolio', portfolio_input.symbol]
plt.legend(ls_names)
plt.ylabel('Cumulative Returns')
plt.xlabel('Trading Day')
fig.autofmt_xdate(rotation=45)
plt.savefig(s_study_pdf, format='pdf')
print "The final value of the portfolio using the sample file is -- {0:s}".format(portfolio_input.na_raw_input[0][-1])
print "Details of the Performance of the portfolio :"
print "Date Range: {0:s} to {1:s}".format(dt_start.strftime(s_date_format), dt_end.strftime(s_date_format))
print "\nSharpe Ratio of Fund: {0:f}\nSharpe Ratio of {1:s}: {2:f}"\
.format(f_portfolio_sharpe_ratio, portfolio_input.symbol, f_benchmark_sharpe_ratio)
print "\nTotal Return of Fund: {0:f}\nTotal Return of {1:s}: {2:f}"\
.format(f_portfolio_total_return, portfolio_input.symbol, f_benchmark_total_return)
print "\nStandard Deviation of Fund: {0:f}\nStandard Deviation of {1:s}: {2:f}"\
.format(f_portfolio_stddev, portfolio_input.symbol, f_benchmark_stddev)
print "\nAverage Daily Return of Fund: {0:f}\nAverage Daily Return of {1:s}: {2:f}"\
.format(f_portfolio_avg_daily_return, portfolio_input.symbol, f_benchmark_avg_daily_return)
def main(input_args):
import os
values_csv = '{0:s}/{1:s}'.format(os.path.dirname(os.path.realpath(__file__)), input_args.values_csv)
portfolio_input = PortfolioInput(values_csv, input_args.benchmark)
look_at(portfolio_input, input_args.study_pdf)
pass
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Analyze a portfolios returns and compare it against a specified benchmark')
parser.add_argument('values_csv', help='(input) CSV file specifying the daily value of the portfolio')
parser.add_argument('benchmark', help='symbol of the benchmark to use in comparison')
parser.add_argument('study_pdf', help='(output) PDF of the study')
args = parser.parse_args()
main(args) | unlicense | -8,457,472,291,068,676,000 | 46.150685 | 122 | 0.700378 | false |
berkowitze/hveto | hveto/plot.py | 1 | 12277 | # -*- coding: utf-8 -*-
# Copyright (C) Joshua Smith (2016-)
#
# This file is part of the hveto python package.
#
# hveto is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# hveto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hveto. If not, see <http://www.gnu.org/licenses/>.
"""Plotting routines for hveto
"""
from __future__ import division
import warnings
from math import (log10, floor)
from io import BytesIO
from lxml import etree
from matplotlib.colors import LogNorm
from gwpy.plotter import (rcParams, HistogramPlot, EventTablePlot,
TimeSeriesPlot, Plot)
from gwpy.plotter.table import get_column_string
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__credits__ = 'Josh Smith, Joe Areeda'
rcParams.update({
'figure.subplot.bottom': 0.17,
'figure.subplot.left': 0.1,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.90,
'axes.labelsize': 24,
'axes.labelpad': 2,
'grid.color': 'gray',
})
SHOW_HIDE_JAVASCRIPT = """
<script type="text/ecmascript">
<![CDATA[
function init(evt) {
if ( window.svgDocument == null ) {
svgDocument = evt.target.ownerDocument;
}
}
function ShowTooltip(obj) {
var cur = obj.id.substr(obj.id.lastIndexOf('-')+1);
var tip = svgDocument.getElementById('tooltip-' + cur);
tip.setAttribute('visibility',"visible")
}
function HideTooltip(obj) {
var cur = obj.id.substr(obj.id.lastIndexOf('-')+1);
var tip = svgDocument.getElementById('tooltip-' + cur);
tip.setAttribute('visibility',"hidden")
}
]]>
</script>"""
def before_after_histogram(
outfile, x, y, label1='Before', label2='After',
bins=100, histtype='stepfilled', range=None, figsize=[9, 6], **kwargs):
"""Plot a histogram of SNR for two event distributions
"""
# format axis arguments
axargs = {
'xscale': 'log',
'xlabel': 'Loudness',
'yscale': 'log',
'ylabel': 'Number of events',
}
axargs.update(kwargs)
# create figure
plot = HistogramPlot(figsize=figsize)
ax = plot.gca()
# make histogram
if range is None:
range = ax.common_limits((x, y))
axargs.setdefault('xlim', range)
histargs = {
'range': range,
'histtype': histtype,
'bins': bins,
'linewidth': 2,
'logbins': axargs['xscale'] == 'log',
'alpha': .8,
}
ax.hist(x, label=label1, facecolor='red', edgecolor='darkred',
**histargs)
ax.hist(y, label=label2, facecolor='dodgerblue', edgecolor='blue',
**histargs)
# add legend
ax.legend(loc='upper right')
# format axes
axargs.setdefault('ylim', (.5, ax.yaxis.get_data_interval()[1] * 1.05))
_finalize_plot(plot, ax, outfile, **axargs)
def veto_scatter(
outfile, a, b, label1='All', label2='Vetoed', x='time', y='snr',
color=None, clim=None, clabel=None, cmap=None, clog=True,
        figsize=[9, 6], **kwargs):
"""Plot an x-y scatter of all/vetoed events
"""
# format axis arguments
axargs = {
'yscale': 'log',
'ylabel': 'Loudness',
}
if x != 'time':
axargs['xscale'] = 'log'
axargs.update(kwargs)
# create figure
plot = EventTablePlot(base=x=='time' and TimeSeriesPlot or Plot,
figsize=figsize)
ax = plot.gca()
# add data
scatterargs = {'s': 40}
if color is None:
ax.scatter(a[x], a[y], color='black', marker='o', label=label1, s=40)
else:
colorargs = {'edgecolor': 'none'}
if clim:
colorargs['vmin'] = clim[0]
colorargs['vmax'] = clim[1]
if clog:
colorargs['norm'] = LogNorm(vmin=clim[0], vmax=clim[1])
a = a.copy()
a.sort(order=color)
m = ax.scatter(a[x], a[y], c=a[color], label=label1, **colorargs)
# add colorbar
plot.add_colorbar(mappable=m, ax=ax, cmap=cmap, label=clabel)
if isinstance(b, list):
colors = list(rcParams['axes.prop_cycle'])
else:
b = [b]
label2 = [label2]
colors = [{'color': 'red'}]
for i, data in enumerate(b):
# setting the color here looks complicated, but is just a fancy
# way of looping through the color cycle when scattering, but using
# red if we only have one other data set
ax.scatter(data[x], data[y], marker='+', linewidth=1.5,
label=label2[i], s=40, **colors[i % len(colors)])
# add legend
if ax.get_legend_handles_labels()[0]:
legargs = {
'loc': 'upper left',
'bbox_to_anchor': (1.01, 1),
'borderaxespad': 0,
'numpoints': 1,
'scatterpoints': 1,
'handlelength': 1,
'handletextpad': .5
}
legargs.update(dict((x[7:], axargs.pop(x)) for x in axargs.keys()
if x.startswith('legend_')))
ax.legend(**legargs)
# finalize
for axis in ['x', 'y']:
lim = list(getattr(ax, '%saxis' % axis).get_data_interval())
lim[0] = axargs.get('%sbound' % axis, lim[0])
axargs.setdefault('%slim' % axis, (lim[0] * 0.95, lim[1] * 1.05))
_finalize_plot(plot, ax, outfile, **axargs)
def _finalize_plot(plot, ax, outfile, bbox_inches=None, close=True, **axargs):
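    # Shared helper: apply axis keyword arguments, title/subtitle, log-scale
    # minor grids and limits, add a (possibly hidden) colorbar, then save.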
xlim = axargs.pop('xlim', None)
ylim = axargs.pop('ylim', None)
# set title and subtitle
subtitle = axargs.pop('subtitle', None)
# format axes
for key in axargs:
getattr(ax, 'set_%s' % key)(axargs[key])
if subtitle:
pos = list(ax.title.get_position())
pos[1] += 0.05
ax.title.set_position(pos)
ax.text(.5, 1., subtitle, transform=ax.transAxes, va='bottom',
ha='center')
# set minor grid for log scale
if ax.get_xscale() == 'log':
ax.grid(True, axis='x', which='both')
if ax.get_yscale() == 'log':
ax.grid(True, axis='y', which='both')
# set limits after everything else (matplotlib might undo it)
if xlim is not None:
ax.set_xlim(*xlim)
if ylim is not None:
ax.set_ylim(*ylim)
# add colorbar
if not plot.colorbars:
plot.add_colorbar(ax=ax, visible=False)
# save and close
plot.save(outfile, bbox_inches=bbox_inches)
if close:
plot.close()
def significance_drop(outfile, old, new, show_channel_names=None, **kwargs):
"""Plot the signifiance drop for each channel
"""
channels = sorted(old.keys())
if show_channel_names is None:
show_channel_names = len(channels) <= 50
plot = Plot(figsize=(18, 6))
plot.subplots_adjust(left=.07, right=.93)
ax = plot.gca()
if show_channel_names:
plot.subplots_adjust(bottom=.4)
winner = sorted(old.items(), key=lambda x: x[1])[-1][0]
for i, c in enumerate(channels):
if c == winner:
color = 'orange'
elif old[c] > new[c]:
color = 'dodgerblue'
else:
color = 'red'
ax.plot([i, i], [old[c], new[c]], color=color, linestyle='-',
marker='o', markersize=10, label=c, zorder=old[c])
ax.set_xlim(-1, len(channels))
ax.set_ybound(lower=0)
# set xticks to show channel names
if show_channel_names:
ax.set_xticks(range(len(channels)))
ax.set_xticklabels([c.replace('_','\_') for c in channels])
for i, t in enumerate(ax.get_xticklabels()):
t.set_rotation(270)
t.set_verticalalignment('top')
t.set_horizontalalignment('center')
t.set_fontsize(8)
# or just show systems of channels
else:
plot.canvas.draw()
systems = {}
for i, c in enumerate(channels):
sys = c.split(':', 1)[1].split('-')[0].split('_')[0]
try:
systems[sys][1] += 1
except KeyError:
systems[sys] = [i, 1]
systems = sorted(systems.items(), key=lambda x: x[1][0])
labels, counts = zip(*systems)
xticks, xmticks = zip(*[(a, a+b/2.) for (a, b) in counts])
# show ticks at the edge of each group
ax.set_xticks(xticks, minor=False)
ax.set_xticklabels([], minor=False)
# show label in the centre of each group
ax.set_xticks(xmticks, minor=True)
for t in ax.set_xticklabels(labels, minor=True):
t.set_rotation(270)
kwargs.setdefault('ylabel', 'Significance')
# create interactivity
if outfile.endswith('.svg'):
_finalize_plot(plot, ax, outfile.replace('.svg', '.png'),
close=False, **kwargs)
tooltips = []
ylim = ax.get_ylim()
yoffset = (ylim[1] - ylim[0]) * 0.061
bbox = {'fc': 'w', 'ec': '.5', 'alpha': .9, 'boxstyle': 'round'}
xthresh = len(channels) / 10.
for i, l in enumerate(ax.lines):
x = l.get_xdata()[1]
if x < xthresh:
ha = 'left'
elif x > (len(channels) - xthresh):
                ha = 'right'
else:
ha = 'center'
y = l.get_ydata()[0] + yoffset
c = l.get_label()
tooltips.append(ax.annotate(c.replace('_', r'\_'), (x, y),
ha=ha, zorder=ylim[1], bbox=bbox))
l.set_gid('line-%d' % i)
tooltips[-1].set_gid('tooltip-%d' % i)
f = BytesIO()
plot.savefig(f, format='svg')
tree, xmlid = etree.XMLID(f.getvalue())
tree.set('onload', 'init(evt)')
for i in range(len(tooltips)):
try:
e = xmlid['tooltip-%d' % i]
except KeyError:
warnings.warn("Failed to recover tooltip %d" % i)
continue
e.set('visibility', 'hidden')
for i, l in enumerate(ax.lines):
e = xmlid['line-%d' % i]
e.set('onmouseover', 'ShowTooltip(this)')
e.set('onmouseout', 'HideTooltip(this)')
tree.insert(0, etree.XML(SHOW_HIDE_JAVASCRIPT))
etree.ElementTree(tree).write(outfile)
plot.close()
else:
_finalize_plot(plot, ax, outfile, **kwargs)
def hveto_roc(outfile, rounds, figsize=[9, 6], constants=[1, 5, 10, 20],
**kwargs):
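    # Plot cumulative veto efficiency against fractional deadtime for each
    # round, with dashed contours of constant efficiency/deadtime ratio.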
efficiency = []
deadtime = []
for r in rounds:
try:
efficiency.append(r.cum_efficiency[0] / r.cum_efficiency[1])
except ZeroDivisionError:
efficiency.append(0.)
try:
deadtime.append(r.cum_deadtime[0] / r.cum_deadtime[1])
except ZeroDivisionError:
deadtime.append(0.)
plot = Plot(figsize=figsize)
ax = plot.gca()
ax.plot(deadtime, efficiency, marker='o', linestyle='-')
try:
xbound = 10 ** floor(log10(deadtime[0]))
except ValueError:
xbound = 1e-4
try:
ybound = 10 ** floor(log10(efficiency[0]))
except ValueError:
ybound = 1e-4
bound = min(xbound, ybound)
axargs = {
'xlabel': 'Fractional deadtime',
'ylabel': 'Fractional efficiency',
'xscale': 'log',
'yscale': 'log',
'xlim': (bound, 1.),
'ylim': (bound, 1.),
}
axargs.update(kwargs)
# draw some eff/dt contours
if len(constants):
for i, c in enumerate(constants):
g = 1 - ((i+1)/len(constants) * .5)
x = axargs['xlim']
y = [a * c for a in x]
ax.plot(x, y, linestyle='--', color=(g, g, g), label=str(c))
ax.legend(title='Eff/dt:', borderaxespad=0, bbox_to_anchor=(1.01, 1),
handlelength=1, handletextpad=.5, loc='upper left')
# save and close
_finalize_plot(plot, ax, outfile, **axargs)
| gpl-3.0 | -8,362,391,763,697,477,000 | 32.543716 | 79 | 0.555103 | false |
ernewton/aas-abs | FindKeywordsInSessions.py | 1 | 3380 | # ==================================================================================================
# FindKeywordsInSessions
# ----------------------
#
# Parse a 'pickle' file containing arrays of AAS Number, AAS Session Number, AAS Session Title
# Convert AAS Number to years, and count the number of keyword occurrences per year, in session titles
#
# ------------------
# Luke Zoltan Kelley
# LKelley@cfa.harvard.edu
# ==================================================================================================
import os
import shutil
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Hardcode keywords for now, each session title is searched for each
keywords = [ "supernova" , "dark matter" , "planet" , "dark energy" , "star", "galax"]
def loadPickle(fname="AAS_Lists.pickle"):
'''
Load the Pickle of AAS Data. Must be 3 arrays:
AAS Number, AAS Session Number, Session Title
returns dictionary with keys 'num', 'snum', 'title'
'''
dat = pickle.load( open(fname, 'r') )
return { "num":dat[0] , "snum":dat[1] , "title":dat[2] }
def aasNumToYear(num):
'''
Convert from AAS Number to year.
returns years float
'''
return 1993.0 + (np.float(num)-182.0)/2.0
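# For reference, the formula above maps AAS 182 -> 1993.0, 184 -> 1994.0,
# 223 -> 2013.5 and 224 -> 2014.0, i.e. two meetings per year.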
def counter(indat, regex):
'''
Count the number of occurrences of 'regex' in the given AAS data (from a pickle)
Returns years and counts per year
'''
outcount = []; outyears = []; outnorms = []
# Extract arrays from data
n = indat['num']
s = indat['snum']
t = indat['title']
# Convert AAS Numbers to years
years = [ aasNumToYear(num) for num in n ]
lastyear = None #years[0]
count = 0.0
norm = 0
    # Iterate through all entries
for it in range(len(years)):
thisyear = years[it]
thistitle = t[it]
# If this is a new year, start new entry
if( thisyear != lastyear ):
outcount.append(0)
outyears.append(thisyear)
outnorms.append(0)
lastyear = thisyear
temp = thistitle.upper().count( regex.upper() )
if( temp > 0 ): outcount[-1] += 1 # Count titles matching
outnorms[-1] += 1 # Count total number of sessions
return outyears, outcount, outnorms
def main():
aas = loadPickle()
nums = len(aas)
years = []; counts = []; norms = []
# Iterate through each keyword and count matches
for it in range(len(keywords)):
yrs, cnts, nms = counter(aas, keywords[it])
years.append(yrs)
counts.append(cnts)
norms.append(nms)
numkeys = len(keywords)
plt.clf()
# For each keyword, report results, and plot
for ii in range(numkeys):
print " - Keyword = '%s' " % (keywords[ii])
for jj in range(len(years[ii])):
print " - - %f %d" % (years[ii][jj], counts[ii][jj])
plt.plot(years[ii], counts[ii], label=keywords[ii])
# Pretty plot
plt.legend(loc='upper left')
ax = plt.gca(); ax.grid()
ax.set_xlabel('Year'); ax.set_ylabel('Count')
plt.savefig('aas-trend.png') # Save plot
if __name__ == '__main__':
main()
| mit | 4,278,928,940,455,059,000 | 26.04 | 132 | 0.523964 | false |
bdrung/audacity | lib-src/portaudio-v19/test/patest_suggested_vs_streaminfo_latency.py | 74 | 5354 | #!/usr/bin/env python
"""
Run and graph the results of patest_suggested_vs_streaminfo_latency.c
Requires matplotlib for plotting: http://matplotlib.sourceforge.net/
"""
import os
from pylab import *
import numpy
from matplotlib.backends.backend_pdf import PdfPages
testExeName = "PATest.exe" # rename to whatever the compiled patest_suggested_vs_streaminfo_latency.c binary is
dataFileName = "patest_suggested_vs_streaminfo_latency.csv" # code below calls the exe to generate this file
inputDeviceIndex = -1 # -1 means default
outputDeviceIndex = -1 # -1 means default
sampleRate = 44100
pdfFilenameSuffix = "_wmme"
pdfFile = PdfPages("patest_suggested_vs_streaminfo_latency_" + str(sampleRate) + pdfFilenameSuffix +".pdf") #output this pdf file
def loadCsvData( dataFileName ):
params= ""
inputDevice = ""
outputDevice = ""
startLines = file(dataFileName).readlines(1024)
for line in startLines:
if "output device" in line:
outputDevice = line.strip(" \t\n\r#")
if "input device" in line:
inputDevice = line.strip(" \t\n\r#")
params = startLines[0].strip(" \t\n\r#")
data = numpy.loadtxt(dataFileName, delimiter=",", skiprows=4).transpose()
class R(object): pass
result = R()
result.params = params
for s in params.split(','):
if "sample rate" in s:
result.sampleRate = s
result.inputDevice = inputDevice
result.outputDevice = outputDevice
result.suggestedLatency = data[0]
result.halfDuplexOutputLatency = data[1]
result.halfDuplexInputLatency = data[2]
result.fullDuplexOutputLatency = data[3]
result.fullDuplexInputLatency = data[4]
return result;
def setFigureTitleAndAxisLabels( framesPerBufferString ):
title("PortAudio suggested (requested) vs. resulting (reported) stream latency\n" + framesPerBufferString)
ylabel("PaStreamInfo::{input,output}Latency (s)")
xlabel("Pa_OpenStream suggestedLatency (s)")
grid(True)
legend(loc="upper left")
def setDisplayRangeSeconds( maxSeconds ):
xlim(0, maxSeconds)
ylim(0, maxSeconds)
# run the test with different frames per buffer values:
compositeTestFramesPerBufferValues = [0]
# powers of two
for i in range (1,11):
compositeTestFramesPerBufferValues.append( pow(2,i) )
# multiples of 50
for i in range (1,20):
compositeTestFramesPerBufferValues.append( i * 50 )
# 10ms buffer sizes
compositeTestFramesPerBufferValues.append( 441 )
compositeTestFramesPerBufferValues.append( 882 )
# large primes
#compositeTestFramesPerBufferValues.append( 39209 )
#compositeTestFramesPerBufferValues.append( 37537 )
#compositeTestFramesPerBufferValues.append( 26437 )
individualPlotFramesPerBufferValues = [0,64,128,256,512] #output separate plots for these
isFirst = True
for framesPerBuffer in compositeTestFramesPerBufferValues:
commandString = testExeName + " " + str(inputDeviceIndex) + " " + str(outputDeviceIndex) + " " + str(sampleRate) + " " + str(framesPerBuffer) + ' > ' + dataFileName
print commandString
os.system(commandString)
d = loadCsvData(dataFileName)
if isFirst:
figure(1) # title sheet
gcf().text(0.1, 0.0,
"patest_suggested_vs_streaminfo_latency\n%s\n%s\n%s\n"%(d.inputDevice,d.outputDevice,d.sampleRate))
pdfFile.savefig()
figure(2) # composite plot, includes all compositeTestFramesPerBufferValues
if isFirst:
plot( d.suggestedLatency, d.suggestedLatency, label="Suggested latency" )
plot( d.suggestedLatency, d.halfDuplexOutputLatency )
plot( d.suggestedLatency, d.halfDuplexInputLatency )
plot( d.suggestedLatency, d.fullDuplexOutputLatency )
plot( d.suggestedLatency, d.fullDuplexInputLatency )
if framesPerBuffer in individualPlotFramesPerBufferValues: # individual plots
figure( 3 + individualPlotFramesPerBufferValues.index(framesPerBuffer) )
plot( d.suggestedLatency, d.suggestedLatency, label="Suggested latency" )
plot( d.suggestedLatency, d.halfDuplexOutputLatency, label="Half-duplex output latency" )
plot( d.suggestedLatency, d.halfDuplexInputLatency, label="Half-duplex input latency" )
plot( d.suggestedLatency, d.fullDuplexOutputLatency, label="Full-duplex output latency" )
plot( d.suggestedLatency, d.fullDuplexInputLatency, label="Full-duplex input latency" )
if framesPerBuffer == 0:
framesPerBufferText = "paFramesPerBufferUnspecified"
else:
framesPerBufferText = str(framesPerBuffer)
setFigureTitleAndAxisLabels( "user frames per buffer: "+str(framesPerBufferText) )
setDisplayRangeSeconds(2.2)
pdfFile.savefig()
setDisplayRangeSeconds(0.1)
setFigureTitleAndAxisLabels( "user frames per buffer: "+str(framesPerBufferText)+" (detail)" )
pdfFile.savefig()
isFirst = False
figure(2)
setFigureTitleAndAxisLabels( "composite of frames per buffer values:\n"+str(compositeTestFramesPerBufferValues) )
setDisplayRangeSeconds(2.2)
pdfFile.savefig()
setDisplayRangeSeconds(0.1)
setFigureTitleAndAxisLabels( "composite of frames per buffer values:\n"+str(compositeTestFramesPerBufferValues)+" (detail)" )
pdfFile.savefig()
pdfFile.close()
#uncomment this to display interactively, otherwise we just output a pdf
#show()
| gpl-2.0 | 809,695,119,437,916,000 | 34.693333 | 168 | 0.722637 | false |
gtesei/fast-furious | competitions/cdiscount-image-classification-challenge/mynet.py | 1 | 14353 | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
import io
import bson # this is installed with the pymongo package
import matplotlib.pyplot as plt
from skimage.data import imread # or, whatever image library you prefer
import multiprocessing as mp # will come in handy due to the size of the data
import os
from tqdm import *
import struct
from collections import defaultdict
import cv2
from keras import backend as K
import threading
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
from keras.layers import Input, Dense
from keras.models import Model
from keras.preprocessing.image import Iterator
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from skimage.color import rgb2yuv
############################################################################
__GLOBAL_PARAMS__ = {
'MODEL' : "mynet" ,
'DEBUG' : True,
'NORMALIZATION' : True,
'YUV' : True ,
'MULTI_SCALE' : False
}
########
if __GLOBAL_PARAMS__['MULTI_SCALE']:
raise Exception("MULTI_SCALE not supported yet!")
__MODEL__KEY__ = ""
for k in sorted(__GLOBAL_PARAMS__.keys()):
if not k.startswith("_"):
__MODEL__KEY__ += "__" + str(k) + "_" + str(__GLOBAL_PARAMS__[k])
if (__GLOBAL_PARAMS__['DEBUG']):
LOG_FILE = "simple.log"
else:
LOG_FILE = "log" + __MODEL__KEY__ + ".log"
SUB_FILE = "sub" + __MODEL__KEY__ + ".csv.gz"
import logging
logging.basicConfig(format='%(asctime)s %(message)s', filename=LOG_FILE,level=logging.DEBUG)
#logging.debug('This message should go to the log file')
if (__GLOBAL_PARAMS__['DEBUG']):
logging.info('** DEBUG: '+__MODEL__KEY__+' ****************************************************************')
else:
logging.info('** PRODUCTION:'+__MODEL__KEY__+' ****************************************************************')
#logging.warning('And this, too')
########### -------------> FUNC
def preprocess_image(x):
if __GLOBAL_PARAMS__['NORMALIZATION']:
x = (x - 128.0) / 128.0
if __GLOBAL_PARAMS__['YUV']:
x = np.array([rgb2yuv(x.reshape((1,180,180,3)))])
x = x.reshape((180,180,3))
return x
class BSONIterator(Iterator):
def __init__(self, bson_file, images_df, offsets_df, num_class,
image_data_generator, lock, target_size=(180, 180),
with_labels=True, batch_size=32, shuffle=False, seed=None):
self.file = bson_file
self.images_df = images_df
self.offsets_df = offsets_df
self.with_labels = with_labels
self.samples = len(images_df)
self.num_class = num_class
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.image_shape = self.target_size + (3,)
print("Found %d images belonging to %d classes." % (self.samples, self.num_class))
super(BSONIterator, self).__init__(self.samples, batch_size, shuffle, seed)
self.lock = lock
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx())
if self.with_labels:
batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx())
for i, j in enumerate(index_array):
# Protect file and dataframe access with a lock.
with self.lock:
image_row = self.images_df.iloc[j]
product_id = image_row["product_id"]
offset_row = self.offsets_df.loc[product_id]
# Read this product's data from the BSON file.
self.file.seek(offset_row["offset"])
item_data = self.file.read(offset_row["length"])
# Grab the image from the product.
item = bson.BSON.decode(item_data)
img_idx = image_row["img_idx"]
bson_img = item["imgs"][img_idx]["picture"]
# Load the image.
img = load_img(io.BytesIO(bson_img), target_size=self.target_size)
# Preprocess the image.
x = img_to_array(img)
x = preprocess_image(x)
#x = self.image_data_generator.random_transform(x)
#x = self.image_data_generator.standardize(x)
# Add the image and the label to the batch (one-hot encoded).
batch_x[i] = x
if self.with_labels:
batch_y[i, image_row["category_idx"]] = 1
if self.with_labels:
return batch_x, batch_y
else:
return batch_x
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array[0])
def make_category_tables():
cat2idx = {}
idx2cat = {}
for ir in categories_df.itertuples():
category_id = ir[0]
category_idx = ir[4]
cat2idx[category_id] = category_idx
idx2cat[category_idx] = category_id
return cat2idx, idx2cat
def read_bson(bson_path, num_records, with_categories):
rows = {}
with open(bson_path, "rb") as f, tqdm(total=num_records) as pbar:
offset = 0
while True:
item_length_bytes = f.read(4)
if len(item_length_bytes) == 0:
break
length = struct.unpack("<i", item_length_bytes)[0]
f.seek(offset)
item_data = f.read(length)
assert len(item_data) == length
item = bson.BSON.decode(item_data)
product_id = item["_id"]
num_imgs = len(item["imgs"])
row = [num_imgs, offset, length]
if with_categories:
row += [item["category_id"]]
rows[product_id] = row
offset += length
f.seek(offset)
pbar.update()
columns = ["num_imgs", "offset", "length"]
if with_categories:
columns += ["category_id"]
df = pd.DataFrame.from_dict(rows, orient="index")
df.index.name = "product_id"
df.columns = columns
df.sort_index(inplace=True)
return df
def make_val_set(df, split_percentage=0.2, drop_percentage=0.):
# Find the product_ids for each category.
category_dict = defaultdict(list)
for ir in tqdm(df.itertuples()):
category_dict[ir[4]].append(ir[0])
train_list = []
val_list = []
with tqdm(total=len(df)) as pbar:
for category_id, product_ids in category_dict.items():
category_idx = cat2idx[category_id]
# Randomly remove products to make the dataset smaller.
keep_size = int(len(product_ids) * (1. - drop_percentage))
if keep_size < len(product_ids):
product_ids = np.random.choice(product_ids, keep_size, replace=False)
# Randomly choose the products that become part of the validation set.
val_size = int(len(product_ids) * split_percentage)
if val_size > 0:
val_ids = np.random.choice(product_ids, val_size, replace=False)
else:
val_ids = []
# Create a new row for each image.
for product_id in product_ids:
row = [product_id, category_idx]
for img_idx in range(df.loc[product_id, "num_imgs"]):
if product_id in val_ids:
val_list.append(row + [img_idx])
else:
train_list.append(row + [img_idx])
pbar.update()
columns = ["product_id", "category_idx", "img_idx"]
train_df = pd.DataFrame(train_list, columns=columns)
val_df = pd.DataFrame(val_list, columns=columns)
return train_df, val_df
########### -------------> MAIN
categories_path = os.path.join("data", "category_names.csv")
categories_df = pd.read_csv(categories_path, index_col="category_id")
# Maps the category_id to an integer index. This is what we'll use to
# one-hot encode the labels.
print(">>> Mapping category_id to an integer index ... ")
categories_df["category_idx"] = pd.Series(range(len(categories_df)), index=categories_df.index)
print(categories_df.head())
cat2idx, idx2cat = make_category_tables()
# Test if it works:
print(cat2idx[1000012755], idx2cat[4] , len(cat2idx))
print(">>> Train set ... ")
data_dir = "data"
if (__GLOBAL_PARAMS__['DEBUG']):
print(">>> DEBUG mode ... ")
train_bson_path = os.path.join(data_dir, "train_example.bson")
num_train_products = 82
else:
print(">>> PRODUCTION mode ... ")
train_bson_path = os.path.join(data_dir, "train.bson")
num_train_products = 7069896
test_bson_path = os.path.join(data_dir, "test.bson")
num_test_products = 1768182
print(train_bson_path,num_train_products)
if (not __GLOBAL_PARAMS__['DEBUG']):
if os.path.isfile("train_offsets.csv"):
print(">> reading from file train_offsets ... ")
train_offsets_df = pd.read_csv("train_offsets.csv")
train_offsets_df.set_index( "product_id" , inplace= True)
train_offsets_df.sort_index(inplace=True)
else:
train_offsets_df = read_bson(train_bson_path, num_records=num_train_products, with_categories=True)
train_offsets_df.to_csv("train_offsets.csv")
print(train_offsets_df.head())
if os.path.isfile("train_images.csv"):
print(">> reading from file train_images / val_images ... ")
train_images_df = pd.read_csv("train_images.csv")
train_images_df = train_images_df[['product_id','category_idx','img_idx']]
val_images_df = pd.read_csv("val_images.csv")
val_images_df = val_images_df[['product_id', 'category_idx', 'img_idx']]
else:
train_images_df, val_images_df = make_val_set(train_offsets_df, split_percentage=0.2, drop_percentage=0)
train_images_df.to_csv("train_images.csv")
val_images_df.to_csv("val_images.csv")
print(train_images_df.head())
print(val_images_df.head())
categories_df.to_csv("categories.csv")
else:
train_offsets_df = read_bson(train_bson_path, num_records=num_train_products, with_categories=True)
train_images_df, val_images_df = make_val_set(train_offsets_df, split_percentage=0.2, drop_percentage=0)
print(train_images_df.head())
print(val_images_df.head())
## Generator
print(">>> Generator ... ")
# Tip: use ImageDataGenerator for data augmentation and preprocessing ??
train_bson_file = open(train_bson_path, "rb")
lock = threading.Lock()
num_classes = len(cat2idx)
num_train_images = len(train_images_df)
num_val_images = len(val_images_df)
batch_size = 256
train_datagen = ImageDataGenerator()
train_gen = BSONIterator(train_bson_file, train_images_df, train_offsets_df,
num_classes, train_datagen, lock,
batch_size=batch_size, shuffle=True)
val_datagen = ImageDataGenerator()
val_gen = BSONIterator(train_bson_file, val_images_df, train_offsets_df,
num_classes, val_datagen, lock,
batch_size=batch_size, shuffle=True)
## Model
inputs = Input(shape=(180, 180, 3))
x = Conv2D(32, 5, padding="valid", activation="relu")(inputs)
x = BatchNormalization()(x) # hope similar to local response normalization
x = MaxPooling2D(pool_size=(3, 3))(x)
fl1 = Flatten()(x)
x2 = Conv2D(64, 5, padding="valid", activation="relu")(x)
x2 = BatchNormalization()(x2) # hope similar to local response normalization
x2 = MaxPooling2D(pool_size=(3, 3))(x2)
fl2 = Flatten()(x2)
merged = concatenate([fl1, fl2]) # multi scale features
merged = Dropout(0.5)(merged)
merged = BatchNormalization()(merged)
merged = Dense(2*num_classes, activation='relu')(merged)
merged = Dropout(0.5)(merged)
merged = BatchNormalization()(merged)
predictions = Dense(num_classes, activation='softmax')(merged)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"])
model.summary()
early_stopping = EarlyStopping(monitor='val_loss', patience=0 )
bst_model_path = "mod" + __MODEL__KEY__ + '.h5'
model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)
# To train the model:
history = model.fit_generator(train_gen,
epochs=200,
steps_per_epoch = num_train_images // batch_size + 1,
validation_data = val_gen,
validation_steps = num_val_images // batch_size + 1,
callbacks=[early_stopping, model_checkpoint])
print(history.history.keys())
# logging
logging.info('N. epochs == '+str(len(history.history['val_acc'])))
logging.info('Val accuracy == '+str(max(history.history['val_acc'])))
## Predict on Test-set
print(">>> Predicting on test-set ... ")
submission_df = pd.read_csv("data/sample_submission.csv")
print(submission_df.head())
test_datagen = ImageDataGenerator()
data = bson.decode_file_iter(open(test_bson_path, "rb"))
with tqdm(total=num_test_products) as pbar:
for c, d in enumerate(data):
product_id = d["_id"]
num_imgs = len(d["imgs"])
batch_x = np.zeros((num_imgs, 180, 180, 3), dtype=K.floatx())
for i in range(num_imgs):
bson_img = d["imgs"][i]["picture"]
# Load and preprocess the image.
img = load_img(io.BytesIO(bson_img), target_size=(180, 180))
x = img_to_array(img)
x = preprocess_image(x)
            #x = test_datagen.random_transform(x)
            #x = test_datagen.standardize(x)
# Add the image to the batch.
batch_x[i] = x
prediction = model.predict(batch_x, batch_size=num_imgs)
avg_pred = prediction.mean(axis=0)
cat_idx = np.argmax(avg_pred)
submission_df.iloc[c]["category_id"] = idx2cat[cat_idx]
pbar.update()
submission_df.to_csv(SUB_FILE, compression="gzip", index=False)
| mit | -7,434,785,598,195,030,000 | 38.539945 | 117 | 0.610395 | false |
joferkington/python-geoprobe | geoprobe/colormap.py | 1 | 1875 | import six
import numpy as np
class colormap(object):
"""Reads a Geoprobe formatted colormap"""
num_colors = 256
def __init__(self, filename):
self.filename = filename
self._parse_infile()
def _parse_infile(self):
infile = open(self.filename, 'r')
header = next(infile)
if header.startswith('#'):
_ = next(infile)
self.num_keys = int(next(infile).strip())
keys = []
for i in range(self.num_keys):
keys.append(next(infile).strip().split())
self.keys = np.array(keys, dtype=np.float)
num_colors = int(next(infile).strip())
colors = []
for i in range(num_colors):
colors.append(next(infile).strip().split())
self.lut = np.array(colors, dtype=np.float)
dtype = {'names':['red', 'green', 'blue', 'alpha', 'keys'],
'formats':5 * [np.float]}
self.lut = self.lut.view(dtype)
@property
def as_matplotlib(self):
from matplotlib.colors import LinearSegmentedColormap
cdict = dict(red=[], green=[], blue=[])
# Make sure that there is a key at 0.0 and 1.0
keys = self.keys.tolist()
if keys[0][0] != 0:
keys = [[0.0] + keys[0][1:]] + keys
if keys[-1][0] != 1.0:
keys.append([1.0] + keys[-1][1:])
for stop_value, red, green, blue, alpha, in keys:
for name, val in zip(['red', 'green', 'blue'], [red, green, blue]):
cdict[name].append([stop_value, val, val])
return LinearSegmentedColormap(self.filename, cdict, self.num_colors)
@property
def as_pil(self):
return list((255 * self.lut.view(np.float)[:,:3]).astype(np.int).flat)
@property
def as_pil_rgba(self):
return list((255 * self.lut.view(np.float)[:,:4]).astype(np.int).flat)
| mit | 4,631,208,239,711,772,000 | 30.25 | 79 | 0.548267 | false |
louispotok/pandas | pandas/io/json/normalize.py | 1 | 9196 | # ---------------------------------------------------------------------
# JSON normalization routines
import copy
from collections import defaultdict
import numpy as np
from pandas._libs.writers import convert_json_to_lines
from pandas import compat, DataFrame
def _convert_to_line_delimits(s):
"""Helper function that converts json lists to line delimited json."""
    # Determine whether we have a JSON list to turn into lines; otherwise just
    # return the json object as-is (only lists can be converted).
    if not (s[0] == '[' and s[-1] == ']'):
return s
s = s[1:-1]
return convert_json_to_lines(s)
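# Illustrative behaviour (an assumption about convert_json_to_lines): the list
# '[{"a":1},{"b":2}]' becomes the two lines '{"a":1}' and '{"b":2}', while a
# plain object such as '{"a":1}' is returned unchanged by the early return.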
def nested_to_record(ds, prefix="", sep=".", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
level: the number of levels in the jason string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, compat.string_types):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
            # only dicts get recursively flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None,
errors='raise',
sep='.'):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
record_prefix : string, default None
        If given, prefix record field names with the dotted path, e.g.
        foo.bar.field if the path to records is ['foo', 'bar']
meta_prefix : string, default None
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([[isinstance(x, dict)
for x in compat.itervalues(y)] for y in data]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
            # {VeryLong: { b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, compat.string_types):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == 'ignore':
meta_val = np.nan
else:
raise \
KeyError("Try running with "
"errors='ignore' as key "
"{err} is not always present"
.format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name {name}, '
'need distinguishing prefix '.format(name=k))
result[k] = np.array(v).repeat(lengths)
return result
| bsd-3-clause | -2,162,957,561,049,091,600 | 32.44 | 79 | 0.492605 | false |
gfyoung/pandas | pandas/core/arrays/numpy_.py | 1 | 15093 | from __future__ import annotations
import numbers
from typing import Optional, Tuple, Type, Union
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
from pandas._typing import Dtype, NpDtype, Scalar
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import isna
from pandas.core import nanops, ops
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.strings.object_array import ObjectStringArrayMixin
class PandasDtype(ExtensionDtype):
"""
A Pandas ExtensionDtype for NumPy dtypes.
.. versionadded:: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
dtype : object
Object to be converted to a NumPy data type object.
See Also
--------
numpy.dtype
"""
_metadata = ("_dtype",)
def __init__(self, dtype: Optional[NpDtype]):
self._dtype = np.dtype(dtype)
def __repr__(self) -> str:
return f"PandasDtype({repr(self.name)})"
@property
def numpy_dtype(self) -> np.dtype:
"""
The NumPy dtype this PandasDtype wraps.
"""
return self._dtype
@property
def name(self) -> str:
"""
A bit-width name for this data-type.
"""
return self._dtype.name
@property
def type(self) -> Type[np.generic]:
"""
The type object used to instantiate a scalar of this NumPy data-type.
"""
return self._dtype.type
@property
def _is_numeric(self) -> bool:
# exclude object, str, unicode, void.
return self.kind in set("biufc")
@property
def _is_boolean(self) -> bool:
return self.kind == "b"
@classmethod
def construct_from_string(cls, string: str) -> PandasDtype:
try:
dtype = np.dtype(string)
except TypeError as err:
if not isinstance(string, str):
msg = f"'construct_from_string' expects a string, got {type(string)}"
else:
msg = f"Cannot construct a 'PandasDtype' from '{string}'"
raise TypeError(msg) from err
return cls(dtype)
@classmethod
def construct_array_type(cls) -> Type[PandasArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return PandasArray
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
"""
return self._dtype.kind
@property
def itemsize(self) -> int:
"""
The element size of this data-type object.
"""
return self._dtype.itemsize
class PandasArray(
OpsMixin,
NDArrayBackedExtensionArray,
NDArrayOperatorsMixin,
ObjectStringArrayMixin,
):
"""
A pandas ExtensionArray for NumPy data.
.. versionadded:: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
values : ndarray
The NumPy ndarray to wrap. Must be 1-dimensional.
copy : bool, default False
Whether to copy `values`.
Attributes
----------
None
Methods
-------
None
"""
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCPandasArray`. We check for
# that _typ to ensure that users don't unnecessarily use EAs inside
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
_ndarray: np.ndarray
# ------------------------------------------------------------------------
# Constructors
def __init__(self, values: Union[np.ndarray, PandasArray], copy: bool = False):
if isinstance(values, type(self)):
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"'values' must be a NumPy array, not {type(values).__name__}"
)
if values.ndim == 0:
# Technically we support 2, but do not advertise that fact.
raise ValueError("PandasArray must be 1-dimensional.")
if copy:
values = values.copy()
self._ndarray = values
self._dtype = PandasDtype(values.dtype)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
) -> PandasArray:
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
result = np.asarray(scalars, dtype=dtype)
if copy and result is scalars:
result = result.copy()
return cls(result)
@classmethod
def _from_factorized(cls, values, original) -> PandasArray:
return cls(values)
def _from_backing_data(self, arr: np.ndarray) -> PandasArray:
return type(self)(arr)
# ------------------------------------------------------------------------
# Data
@property
def dtype(self) -> PandasDtype:
return self._dtype
# ------------------------------------------------------------------------
# NumPy Array Interface
def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
return np.asarray(self._ndarray, dtype=dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
# Lightly modified version of
# https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html
# The primary modification is not boxing scalar return values
# in PandasArray, since pandas' ExtensionArrays are 1-d.
out = kwargs.get("out", ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use PandasArray instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle PandasArray objects.
if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
return NotImplemented
if ufunc not in [np.logical_or, np.bitwise_or, np.bitwise_xor]:
# For binary ops, use our custom dunder methods
# We haven't implemented logical dunder funcs, so exclude these
# to avoid RecursionError
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x for x in inputs)
if out:
kwargs["out"] = tuple(
x._ndarray if isinstance(x, PandasArray) else x for x in out
)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple and len(result):
# multiple return values
if not lib.is_scalar(result[0]):
# re-box array-like results
return tuple(type(self)(x) for x in result)
else:
# but not scalar reductions
return result
elif method == "at":
# no return value
return None
else:
# one return value
if not lib.is_scalar(result):
# re-box array-like results, but not scalar reductions
result = type(self)(result)
return result
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
def isna(self) -> np.ndarray:
return isna(self._ndarray)
def _validate_fill_value(self, fill_value):
if fill_value is None:
# Primarily for subclasses
fill_value = self.dtype.na_value
return fill_value
def _values_for_factorize(self) -> Tuple[np.ndarray, int]:
return self._ndarray, -1
# ------------------------------------------------------------------------
# Reductions
def any(self, *, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_any((), {"out": out, "keepdims": keepdims})
result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def all(self, *, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_all((), {"out": out, "keepdims": keepdims})
result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def min(self, *, axis=None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_min((), kwargs)
result = nanops.nanmin(
values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def max(self, *, axis=None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_max((), kwargs)
result = nanops.nanmax(
values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def sum(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar:
nv.validate_sum((), kwargs)
result = nanops.nansum(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
return self._wrap_reduction_result(axis, result)
def prod(self, *, axis=None, skipna=True, min_count=0, **kwargs) -> Scalar:
nv.validate_prod((), kwargs)
result = nanops.nanprod(
self._ndarray, axis=axis, skipna=skipna, min_count=min_count
)
return self._wrap_reduction_result(axis, result)
def mean(
self,
*,
axis=None,
dtype: Optional[NpDtype] = None,
out=None,
keepdims=False,
skipna=True,
):
nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims})
result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def median(
self, *, axis=None, out=None, overwrite_input=False, keepdims=False, skipna=True
):
nv.validate_median(
(), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims}
)
result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def std(
self,
*,
axis=None,
dtype: Optional[NpDtype] = None,
out=None,
ddof=1,
keepdims=False,
skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
)
result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def var(
self,
*,
axis=None,
dtype: Optional[NpDtype] = None,
out=None,
ddof=1,
keepdims=False,
skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var"
)
result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def sem(
self,
*,
axis=None,
dtype: Optional[NpDtype] = None,
out=None,
ddof=1,
keepdims=False,
skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem"
)
result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
return self._wrap_reduction_result(axis, result)
def kurt(
self,
*,
axis=None,
dtype: Optional[NpDtype] = None,
out=None,
keepdims=False,
skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt"
)
result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
def skew(
self,
*,
axis=None,
dtype: Optional[NpDtype] = None,
out=None,
keepdims=False,
skipna=True,
):
nv.validate_stat_ddof_func(
(), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew"
)
result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
return self._wrap_reduction_result(axis, result)
# ------------------------------------------------------------------------
# Additional Methods
def to_numpy(
self,
dtype: Optional[NpDtype] = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
result = np.asarray(self._ndarray, dtype=dtype)
if (copy or na_value is not lib.no_default) and result is self._ndarray:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
# ------------------------------------------------------------------------
# Ops
def __invert__(self):
return type(self)(~self._ndarray)
def _cmp_method(self, other, op):
if isinstance(other, PandasArray):
other = other._ndarray
pd_op = ops.get_array_op(op)
result = pd_op(self._ndarray, other)
if op is divmod or op is ops.rdivmod:
a, b = result
if isinstance(a, np.ndarray):
# for e.g. op vs TimedeltaArray, we may already
# have an ExtensionArray, in which case we do not wrap
return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)
return a, b
if isinstance(result, np.ndarray):
# for e.g. multiplication vs TimedeltaArray, we may already
# have an ExtensionArray, in which case we do not wrap
return self._wrap_ndarray_result(result)
return result
_arith_method = _cmp_method
def _wrap_ndarray_result(self, result: np.ndarray):
# If we have timedelta64[ns] result, return a TimedeltaArray instead
# of a PandasArray
if result.dtype == "timedelta64[ns]":
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray._simple_new(result)
return type(self)(result)
# ------------------------------------------------------------------------
# String methods interface
_str_na_value = np.nan
| bsd-3-clause | -1,491,776,084,248,907,800 | 30.774737 | 102 | 0.561651 | false |
asoliveira/NumShip | scripts/plot/beta-ace-r-cg-plt.py | 1 | 2105 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Is it dimensionless?
adi = False
# Save the figures (True|False)?
save = True
# If saving, what format is desired?
formato = 'jpg'
# If saving, which directory should the figures go to?
dircg = 'fig-sen'
# If saving, what file name?
nome = 'beta-acel-r-cg'
# What title for the plots?
titulo = ''#'Curva de Giro'
# What color for each curve?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
# Line style
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
acehis = sp.genfromtxt('../entrada/padrao/CurvaGiro/acel.dat')
acehis2 = sp.genfromtxt('../entrada/beta/saida1.1/CurvaGiro/acel.dat')
acehis3 = sp.genfromtxt('../entrada/beta/saida1.2/CurvaGiro/acel.dat')
acehis4 = sp.genfromtxt('../entrada/beta/saida1.3/CurvaGiro/acel.dat')
axl = [0, 1000, -0.005, 0.025]
# Plotting the turning circle (Curva de Giro) results
if adi:
    ylabel = r'$\dot r\prime$'
    xacelabel = r'$t\prime$'
else:
ylabel = r'$\dot r \quad graus/s^2$'
xacelabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
# Default (reference) run
plt.plot(acehis[:, 0], acehis[:, 6] * (180 / sp.pi), color = pc,
linestyle = ps, linewidth = 1, label=ur'padrão')
plt.plot(acehis2[:, 0], acehis2[:, 6] * (180 / sp.pi), color = r1c,
linestyle= r1s, linewidth = 1, label=ur'1.1beta')
plt.plot(acehis3[:, 0], acehis3[:, 6] * (180 / sp.pi), color = r2c,
linewidth = 1, linestyle = r2s, label=ur'1.2beta')
plt.plot(acehis4[:, 0], acehis4[:, 6] * (180 / sp.pi),color = r3c,
linestyle = r3s, linewidth = 1, label=ur'1.3beta')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacelabel)
plt.axis(axl)
plt.grid(True)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 | -7,591,450,999,993,588,000 | 26.565789 | 70 | 0.62864 | false |
MJuddBooth/pandas | pandas/core/config.py | 1 | 23294 | """
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all option can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
is set or reset. Changing the stored value is considered misuse, but
is not verboten.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associated
with them, which are stored in auxiliary dictionaries keyed on the
fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
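Example
=======
A minimal usage sketch; 'x.y.width' is just an illustrative key name:
    import pandas.core.config as cf
    cf.register_option('x.y.width', 80, 'an example option')
    cf.get_option('x.y.width')           # 80
    cf.set_option('x.y.width', 120)
    cf.describe_option('x.y.width')
    cf.reset_option('x.y.width')
    with cf.option_context('x.y.width', 40):
        pass  # the option is temporarily 40 inside this block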
"""
from collections import namedtuple
from contextlib import contextmanager
import re
import warnings
import pandas.compat as compat
from pandas.compat import lmap, map, u
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple('RegisteredOption',
'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for pandas.options, backwards compatible with KeyError
checks
"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such keys(s): {pat!r}'.format(pat=pat))
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
    # must have at least 1 argument; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.pop('silent', False)
if kwargs:
msg = '_set_option() got an unexpected keyword argument "{kwarg}"'
        raise TypeError(msg.format(kwarg=list(kwargs.keys())[0]))
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
if silent:
with warnings.catch_warnings(record=True):
o.cb(key)
else:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
        # and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
try:
v = object.__getattribute__(self, "d")[key]
except KeyError:
raise OptionError("No such option")
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value : object
New value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with not arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
... ...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError('Need to invoke as'
' option_context(pat, val, [(pat, val), ...]).')
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
self.undo = [(pat, _get_option(pat, silent=True))
for pat, val in self.ops]
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide pandas config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
msg = "Option '{key}' has already been registered"
raise OptionError(msg.format(key=key))
if key in _reserved_keys:
msg = "Option '{key}' is a reserved key"
raise OptionError(msg.format(key=key))
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("{k} is not a valid identifier".format(k=k))
if keyword.iskeyword(k):
raise ValueError("{k} is a python keyword".format(k=k))
cursor = _global_config
msg = "Path prefix to option '{option}' is already an option"
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError(msg.format(option='.'.join(path[:i])))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError(msg.format(option='.'.join(path[:-1])))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
msg = "Option '{key}' has already been defined as deprecated."
raise OptionError(msg.format(key=key))
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
    RegisteredOption (namedtuple) if key is a registered option, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
    If key is deprecated and a replacement key is defined, returns the
    replacement key; otherwise returns `key` as-is.
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, FutureWarning)
else:
msg = "'{key}' is deprecated".format(key=key)
if d.removal_ver:
msg += (' and will be removed in {version}'
.format(version=d.removal_ver))
if d.rkey:
msg += ", please use '{rkey}' instead.".format(rkey=d.rkey)
else:
msg += ', please refrain from using it.'
warnings.warn(msg, FutureWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('{k} ').format(k=k)
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += (u('\n [default: {default}] [currently: {current}]')
.format(default=o.defval, current=_get_option(k, True)))
if d:
s += u('\n (Deprecated')
s += (u(', use `{rkey}` instead.')
.format(rkey=d.rkey if d.rkey else ''))
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
    Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import pandas.core.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
    cf.set_option("size", " 6 pt")
    cf.get_option("size")
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '{prefix}.{key}'.format(prefix=prefix, key=key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x , which raises
ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
msg = "Value must have type '{typ!s}'"
raise ValueError(msg.format(typ=_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which raises
ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
from pandas.io.formats.printing import pprint_thing
type_repr = "|".join(map(pprint_thing, _type))
else:
type_repr = "'{typ}'".format(typ=_type)
def inner(x):
if not isinstance(x, _type):
msg = "Value must be an instance of {type_repr}"
raise ValueError(msg.format(type_repr=type_repr))
return inner
def is_one_of_factory(legal_values):
callables = [c for c in legal_values if callable(c)]
legal_values = [c for c in legal_values if not callable(c)]
def inner(x):
from pandas.io.formats.printing import pprint_thing as pp
if x not in legal_values:
if not any(c(x) for c in callables):
pp_values = pp("|".join(lmap(pp, legal_values)))
msg = "Value must be one of {pp_values}"
if len(callables):
msg += " or a callable"
raise ValueError(msg.format(pp_values=pp_values))
return inner
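# For example, is_one_of_factory(['mean', 'median']) builds a validator that
# accepts only those two strings; callables included in the legal values act as
# additional predicate checks.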
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
def is_callable(obj):
"""
Parameters
----------
`obj` - the object to be checked
Returns
-------
validator - returns True if object is callable
raises ValueError otherwise.
"""
if not callable(obj):
raise ValueError("Value must be a callable")
return True
| bsd-3-clause | 5,525,378,590,257,934,000 | 26.830346 | 79 | 0.617627 | false |
JerelynCo/Marimar | marimar/api.py | 1 | 2752 | from flask import Flask, render_template, make_response
from flask_restful import Api, Resource
from math import radians, cos, sin, asin, sqrt
import pandas as pd
import numpy as np
import math
app = Flask(__name__)
api = Api(app)
"""
Loading of data
"""
hosp_data = pd.read_csv("data/processed/hospitals_complete.csv")
def haversine(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
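    # Haversine formula: a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2),
    # then the great-circle distance is d = 2 * R * atan2(sqrt(a), sqrt(1 - a)).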
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
"""
API for topfive nearby hospitals
"""
class TopFive(Resource):
def get(self, facility, pt1_lon, pt1_lat):
filtered = hosp_data[hosp_data[facility]==1].reset_index()
filtered['dist'] = 0.0
for i in range(filtered.shape[0]):
            filtered.loc[i, 'dist'] = haversine((pt1_lat, pt1_lon), (filtered['lat'][i], filtered['lon'][i]))
        return make_response(filtered.sort_values(by='dist', ascending=True)[0:5].to_json(orient='records'))
api.add_resource(TopFive, '/topfive/<string:facility>/<float:pt1_lon>/<float:pt1_lat>')
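# Illustrative request (the facility column name and coordinates below are
# hypothetical, not taken from the data set):
#   GET /topfive/<facility>/121.05/14.55 -> five nearest matching hospitals as JSON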
"""
API for Health Center info
"""
class HospInfo(Resource):
hospInfo = pd.DataFrame()
def get(self):
hospInfo = hosp_data[['FacilityName', 'Type', 'Classification', 'StreetNameAndNo', 'BarangayName', 'City', 'lon', 'lat', 'LandlineNumber']]
return make_response(hospInfo.to_json(orient='records'))
api.add_resource(HospInfo, '/hospinfo')
"""
API for cityCount per facility per city
"""
class CityCount(Resource):
def get(self, facility):
cityCount = pd.DataFrame()
city = np.array([])
count = np.array([])
filtered = hosp_data[hosp_data[facility]==1]
for i in filtered['City'].unique():
city = np.append(city, i)
count = np.append(count, filtered[filtered['City'] == i]['City'].size)
cityCount['City'] = city
cityCount['Count'] = count
return make_response(cityCount.to_json(orient='records'))
api.add_resource(CityCount, '/citycount/<string:facility>')
@app.route('/')
def index():
user = {'nickname': 'Je'}
posts = [
{
'author': {'nickname': 'John'},
'body': 'Beautiful day in Portland!'
},
{
'author': {'nickname': 'Susan'},
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html', title='Home', user=user, posts=posts)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=8000)
| mit | -4,048,964,048,559,151,000 | 28.276596 | 147 | 0.613009 | false |
vascotenner/holoviews | holoviews/plotting/mpl/chart3d.py | 1 | 7216 | from distutils.version import LooseVersion
import numpy as np
import param
import matplotlib as mpl
import matplotlib.cm as cm
from ...core import Dimension
from ...core.util import basestring
from ..util import map_colors
from .element import ColorbarPlot
from .chart import PointPlot
class Plot3D(ColorbarPlot):
"""
Plot3D provides a common baseclass for mplot3d based
plots.
"""
azimuth = param.Integer(default=-60, bounds=(-180, 180), doc="""
Azimuth angle in the x,y plane.""")
elevation = param.Integer(default=30, bounds=(0, 180), doc="""
Elevation angle in the z-axis.""")
distance = param.Integer(default=10, bounds=(7, 15), doc="""
Distance from the plotted object.""")
disable_axes = param.Boolean(default=False, doc="""
Disable all axes.""")
bgcolor = param.String(default='white', doc="""
Background color of the axis.""")
labelled = param.List(default=['x', 'y', 'z'], doc="""
Whether to plot the 'x', 'y' and 'z' labels.""")
projection = param.ObjectSelector(default='3d', objects=['3d'], doc="""
The projection of the matplotlib axis.""")
show_frame = param.Boolean(default=False, doc="""
Whether to draw a frame around the figure.""")
show_grid = param.Boolean(default=True, doc="""
Whether to draw a grid in the figure.""")
xaxis = param.ObjectSelector(default='fixed',
objects=['fixed', None], doc="""
Whether and where to display the xaxis.""")
yaxis = param.ObjectSelector(default='fixed',
objects=['fixed', None], doc="""
Whether and where to display the yaxis.""")
zaxis = param.ObjectSelector(default='fixed',
objects=['fixed', None], doc="""
Whether and where to display the yaxis.""")
def _finalize_axis(self, key, **kwargs):
"""
Extends the ElementPlot _finalize_axis method to set appropriate
labels, and axes options for 3D Plots.
"""
axis = self.handles['axis']
self.handles['fig'].set_frameon(False)
axis.grid(self.show_grid)
axis.view_init(elev=self.elevation, azim=self.azimuth)
axis.dist = self.distance
if self.xaxis is None:
axis.w_xaxis.line.set_lw(0.)
axis.w_xaxis.label.set_text('')
if self.yaxis is None:
axis.w_yaxis.line.set_lw(0.)
axis.w_yaxis.label.set_text('')
if self.zaxis is None:
axis.w_zaxis.line.set_lw(0.)
axis.w_zaxis.label.set_text('')
if self.disable_axes:
axis.set_axis_off()
axis.set_axis_bgcolor(self.bgcolor)
return super(Plot3D, self)._finalize_axis(key, **kwargs)
def _draw_colorbar(self, artist, element, dim=None):
fig = self.handles['fig']
ax = self.handles['axis']
# Get colorbar label
if dim is None:
dim = element.vdims[0]
elif not isinstance(dim, Dimension):
dim = element.get_dimension(dim)
label = str(dim)
cbar = fig.colorbar(artist, shrink=0.7, ax=ax)
self.handles['cax'] = cbar.ax
self._adjust_cbar(cbar, label, dim)
class Scatter3DPlot(Plot3D, PointPlot):
"""
Subclass of PointPlot allowing plotting of Points
on a 3D axis, also allows mapping color and size
onto a particular Dimension of the data.
"""
color_index = param.ClassSelector(default=4, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the color will the drawn""")
size_index = param.ClassSelector(default=3, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the sizes will the drawn.""")
_plot_methods = dict(single='scatter')
def get_data(self, element, ranges, style):
xs, ys, zs = (element.dimension_values(i) for i in range(3))
self._compute_styles(element, ranges, style)
# Temporary fix until color handling is deterministic in mpl+py3
if not element.get_dimension(self.color_index) and 'c' in style:
color = style.pop('c')
if LooseVersion(mpl.__version__) >= '1.5':
style['color'] = color
else:
style['facecolors'] = color
return (xs, ys, zs), style, {}
def update_handles(self, key, axis, element, ranges, style):
artist = self.handles['artist']
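        # mplot3d scatter artists store their coordinates in the private
        # _offsets3d attribute, so assigning it in place moves the points.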
artist._offsets3d, style, _ = self.get_data(element, ranges, style)
cdim = element.get_dimension(self.color_index)
if cdim and 'cmap' in style:
clim = style['vmin'], style['vmax']
cmap = cm.get_cmap(style['cmap'])
artist._facecolor3d = map_colors(style['c'], clim, cmap, hex=False)
if element.get_dimension(self.size_index):
artist.set_sizes(style['s'])
class SurfacePlot(Plot3D):
"""
Plots surfaces wireframes and contours in 3D space.
Provides options to switch the display type via the
plot_type parameter has support for a number of
styling options including strides and colors.
"""
colorbar = param.Boolean(default=False, doc="""
Whether to add a colorbar to the plot.""")
plot_type = param.ObjectSelector(default='surface',
objects=['surface', 'wireframe',
'contour'], doc="""
Specifies the type of visualization for the Surface object.
Valid values are 'surface', 'wireframe' and 'contour'.""")
style_opts = ['antialiased', 'cmap', 'color', 'shade',
'linewidth', 'facecolors', 'rstride', 'cstride']
def init_artists(self, ax, plot_data, plot_kwargs):
if self.plot_type == "wireframe":
artist = ax.plot_wireframe(*plot_data, **plot_kwargs)
elif self.plot_type == "surface":
artist = ax.plot_surface(*plot_data, **plot_kwargs)
elif self.plot_type == "contour":
artist = ax.contour3D(*plot_data, **plot_kwargs)
return {'artist': artist}
def get_data(self, element, ranges, style):
mat = element.data
rn, cn = mat.shape
l, b, _, r, t, _ = self.get_extents(element, ranges)
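        # Build x/y coordinate grids spanning the element extents, one grid
        # point per matrix row/column, as expected by the mpl surface calls.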
r, c = np.mgrid[l:r:(r-l)/float(rn), b:t:(t-b)/float(cn)]
self._norm_kwargs(element, ranges, style, element.vdims[0])
return (r, c, mat), style, {}
class TrisurfacePlot(Plot3D):
"""
Plots a trisurface given a Trisurface element, containing
X, Y and Z coordinates.
"""
colorbar = param.Boolean(default=False, doc="""
Whether to add a colorbar to the plot.""")
style_opts = ['cmap', 'color', 'shade', 'linewidth', 'edgecolor']
_plot_methods = dict(single='plot_trisurf')
def get_data(self, element, ranges, style):
dims = element.dimensions()
self._norm_kwargs(element, ranges, style, dims[2])
x, y, z = [element.dimension_values(d) for d in dims]
return (x, y, z), style, {}
| bsd-3-clause | 1,487,844,312,141,393,400 | 34.900498 | 79 | 0.587722 | false |
CrazyGuo/vincent | examples/bar_chart_examples.py | 11 | 2026 | # -*- coding: utf-8 -*-
"""
Vincent Bar Chart Example
"""
#Build a Bar Chart from scratch
from vincent import *
import pandas as pd
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 43, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
data = [farm_1, farm_2, farm_3, farm_4]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
width=ValueRef(scale='x', band=True, offset=-1),
y2=ValueRef(scale='y', value=0))
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark = Mark(type='rect', from_=MarkRef(data='table'),
properties=MarkProperties(enter=enter_props,
update=update_props))
vis.marks.append(mark)
data = Data.from_pandas(df['apples'])
#Using a Vincent KeyedList here
vis.data['table'] = data
vis.axis_titles(x='Farms', y='Data')
vis.to_json('vega.json')
#Convenience methods
vis = Bar(df['apples'])
#Fruit
trans = df.T
vis = Bar(trans['Farm 1'])
#From dict
vis = Bar(farm_1)
#From dict of iterables
vis = Bar({'x': ['apples', 'berries', 'squash', 'melons', 'corn'],
'y': [10, 32, 21, 13, 18]}, iter_idx='x')
#Finally, a boring bar chart from a list
vis = Bar([10, 20, 30, 15, 35, 10, 20])
| mit | 8,237,328,885,574,837,000 | 29.238806 | 78 | 0.57848 | false |
RI-imaging/qpimage | examples/hologram_cell.py | 1 | 3393 | """Digital hologram of a single cell
This example illustrates how qpimage can be used to analyze
digital holograms. The hologram of a single myeloid leukemia
cell (HL60) shown was recorded using digital holographic microscopy
(DHM). Because the phase-retrieval method used in DHM is based on the
discrete Fourier transform, there always is a residual background
phase tilt which must be removed for further image analysis.
The setup used for recording these data is described in reference
:cite:`Schuermann2015`, which also contains a description of the
hologram-to-phase conversion and phase background correction algorithms
which qpimage is based on.
"""
import matplotlib
import matplotlib.pylab as plt
import numpy as np
import qpimage
# load the experimental data
edata = np.load("./data/hologram_cell.npz")
# create QPImage instance
qpi = qpimage.QPImage(data=edata["data"],
bg_data=edata["bg_data"],
which_data="hologram",
# This parameter allows to pass arguments to the
# hologram-analysis algorithm of qpimage.
# (see qpimage.holo.get_field)
holo_kw={
# For this hologram, the "smooth disk"
# filter yields the best trade-off
# between interference from the central
# band and image resolution.
"filter_name": "smooth disk",
# As can be seen in the hologram image,
# the sidebands are not positioned at
# an angle of 45° from the central band.
# If the filter size is 1/3 (default),
# the central band introduces line-
# artifacts to the reconstructed image.
"filter_size": 1/4
}
)
amp0 = qpi.amp
pha0 = qpi.pha
# background correction
qpi.compute_bg(which_data=["amplitude", "phase"],
fit_offset="fit",
fit_profile="tilt",
border_px=5,
)
# plot the properties of `qpi`
fig = plt.figure(figsize=(8, 10))
matplotlib.rcParams["image.interpolation"] = "bicubic"
holkw = {"cmap": "gray",
"vmin": 0,
"vmax": 200}
ax1 = plt.subplot(321, title="cell hologram")
map1 = ax1.imshow(edata["data"], **holkw)
plt.colorbar(map1, ax=ax1, fraction=.046, pad=0.04)
ax2 = plt.subplot(322, title="bg hologram")
map2 = ax2.imshow(edata["bg_data"], **holkw)
plt.colorbar(map2, ax=ax2, fraction=.046, pad=0.04)
ax3 = plt.subplot(323, title="input phase [rad]")
map3 = ax3.imshow(pha0)
plt.colorbar(map3, ax=ax3, fraction=.046, pad=0.04)
ax4 = plt.subplot(324, title="input amplitude")
map4 = ax4.imshow(amp0, cmap="gray")
plt.colorbar(map4, ax=ax4, fraction=.046, pad=0.04)
ax5 = plt.subplot(325, title="corrected phase [rad]")
map5 = ax5.imshow(qpi.pha)
plt.colorbar(map5, ax=ax5, fraction=.046, pad=0.04)
ax6 = plt.subplot(326, title="corrected amplitude")
map6 = ax6.imshow(qpi.amp, cmap="gray")
plt.colorbar(map6, ax=ax6, fraction=.046, pad=0.04)
# disable axes
[ax.axis("off") for ax in [ax1, ax2, ax3, ax4, ax5, ax6]]
plt.tight_layout()
plt.show()
| mit | 4,917,183,173,856,020,000 | 36.274725 | 71 | 0.597288 | false |
dokato/connectivipy | examples/example2.py | 1 | 1153 | import numpy as np
import matplotlib.pyplot as plt
import connectivipy as cp
from connectivipy import mvar_gen
"""
In this example we don't use Data class
"""
fs = 256.
acf = np.zeros((3, 3, 3))
# matrix shape meaning
# (p,k,k) k - number of channels,
# p - order of mvar parameters
acf[0, 0, 0] = 0.3
acf[0, 1, 0] = 0.6
acf[1, 0, 0] = 0.1
acf[1, 1, 1] = 0.2
acf[1, 2, 0] = 0.6
acf[2, 2, 2] = 0.2
acf[2, 1, 0] = 0.4
# generate 3-channel signal from matrix above
y = mvar_gen(acf, int(10e4))
# assign static class cp.Mvar to variable mv
mv = cp.Mvar
# find best model order using Vieira-Morf algorithm
best, crit = mv.order_akaike(y, 15, 'vm')
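# crit holds the information-criterion value for each candidate model order
# (plotted below); the best order is the one that minimizes it.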
plt.plot(1+np.arange(len(crit[1:])), crit[1:], 'g')
plt.show()
print('Best order', best)
# now let's fit parameters to the signal
av, vf = mv.fit(y, best, 'vm')
# and check whether values are correct +/- 0.01
print(np.allclose(acf, av, 0.01, 0.01))
# now we can calculate Directed Transfer Function from the data
dtf = cp.conn.DTF()
dtfval = dtf.calculate(av, vf, 128)
# all possible methods are visible in that dictionary:
print(cp.conn.conn_estim_dc.keys())
cp.plot_conn(dtfval, 'DTF values', fs)
| bsd-2-clause | -6,490,504,614,586,477,000 | 22.530612 | 63 | 0.669558 | false |
jamestwebber/scipy | scipy/signal/spectral.py | 1 | 73518 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fft as sp_fft
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default) the computed periodogram
    is unnormalized: it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be 1-D and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The algorithm running time scales roughly as O(x * freqs) or O(N^2)
for a large number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
welch: Power spectral density by Welch's method
spectrogram: Spectrogram by Welch's method
csd: Cross spectral density by Welch's method
Examples
--------
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
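        # Scale by 2 / (y . y): the periodogram is normalized by the residuals
        # of the data around the zero-valued constant reference model.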
pgram *= 2 / np.dot(y, y)
return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[25000:])
0.00099728892368242854
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
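        # nfft shorter than the signal: truncate the data along `axis` to nfft
        # samples, then let welch() fall back to its default FFT length (nperseg).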
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[tuple(s)]
nperseg = nfft
nfft = None
return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0,
nfft=nfft, detrend=detrend, return_onesided=return_onesided,
scaling=scaling, axis=axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
average : { 'mean', 'median' }, optional
Method to use when averaging periodograms. Defaults to 'mean'.
.. versionadded:: 1.2.0
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
If we now introduce a discontinuity in the signal, by increasing the
amplitude of a small portion of the signal by 50, we can see the
corruption of the mean average power spectral density, but using a
median average better estimates the normal behaviour.
>>> x[int(N//2):int(N//2)+10] *= 50.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median')
>>> plt.semilogy(f, Pxx_den, label='mean')
>>> plt.semilogy(f_med, Pxx_den_med, label='median')
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.legend()
>>> plt.show()
"""
freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
return_onesided=return_onesided, scaling=scaling,
axis=axis, average=average)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
    noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
average : { 'mean', 'median' }, optional
Method to use when averaging periodograms. Defaults to 'mean'.
.. versionadded:: 1.2.0
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
    -----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
if average == 'median':
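                # The median of the periodogram segments is a biased estimator
                # of the mean; _median_bias supplies the correction factor.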
Pxy = np.median(Pxy, axis=-1) / _median_bias(Pxy.shape[-1])
elif average == 'mean':
Pxy = Pxy.mean(axis=-1)
else:
raise ValueError('average must be "median" or "mean", got %s'
% (average,))
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> from scipy.fft import fftshift
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
Note, if using output that is not one sided, then use the following:
>>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False)
>>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0))
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
    # Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, it is sufficient that the signal windowing obeys the constraint of
"Constant OverLap Add" (COLA). This ensures that every point in the input
data is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011,ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
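    # Overlap-add the window over every segment offset within one hop period;
    # COLA holds when this summed window is (numerically) constant in every bin.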
binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
def check_NOLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Nonzero Overlap Add (NOLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies the NOLA constraint within
`tol`, `False` otherwise
See Also
--------
check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "nonzero
overlap add" (NOLA):
.. math:: \sum_{t}w^{2}[n-tH] \ne 0
for all :math:`n`, where :math:`w` is the window function, :math:`t` is the
frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` -
`noverlap`).
This ensures that the normalization factors in the denominator of the
overlap-add inversion equation are not zero. Only very pathological windows
will fail the NOLA constraint.
.. versionadded:: 1.2.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011,ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm NOLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_NOLA(signal.boxcar(100), 100, 75)
True
NOLA is also true for 25% (1/4) overlap:
>>> signal.check_NOLA(signal.boxcar(100), 100, 25)
True
"Symmetrical" Hann window (for filter design) is also NOLA:
>>> signal.check_NOLA(signal.hann(120, sym=True), 120, 60)
True
As long as there is overlap, it takes quite a pathological window to fail
NOLA:
>>> w = np.ones(64, dtype="float")
>>> w[::2] = 0
>>> signal.check_NOLA(w, 64, 32)
False
If there is not enough overlap, a window with zeros at the ends will not
work:
>>> signal.check_NOLA(signal.hann(64), 64, 0)
False
>>> signal.check_NOLA(signal.hann(64), 64, 1)
False
>>> signal.check_NOLA(signal.hann(64), 64, 2)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg')
if noverlap < 0:
raise ValueError('noverlap must be a nonnegative integer')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
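    # Overlap-add the squared window over one hop period; NOLA only requires
    # this sum to stay strictly positive, not constant as in COLA.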
binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]**2
return np.min(binsums) > tol
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Nonzero
OverLap Add" (NOLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop
size :math:`H` = `nperseg - noverlap`, the windowed frame at time index
:math:`t` is given by
.. math:: x_{t}[n]=x[n]w[n-tH]
The overlap-add (OLA) reconstruction equation is given by
.. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
The NOLA constraint ensures that every normalization term that appears
    in the denominator of the OLA reconstruction equation is nonzero. Whether a
choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can
be tested with `check_NOLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is `True`,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
        the parameter used to generate the STFT.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
        `numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "nonzero
overlap add" (NOLA):
.. math:: \sum_{t}w^{2}[n-tH] \ne 0
This ensures that the normalization factors that appear in the denominator
of the overlap-add reconstruction equation
.. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
are not zero. The NOLA constraint can be checked with the `check_NOLA`
function.
An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This
function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
ifunc = sp_fft.irfft if input_onesided else sp_fft.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
norm = norm[..., nperseg//2:-(nperseg//2)]
# Divide out normalization where non-tiny
if np.sum(norm > 1e-10) != len(norm):
warnings.warn("NOLA condition failed, STFT may not be invertible")
x /= np.where(norm > 1e-10, norm, 1.0)
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence estimate, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
    -----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
axis=axis)
_, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,
nfft=nfft, detrend=detrend, axis=axis)
_, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
mode: str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = sp_fft.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = sp_fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
`_spectral helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
# https://stackoverflow.com/a/5568169
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = sp_fft.fft
else:
result = result.real
func = sp_fft.rfft
result = func(result, n=nfft)
return result
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and returns a window of
that length.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
window. If function was called with string or tuple than this will hold
the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
"""
# parse window; if array like, then set nperseg = win.shape
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
' = {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different"
" from length of window")
return win, nperseg
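# Editor's note (not in the original scipy source): as a quick illustration of the helper
# above, _triage_segments('hann', None, input_length=1000) returns a 256-point Hann window
# with nperseg=256, while an array_like window is returned as an ndarray with nperseg set
# to its length (and a ValueError if a conflicting nperseg was also supplied).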
def _median_bias(n):
"""
Returns the bias of the median of a set of periodograms relative to
the mean.
See arXiv:gr-qc/0509116 Appendix B for details.
Parameters
----------
n : int
Numbers of periodograms being averaged.
Returns
-------
bias : float
Calculated bias.
"""
ii_2 = 2 * np.arange(1., (n-1) // 2 + 1)
return 1 + np.sum(1. / (ii_2 + 1) - 1. / ii_2)
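# Editor's sketch (not part of the original scipy source): a minimal check of the
# NOLA-constrained stft/istft round trip described in the docstrings above, assuming the
# module is used through the public `scipy.signal` namespace.
if __name__ == '__main__':
    from scipy import signal as _signal
    _x = np.random.RandomState(0).randn(2048)
    assert _signal.check_NOLA('hann', 256, 128)        # default noverlap = nperseg // 2
    _f, _t, _Zxx = _signal.stft(_x, fs=1.0, nperseg=256)
    _, _xrec = _signal.istft(_Zxx, fs=1.0, nperseg=256)
    assert np.allclose(_xrec[:_x.size], _x)            # exact reconstruction up to fp error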
| bsd-3-clause | -5,161,674,247,566,273,000 | 35.722278 | 80 | 0.612653 | false |
Tong-Chen/scikit-learn | sklearn/ensemble/partial_dependence.py | 5 | 14809 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
from sklearn.externals.six.moves import zip
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import array2d
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(map(lambda x: 0.0 <= x <= 1.0, percentiles)):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
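# Editor's note (not part of the original module): a tiny worked example of the helper
# above. With X = np.array([[0., 10.], [1., 20.], [2., 30.]]) and grid_resolution=5, each
# column has fewer than 5 unique values, so the per-column axes are the unique values
# themselves and the returned grid is their cartesian product, of shape (3 * 3, 2).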
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = array2d(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in xrange(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = array2d(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = map(str, range(gbrt.n_features))
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
names.append([feature_names[i] for i in fxs])
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(map(np.size, axes)).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
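# Editor's sketch (not part of the original module): ``features`` also accepts string
# names when ``feature_names`` is given; e.g., for a fitted model ``clf`` (hypothetical)
# trained on three named columns:
#
#     fig, axs = plot_partial_dependence(clf, X, ['f0', ('f0', 'f1')],
#                                        feature_names=['f0', 'f1', 'f2'],
#                                        grid_resolution=20)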
| bsd-3-clause | 7,954,101,607,814,231,000 | 37.167526 | 79 | 0.589371 | false |
soylentdeen/Graffity | src/GravityGrism/design.py | 1 | 1116 | import scipy
import numpy
from matplotlib import pyplot
import Grism
fig = pyplot.figure(0)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.clear()
G = Grism.Grism(n=3.43)
C = Grism.CCD()
focalLength = []
slitImage = []
slitWidth = []
"""
for f1 in numpy.arange(2.0, 50.0):
for npix in numpy.arange(2.0, 100.0):
focalLength.append(f1)
slitImage.append(npix)
Instrument = Grism.GrismInstrument(Grism=G, f1=f1)
Instrument.optimize(npix=npix)
slitWidth.append(Instrument.Grism.deltaX_slit)
focalLength = numpy.array(focalLength)
slitImage = numpy.array(slitImage)
slitWidth = numpy.array(slitWidth)
"""
Instrument = Grism.GrismInstrument(Grism=G, f1=54.6, f2=134.3, CCD=C)
Instrument.optimize(npix=2.0, Lambda = 2200.0, dLambda=500.0, order=5)
print("Sigma = %.3f, %.1f lines/mm" % (Instrument.Grism.sigma, 1.0/(Instrument.Grism.sigma/1000.0)))
print("Resolving Power = %.3f" % Instrument.ResolvingPower)
print("Delta = %.3f" % Instrument.Grism.delta)
print("Slit Size = %.3f" % Instrument.deltaX_slit)
#ax.scatter(focalLength, slitWidth, c = slitImage)
#fig.show()
| mit | 4,718,174,225,502,181,000 | 23.26087 | 100 | 0.685484 | false |
tensorflow/probability | tensorflow_probability/python/experimental/lazybones/utils/utils.py | 1 | 4203 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Utility functions for tfp.experimental.lazybones."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
from tensorflow_probability.python.experimental.lazybones import deferred
# Since neither `networkx` or `matplotlib.plt` are official TFP dependencies, we
# lazily import them only as needed using the `_import_once_*` functions defined
# below.
nx = None
plt = None
__all__ = [
'get_leaves',
'get_roots',
'is_any_ancestor',
'iter_edges',
'plot_graph',
]
def get_leaves(ancestor):
"""Returns all childless descendants ("leaves") of (iterable) ancestors."""
return _get_relation(ancestor, 'children')
def get_roots(descendant):
"""Returns all parentless ancestors ("roots") of (iterable) descendants."""
return _get_relation(descendant, 'parents')
def is_any_ancestor(v, ancestor):
"""Returns `True` if any member of `v` has an ancestor in `ancestor`s."""
v = set(_prep_arg(v))
ancestor = set(_prep_arg(ancestor))
return (any(v_ in ancestor for v_ in v) or
any(is_any_ancestor(v_.parents, ancestor) for v_ in v))
def iter_edges(leaves, from_roots=False):
"""Returns iter over all `(parent, child)` edges in `leaves`' ancestors."""
leaves = _prep_arg(leaves)
for child in leaves:
parents = child.children if from_roots else child.parents
for parent in parents:
yield (parent, child)
# The following is only supported in >= Python3.3:
# yield from iter_edges(parents, from_roots=from_roots)
for e in iter_edges(parents, from_roots=from_roots):
yield e
def plot_graph(leaves,
pos=None,
with_labels=True,
arrowsize=10,
node_size=1200,
fig=None,
labels=lambda node: node.name,
seed=42,
**kwargs):
"""Plots `leaves` and ancestors. (See `help(nx.draw_networkx)` for kwargs)."""
_import_once_nx()
_import_once_plt()
if isinstance(leaves, nx.Graph):
nx_graph = leaves
else:
nx_graph = nx.DiGraph()
nx_graph.add_edges_from(iter_edges(leaves))
if pos is None:
pos = lambda g: nx.spring_layout(g, seed=seed)
if callable(pos):
pos = pos(nx_graph)
if fig is None:
fig = plt.figure() # or, f,ax=plt.subplots()
if isinstance(fig, plt.Figure):
fig = fig.add_axes((0, 0, 1, 1)) # or, f.gca()
if not isinstance(fig, plt.Axes):
raise ValueError()
if callable(labels):
labels = dict((v, labels(v)) for v in nx_graph.nodes())
nx.draw(nx_graph,
pos=pos,
arrows=kwargs.pop('arrows', arrowsize > 0),
with_labels=with_labels,
arrowsize=arrowsize,
node_size=node_size,
ax=fig,
labels=labels)
return nx_graph, fig
def _get_relation(vertices, attr):
vertices = set(_prep_arg(vertices))
relations = set()
for v in vertices:
near_relation = getattr(v, attr)
relations.update(_get_relation(near_relation, attr) if near_relation
else (v,))
return relations
def _import_once_nx():
global nx
if nx is None:
import networkx as nx # pylint: disable=g-import-not-at-top,redefined-outer-name
def _import_once_plt():
global plt
if plt is None:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top,redefined-outer-name
def _prep_arg(x):
if isinstance(x, deferred.DeferredBase):
return (x,)
return x
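# Editor's sketch (not part of the original module): `plot_graph` also accepts an
# already-built networkx graph; the default labeller reads `node.name`, so plain string
# nodes need an explicit `labels` callable (here `str`).
#
#   import networkx as nx
#   g = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'c')])
#   graph, ax = plot_graph(g, labels=str)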
| apache-2.0 | -8,679,003,098,745,722,000 | 28.1875 | 95 | 0.645491 | false |
zharfi/Cidar | classification.py | 1 | 7753 | import itertools
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.decomposition import KernelPCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from feature_extraction import FeatureExtraction
from helper import Helper
# Classify is a class that carries out the classification function
class Classify:
    # Initialization of the previously trained artificial intelligence models
    # Preprocessing abbreviations:
# SCAL = Standard Scaler
# LDA = Linear Discriminant Analysis
# PCA = Principal Component Analysis
    # Artificial intelligence algorithm abbreviations:
# DT = Decision Tree
# KNN = K-Nearest Neighbour
# NB = Naive Bayes
# RF = Random Forest
# SVM = Support Vector Machine
model_fol = './Model'
scal = './Model/scale.sav'
proc_lda = './Model/lda.sav'
proc_pca = './Model/pca.sav'
model_dt = './Model/DT_noproc.sav'
model_knn = './Model/kNN_noproc.sav'
    model_nb = './Model/NB_lda.sav' # originally './Model/NB_pca.sav'
model_rf = './Model/RF_noproc.sav'
model_svm = './Model/SVM_noproc.sav'
model_svm_giemsa = './Model/SVM_noproc_g.sav'
model_svm_wright = './Model/SVM_noproc_w.sav'
def __init__(self):
pass
    # Function to classify many images at once
    # Only images that have already been cropped can be classified
def klasifikasiCitraBanyak(self, folder, method):
self.folder = folder
helper = Helper()
files = helper.listFiles(folder)
scale, proc, klas = self.loadModel(method)
fitur_banyak = []
hasil = []
for fil in files:
fe = FeatureExtraction()
fitur_banyak.append(fe.ekstraksifitur(fil))
hasil = self.klaf(scale, fitur_banyak, proc, method, klas)
return hasil
    # Function to classify white blood cell image data stored as text
    # The text file is saved under the name "Hasil Ekstraksi.txt"
    # The format uses a dot (.) for decimals and a comma (,) as separator, with no header
    # Example:
    # 2034,20.4,133,1, ... , 15.45
def klasifikasiTeks(self, folder, method):
self.folder = folder
berkas_teks = open(folder + "/Hasil Ekstraksi.txt", "r")
fitur_banyak = []
hasil = []
if berkas_teks != None:
fitur_banyak = np.loadtxt(berkas_teks,delimiter=',')
scale, proc, klas = self.loadModel(method)
hasil = self.klaf(scale, fitur_banyak, proc, method, klas)
return hasil
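    # Editor's sketch (hypothetical folder paths, not part of the original code): the two
    # entry points above are called the same way, e.g.
    #   hasil = Classify().klasifikasiCitraBanyak('./CroppedImages', 'Random Forest')
    #   hasil = Classify().klasifikasiTeks('./Data', 'SVM Giemsa')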
    # Function to compute and display the confusion matrix
    # This function requires "truth.txt", which contains the image classes
    # The image classes basophil, eosinophil, lymphocyte, monocyte, neutrophil, and stab are numbered 0-5 in order
    # Only Giemsa-stained images leave out basophils, so their classes run from 0-4
    # Example:
    # 5,1,0,0,4
    #
    # This means the first row is a stab, the second an eosinophil, and the fifth a neutrophil
def ambilConfusionMatrix(self, folder, prediksi):
self.folder = folder
truth_file = open(folder + "/truth.txt", "r")
if truth_file != None:
y_true = truth_file.read().split(",")
y_true_val = list(map(int, y_true))
conf = confusion_matrix(y_true_val, prediksi)
plt.figure()
classes = None
apakahWright = False
if apakahWright == True:
self.plot_confusion_matrix(conf, classes=[0, 1, 2, 3, 4, 5], title='Confusion matrix, without normalization')
else:
self.plot_confusion_matrix(conf, classes=[0, 1, 2, 3, 4], title='Confusion matrix, without normalization')
plt.show()
    # Helper function to plot the confusion matrix
    # Used by the ambilConfusionMatrix() function
def plot_confusion_matrix(self, cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
    # Function to load the selected model that was saved previously
    # All models must be stored in the ./Model folder
def loadModel(self, method):
scale = pickle.load(open(self.scal, 'rb'))
proc = None
klas = None
if method == 'Decision Tree':
klas = pickle.load(open(self.model_dt, 'rb'))
elif method == 'kNN':
klas = pickle.load(open(self.model_knn, 'rb'))
elif method == 'Neural Network':
dimension = 29
hidden_layers = [100, 100]
            feature_columns = [tf.contrib.layers.real_valued_column("", dimension=dimension)] # Number of features
klas = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=hidden_layers,
                                                  n_classes=5, # number of classes; blood has 5 types
model_dir= self.model_fol)
elif method == 'Naive Bayes':
            proc = pickle.load(open(self.proc_lda, 'rb')) # originally self.proc_pca
klas = pickle.load(open(self.model_nb, 'rb'))
elif method == 'Random Forest':
klas = pickle.load(open(self.model_rf, 'rb'))
elif method == 'SVM Giemsa':
klas = pickle.load(open(self.model_svm_giemsa, 'rb'))
elif method == 'SVM Wright':
klas = pickle.load(open(self.model_svm_wright, 'rb'))
else:
klas = pickle.load(open(self.model_svm, 'rb'))
return scale, proc, klas
    # Main function of the Classify() class
    # Contains the execution sequence of the functions in this class
def klaf(self, scale, fitur, proc, method, klas):
fitur_scaled = []
fitur_fixed = []
hasil = []
if method == 'Neural Network':
fitur_fixed = np.array(fitur, dtype=float)
hasil = np.asarray(list(klas.predict(fitur_fixed, as_iterable = True)))
else:
if proc == None:
fitur_scaled = scale.transform(fitur)
hasil = klas.predict(fitur_scaled)
else:
fitur_scaled = scale.transform(fitur)
fitur_fixed = proc.transform(fitur_scaled)
hasil = klas.predict(fitur_fixed)
        return hasil
| mit | 4,136,870,814,328,457,700 | 38.969072 | 125 | 0.604798 | false |
SuperDARNCanada/placeholderOS | tools/testing_utils/filter_testing/frerking/frerking.py | 2 | 13674 | from scipy import signal
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from scipy.fftpack import fft,ifft,fftshift
import math
import random
import cmath
#import test_signals
import sys
def create_signal_1(freq1, freq2, num_samps,rate):
f1 = freq1 + 2 * np.random.randn(num_samps)
f2 = freq2 + 2 * np.random.randn(num_samps)
t = np.arange(num_samps)/rate
sig = 10*np.exp(1j*2*np.pi*f1*t) + 10*np.exp(1j*2*np.pi*f2*t)
return sig
def create_signal_2(freq1, freq2, num_samps,rate):
t = np.arange(num_samps)/rate
sig = 10*np.exp(1j*2*np.pi*freq1*t) + 10*np.exp(1j*2*np.pi*freq2*t)
return sig
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def plot_fft(samplesa, rate, title):
fft_samps=fft(samplesa)
T= 1.0 /float(rate)
num_samps=len(samplesa)
if num_samps%2==1:
xf = np.linspace(-1.0/(2.0*T), 1.0/(2.0*T), num_samps)
else:
#xf = np.arange(-1.0/(2.0*T), 1.0/(2.0*T),1.0/(T*num_samps))
xf = np.linspace(-1.0/(2.0*T), 1.0/(2.0*T), num_samps)
fig= plt.figure()
ax1 = fig.add_subplot(111)
fft_to_plot=np.empty([num_samps],dtype=complex)
fft_to_plot=fftshift(fft_samps)
plt.plot(xf, 1.0/num_samps * np.abs(fft_to_plot))
plt.title(title)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
ax2 = ax1.twinx()
plt.ylabel('Phase [rads]', color='g')
angles=np.angle(fft_to_plot)
plt.plot(xf, angles, 'm')
return fig
def plot_all_ffts(bpass_filters, rate, title):
fig, smpplt = plt.subplots(1,1)
for filt in bpass_filters:
fft_samps=fft(filt)
T= 1.0 /float(rate)
num_samps=len(filt)
if num_samps%2==1:
xf = np.linspace(-1.0/(2.0*T), 1.0/(2.0*T), num_samps)
else:
#xf = np.arange(-1.0/(2.0*T), 1.0/(2.0*T),1.0/(T*num_samps))
xf = np.linspace(-1.0/(2.0*T), 1.0/(2.0*T), num_samps)
fft_to_plot=np.empty([num_samps],dtype=complex)
fft_to_plot=fftshift(fft_samps)
smpplt.plot(xf, 1.0/num_samps * np.abs(fft_to_plot))
smpplt.set_title(title)
smpplt.set_xlabel('Frequency (Hz)')
smpplt.set_ylabel('Amplitude')
return fig
def get_samples(rate,wave_freq,numberofsamps,start_rads):
rate = float(rate)
wave_freq = float(wave_freq)
start_rads = float(start_rads)
print start_rads
sampling_freq=2*math.pi*wave_freq/rate
sampleslen=int(numberofsamps)
samples=np.empty([sampleslen],dtype=complex)
for i in range(0,sampleslen):
amp=1
rads=math.fmod(start_rads + (sampling_freq * i), 2*math.pi)
samples[i]=amp*math.cos(rads)+amp*math.sin(rads)*1j
return samples
def downsample(samples, rate):
rate = int(rate)
sampleslen = len(samples)/rate + 1 # should be an int
samples_down=np.empty([sampleslen],dtype=complex)
samples_down[0]=samples[0]
print sampleslen
for i in range(1,len(samples)):
if i%rate==0:
#print(i/rate)
samples_down[i/rate]=samples[i]
return samples_down
def fftnoise(f):
f = np.array(f, dtype='complex')
Np = (len(f) - 1) // 2
phases = np.random.rand(Np) * 2 * np.pi
phases = np.cos(phases) + 1j * np.sin(phases)
f[1:Np+1] *= phases
f[-1:-1-Np:-1] = np.conj(f[1:Np+1])
return np.fft.ifft(f).real
def band_limited_noise(min_freq, max_freq, samples=1024, samplerate=1):
freqs = np.abs(np.fft.fftfreq(samples, 1/samplerate))
f = np.zeros(samples)
idx = np.where(np.logical_and(freqs>=min_freq, freqs<=max_freq))[0]
f[idx] = 1
return fftnoise(f)
# SET VALUES
# Low-pass filter design parameters
fs = 12e6 # Sample rate, Hz
wave_freq = -0.96e6 # 0.96 MHz below centre freq (13.04 MHz if ctr = 14 MHz)
ctrfreq = 14000 # kHz
cutoff = 400e3 # Desired cutoff frequency, Hz
trans_width = 50e3 # Width of transition from pass band to stop band, Hz
numtaps = 1024 # Size of the FIR filter.
decimation_rate = 200.0
pmax = 100 # max value of P (integer) that we will run the script at.
# Set the number of output samples first so we can get the correct number of
# input samples to have our three frequencies all perfectly in the FFT
# bins to check phase linearity.
num_output_samps = 5001
if numtaps > decimation_rate:
num_input_samps = num_output_samps*decimation_rate+numtaps
#num_output_samps = int((len(pulse_samples)-len(lpass))/decimation_rate)
else:
num_input_samps = num_output_samps*decimation_rate
#num_output_samps = int(len(pulse_samples)/decimation_rate)
fft_bin_increment = (fs/decimation_rate)/((num_output_samps-1)/2)
# Set the frequencies depending on FFT bins so that we can prove
# phase linearity.
freq_2 = wave_freq+10*fft_bin_increment
freq_3 = wave_freq-10*fft_bin_increment
# Calculate R*f/fs for Frerking's filter, which must be rational.
frerking = abs(decimation_rate * wave_freq / fs)
# find the number of filter coefficient sets (P)
for x in range(1, int(fs)):
if x*frerking % 1 == 0:
number_of_coeff_sets = x
break
else: # no break
sys.exit(['Error: could not find number of coefficient sets, greater than fs'])
if number_of_coeff_sets > pmax:
sys.exit(['Error: number of coefficient sets required is too large: %d' % number_of_coeff_sets])
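# Editor's note (not in the original script): with the values above, R*|F|/fs =
# 200 * 0.96 MHz / 12 MHz = 16.0, so the smallest integer x making x * 16.0 an integer is
# x = 1 and a single coefficient set suffices (P = 1). A frequency such as F = -0.99 MHz
# would instead give R*|F|/fs = 16.5 and therefore P = 2 alternating coefficient sets.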
#pulse_samples = test_signals.create_signal_1(wave_freq,4.0e6,10000,fs) # with added noise
#pulse_samples = 0.008*np.asarray(random.sample(range(-10000,10000),10000))
#pulse_samples = band_limited_noise(-6000000,6000000,10000,fs)
pulse_samples = create_signal_2(wave_freq,0,num_input_samps,fs) # without noise added
pulse_samples += create_signal_2(freq_2,freq_3,num_input_samps,fs)
print 'Fs = %d' % fs
print 'F = %d' % wave_freq
print 'R = %d' % decimation_rate
print 'P = %d' % number_of_coeff_sets
fig1= plot_fft(pulse_samples,fs, 'FFT of Original Pulse Samples')
lpass = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs],
[1, 0], Hz=fs)
bpass = np.array([])
for i in range(0, number_of_coeff_sets):
if i == 0:
print number_of_coeff_sets
start_rads = 0
shift_wave = get_samples(fs,wave_freq,numtaps,start_rads)
# we need a number of bpass filters depending on number_of_coeff_sets
bpass = np.array([[l*i for l,i in zip(lpass,shift_wave)]])
else:
# shift wave needs to start in a different location
# start at sampling rate * nth sample we are on (i * decimation_rate)
start_rads = -math.fmod((2*math.pi*wave_freq/fs)*i*decimation_rate, 2*math.pi)
print start_rads
shift_wave = get_samples(fs,wave_freq,numtaps,start_rads)
bpass = np.append(bpass, [[l*i for l,i in zip(lpass,shift_wave)]], axis=0)
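# Editor's note (not in the original script): coefficient set k built above equals the
# lowpass prototype multiplied by exp(j*2*pi*F/fs*(n - k*R)), i.e. the same bandpass
# filter with its phase reference shifted back by k*R input samples, which is the
# per-output-sample phase alignment Frerking's multi-set method requires.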
# have to implement special convolution with multiple filters.
#
#
#
# CALCULATE USING FRERKING'S METHOD OF MULTIPLE COEFF SETS.
if len(lpass) > decimation_rate:
first_sample_index = len(lpass)
else:
first_sample_index = decimation_rate
output1=np.array([],dtype=complex)
for x in range(0,num_output_samps):
bpass_filt_num = x % number_of_coeff_sets
sum_array = np.array([l*i for l,i in zip(pulse_samples[(first_sample_index + x * decimation_rate - len(lpass)):(first_sample_index + x * decimation_rate)],bpass[bpass_filt_num][::-1])])
#sum_array = np.array([l*i for l,i in zip(pulse_samples[(x*len(lpass)):((x+1)*len(lpass))],bpass[bpass_filt_num][::-1])])
output_sum = 0.0
for element in sum_array:
output_sum += element
output1 = np.append(output1,output_sum)
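# The accumulation loop above is simply a dot product of the input segment with the reversed
# filter taps; an equivalent one-liner (not applied here, with start/stop standing for the
# slice bounds used above) would be:
#   output_sum = np.dot(pulse_samples[start:stop], bpass[bpass_filt_num][::-1])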
print num_output_samps
# Uncomment to plot the fft after first filter stage.
#response1 = plot_fft(output,fs)
fig2 = plot_all_ffts(bpass,fs, 'FFT of All Bandpass Filters Using Frerking\'s Method')
fig3 = plot_fft(lpass,fs, 'FFT of Lowpass Filter')
fig4 = plt.figure()
plt.title('Frequency Responses of the P Bandpass Filters (Amp)')
plt.ylabel('Amplitude [dB]', color='b')
plt.xlabel('Frequency [rad/sample]')
plt.grid()
for i in range(0, number_of_coeff_sets):
w,h = signal.freqz(bpass[i], whole=True)
#ax1 = fig.add_subplot(111)
plt.plot(w, 20 * np.log10(abs(h)))
plt.axis('tight')
fig5 = plt.figure()
plt.title('Frequency Responses of the P Bandpass Filters (Phase)')
plt.xlabel('Frequency [rad/sample]')
plt.ylabel('Angle (radians)', color='g')
plt.grid()
for i in range(0, number_of_coeff_sets):
w,h = signal.freqz(bpass[i], whole=True)
#ax2 = ax1.twinx()
angles = np.unwrap(np.angle(h))
plt.plot(w, angles)
plt.axis('tight')
#
#
#
# CALCULATE USING ALEX'S METHOD, TRANSLATING SAMPLES AFTER DECIMATION.
output2=np.array([],dtype=complex)
for x in range(0,num_output_samps):
bpass_filt_num = 0
sum_array = np.array([l*i for l,i in zip(pulse_samples[(first_sample_index + x * decimation_rate - len(lpass)):(first_sample_index + x * decimation_rate)],bpass[bpass_filt_num][::-1])])
output_sum = 0.0
for element in sum_array:
output_sum += element
output2 = np.append(output2,output_sum)
#fig10 = plot_fft(output2, fs/decimation_rate, 'FFT of New Method Before Phase Correction')
# Phase shift after Convolution.
for i in range(0, number_of_coeff_sets):
# calculate the offset.
start_rads = math.fmod((2*math.pi*wave_freq/fs)*i*decimation_rate, 2*math.pi)
# offset every nth + i sample
n = i
while n < num_output_samps:
output2[n]=output2[n]*cmath.exp(-1j*start_rads)
n += number_of_coeff_sets
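# Rotating every (n*P + i)-th decimated sample by exp(-1j*start_rads) is intended to be
# equivalent to filtering with the i-th phase-shifted bandpass filter before decimation,
# so one fixed filter plus this post-decimation phase correction replaces the P-filter bank.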
#fig9 = plot_fft(output2, fs/decimation_rate, 'FFT of New Method Output')
#
#
#
# CALCULATE USING MIXING THEN DECIMATION, IN TWO STEPS.
# shifting the signal not the filter so we must shift in the other direction.
# we have to start the shift_wave in the right spot (offset by the first sample index that was used above)
shift_wave = get_samples(fs,-wave_freq,len(pulse_samples),(math.fmod((first_sample_index-1)*2*math.pi*wave_freq/fs, 2*math.pi)))
pulse_samples = [l*i for l,i in zip(pulse_samples,shift_wave)]
# filter before decimating to prevent aliasing
#fig7 = plot_fft(pulse_samples,fs, 'FFT of Mixed Pulse Samples Using Traditional Method, Before Filtering and Decimating')
output = signal.convolve(pulse_samples,lpass,mode='valid') #/ sum(lpass)
# OR, can convolve using the same method as above (which is using the valid method).
#output=np.array([],dtype=complex)
#for x in range(0,len(pulse_samples)-first_sample_index):
# sum_array = np.array([l*i for l,i in zip(pulse_samples[(first_sample_index + x - len(lpass)):(first_sample_index + x)],lpass[::-1])])
# output_sum = 0.0
# for element in sum_array:
# output_sum += element
# output = np.append(output,output_sum)
#fig8 = plot_fft(output, fs, 'FFT of Filtered Output Using Traditional Method, Before Decimating')
# Decimate here.
output3=np.array([],dtype=complex)
for x in range(0,num_output_samps):
samp = output[x * decimation_rate]
output3=np.append(output3, samp)
# Plot the output using Frerking's method
new_fs = float(fs) / decimation_rate
#
#
#
#
# Plot FFTs and Phase responses of all methods
#fig6, smpplt = plt.subplots(1,1)
fig6 = plt.figure()
fft_samps1=fft(output1)
fft_samps2=fft(output2)
fft_samps3=fft(output3)
T= 1.0 /float(new_fs)
num_samps=len(output1)
if num_samps%2==1:
xf = np.linspace(-1.0/(2.0*T), 1.0/(2.0*T), num_samps)
else:
#xf = np.arange(-1.0/(2.0*T), 1.0/(2.0*T),1.0/(T*num_samps))
xf = np.linspace(-1.0/(2.0*T), 1.0/(2.0*T), num_samps)
print(num_samps)
#print(len(fft_samps))
#print(len(xf))
ax1 = fig6.add_subplot(111)
plt.title('Response of All Filters')
plt.ylabel('Amplitude [dB]', color='r')
plt.xlabel('Frequency [rad/sample]')
plt.grid()
fft_to_plot1=np.empty([num_samps],dtype=complex)
fft_to_plot1=fftshift(fft_samps1)
fft_to_plot2=np.empty([num_samps],dtype=complex)
fft_to_plot2=fftshift(fft_samps2)
fft_to_plot3=np.empty([num_samps],dtype=complex)
fft_to_plot3=fftshift(fft_samps3)
plt.plot(xf, 1.0/num_samps * np.abs(fft_to_plot1), 'c')
plt.plot(xf, 1.0/num_samps * np.abs(fft_to_plot2), 'y')
plt.plot(xf, 1.0/num_samps * np.abs(fft_to_plot3), 'r')
#plt.plot(xf, 1.0/num_samps * np.abs( np.roll( fft_to_plot3, int(-len(output1) * wave_freq / (1.0/T)))), 'c')
ax2 = ax1.twinx()
plt.ylabel('Phase [rads]', color='g')
angles1=np.angle(fft_to_plot1)
angles2=np.angle(fft_to_plot2)
angles3=np.angle(fft_to_plot3)
plt.plot(xf, angles1, 'm')
plt.plot(xf, angles2, 'b')
plt.plot(xf, angles3, 'g')
for ind,freq in enumerate(xf):
if freq == -10*fft_bin_increment:
angle1=angles1[ind]
print 'Angle 1: %f Rads' % angle1
if freq == 0.0:
angle2=angles1[ind]
print 'Angle 2: %f Rads' % angle2
if freq == 10*fft_bin_increment:
angle3=angles1[ind]
print 'Angle 3: %f Rads' % angle3
angle_offset1 = angle2-angle1
angle_offset2 = angle3-angle2
print 'Angle2 - Angle1 = %f = Angle3 - Angle2 = %f' % (angle2-angle1, angle3-angle2)
print 'Error of : %f Degrees' % (abs(angle_offset2-angle_offset1)*360/(2*math.pi))
# in time domain, all filtered outputs:
fig7 = plt.figure()
plt.title('Three Filtered Outputs Of Different Methods, Time Domain')
plt.plot(range(0,len(output1)), output1)
plt.plot(range(0,len(output2)), output2)
plt.plot(range(0,len(output3)), output3)
#for n in range(0,len(output1)):
# if not isclose(output1[n], output2[n]) or not isclose(output2[n], output3[n]) or not isclose(output1[n],output3[n]):
# print "NOT EQUAL: %d" % n
# print output1[n]
# print output2[n]
# print output3[n]
# WHY? The last output sample is not exactly equal but the rest are....
# Maybe a python float thing?
plt.show()
| gpl-3.0 | -2,596,692,148,660,857,000 | 33.882653 | 189 | 0.661109 | false |
ycliuxinwei/linear_sys_sim | python/linear_system_v2.py | 1 | 3236 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 13 21:24:00 2015
@author: Edmund Liu
License: BSD license
"""
import numpy as np
from scipy import linalg as lin
from scipy import signal as sgn
import matplotlib.pyplot as plt
""" necessory functions
obsv(A, C) - Observability of pair (A, C)
ctrb(A, B) - Controllabilty of pair (A, B)
"""
# Observability of pair (A, C)
def obsv(A, C):
amat = np.mat(A)
cmat = np.mat(C)
n = np.shape(amat)[0]
    # Construct the observability matrix
obsv = cmat
for i in range(1, n):
obsv = np.vstack((obsv, cmat*amat**i))
observability = np.linalg.matrix_rank(obsv.getA())
return observability
# Controllability of pair (A, B)
def ctrb(A, B):
amat = np.mat(A)
bmat = np.mat(B)
n = np.shape(amat)[0]
# Construct the controllability matrix
ctrb = bmat
for i in range(1, n):
ctrb = np.hstack((ctrb, amat**i*bmat))
controllability = np.linalg.matrix_rank(ctrb.getA())
return controllability
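# Both helpers return the rank of the observability/controllability matrix; the pair is
# observable (resp. controllable) exactly when that rank equals the state dimension
# (4 for the inverted-pendulum model defined below).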
"""
self-functions
"""
# system function
def Sys(x, u, other, A, B):
x = A.dot(x) + B.dot(u) + other
return x
# Runge-Kutta method
def RK(x, u, other, A, B, h):
K1 = Sys(x, u, other, A, B)
K2 = Sys(x + h*K1/2, u, other, A, B)
K3 = Sys(x + h*K2/2, u, other, A, B)
K4 = Sys(x + h*K3, u, other, A, B)
x1 = x + (K1 + 2*K2 + 2*K3 + K4)*h/6
return x1
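# RK above performs one step of the classical fourth-order Runge-Kutta scheme: four slope
# evaluations K1..K4 combined with weights (1, 2, 2, 1)/6 over a step of size h.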
# parameters
Ml = 0.5; ml = 0.2
bl = 0.1; Il = 0.006
gl = 9.8; ll = 0.3
pl = Il*(Ml+ml)+Ml*ml*ll**2
# Inverted Pendulum System
# x' = Ax+Bu
# y = Cx+Du
A = np.array([[0, 1.0, 0, 0],
[0, -(Il+ml*ll**2)*bl/pl, (ml**2*gl*ll**2)/pl, 0],
[0, 0, 0, 1.0],
[0, -(ml*ll*bl)/pl, ml*gl*ll*(Ml+ml)/pl, 0]])
B = np.array([[0], [(Il+ml*ll**2)/pl], [0], [ml*ll/pl]])
C = np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0]])
# initial state
x0 = np.array([[0.98], [0], [0.2], [0]])
# size of A and B
n1 = np.size(A,0)
n2 = np.size(B,1)
# Solve the continuous time lqr controller
# to get controller gain 'K'
# J = x'Qx + u'Ru
# A'X + XA - XBR^(-1)B'X + Q = 0
# K = inv(R)B'X
Q = C.T.dot(C)
R = np.eye(n2)
X = np.matrix(lin.solve_continuous_are(A, B, Q, R))
K = -np.matrix(lin.inv(R)*(B.T*X)).getA()
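# With K = -R^(-1) B' X and the control law u = K*x (applied to the state estimate below),
# the closed loop becomes x' = (A + B K) x, the LQR-optimal regulator for J = x'Qx + u'Ru.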
# observability
ob = obsv(A, C)
print "observability =", ob
# controllability
cb = ctrb(A, B)
print "controllability =", cb
# get the observer gain, using pole placement
p = np.array([-13, -12, -11, -10])
fsf1 = sgn.place_poles(A.T, C.T, p)
L = fsf1.gain_matrix.T
x = x0; xhat = 0*x0
h = 0.01; ts = 2000;
X = np.array([[],[],[],[]])
Y = np.array([[],[]])
Xhat = X; t = []
for k in range(ts):
t.append(k*h)
u = K.dot(xhat)
y = C.dot(x)
yhat = C.dot(xhat)
X = np.hstack((X, x))
Xhat = np.hstack((Xhat, xhat))
Y = np.hstack((Y, y))
x = RK(x, u, np.zeros([n1,1]), A, B, h)
xhat = RK(xhat, u, L.dot((y-yhat)), A, B, h)
from bokeh.plotting import figure, show, output_file, vplot
output_file("linear_system.html", title="linear_system")
TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select"
p1 = figure(title="linear_system Example", tools=TOOLS)
p1.line(t, Y[0], legend="y_1", line_width=2)
p1.line(t, Y[1], legend="y_2", color="green", line_width=2)
show(vplot(p1)) # open a browser
| bsd-2-clause | -2,365,946,444,817,965,600 | 23.330827 | 65 | 0.568603 | false |
daniel-muthukrishna/EmissionLineAnalysis | fitelp/fit_line_profiles.py | 1 | 16449 | import os
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from lmfit import Parameters
from lmfit.models import GaussianModel, LinearModel
from astropy.constants import c
from fitelp.label_tools import line_label
import fitelp.constants as constants
constants.init()
def vel_to_wave(restWave, vel, flux, fluxError=None, delta=False):
if delta is True:
wave = (vel / c.to('km/s').value) * restWave
else:
wave = (vel / c.to('km/s').value) * restWave + restWave
flux = flux / (restWave / c.to('km/s').value)
if fluxError is not None:
fluxError = fluxError / (restWave / c.to('km/s').value)
        return wave, flux, fluxError
else:
return wave, flux
def wave_to_vel(restWave, wave, flux, fluxError=None, delta=False):
if delta is True:
vel = (wave / restWave) * c.to('km/s').value
else:
vel = ((wave - restWave) / restWave) * c.to('km/s').value
flux = flux * (restWave / c.to('km/s').value)
if fluxError is not None:
fluxError = fluxError * (restWave / c.to('km/s').value)
return vel, flux, fluxError
else:
return vel, flux
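# Illustrative numbers for the conversions above: for H-alpha at restWave = 6562.8 Angstrom,
# an offset of +10 Angstrom maps to roughly +457 km/s ((10 / 6562.8) * c), and the flux is
# rescaled by the Jacobian restWave / c (or its inverse in the other direction) so that the
# integrated flux is preserved.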
class FittingProfile(object):
def __init__(self, wave, flux, restWave, lineName, zone, rp, fluxError=None, xAxis='vel', initVals='vel'):
"""The input vel and flux must be limited to a single emission line profile"""
self.flux = flux
self.fluxError = fluxError
self.restWave = restWave
self.lineName = lineName
self.zone = zone
self.weights = self._weights()
self.rp = rp
self.xAxis = xAxis
self.initVals = initVals
if xAxis == 'vel':
if fluxError is None:
vel, self.flux = wave_to_vel(restWave, wave, flux)
else:
vel, self.flux, self.fluxError = wave_to_vel(restWave, wave, flux, fluxError)
self.x = vel
else:
self.x = wave
self.linGaussParams = Parameters()
def _weights(self):
if self.fluxError is None:
return None
else:
fluxErrorCR = self.fluxError# - self.continuum
return 1./fluxErrorCR
def _get_amplitude(self, numOfComponents, modelFit):
amplitudeTotal = 0.
for i in range(numOfComponents):
amplitudeTotal = amplitudeTotal + modelFit.best_values['g%d_amplitude' % (i+1)]
print("Amplitude Total is %f" % amplitudeTotal)
return amplitudeTotal
def _gaussian_component(self, pars, prefix, c, s, a, limits):
"""Fits a gaussian with given parameters.
pars is the lmfit Parameters for the fit, prefix is the label of the gaussian, c is the center, s is sigma,
a is amplitude. Returns the Gaussian model"""
varyCentre = True
varySigma = True
varyAmp = True
if limits['c'] is False:
varyCentre = False
cMin, cMax = -np.inf, np.inf
elif type(limits['c']) is tuple:
cMin = limits['c'][0]
cMax = limits['c'][1]
else:
cMin = c - c*limits['c']
cMax = c + c*limits['c']
if limits['s'] is False:
varySigma = False
sMin, sMax = -np.inf, np.inf
elif type(limits['s']) is tuple:
sMin = limits['s'][0]
sMax = limits['s'][1]
else:
sMin = s - s * limits['s']
sMax = s + s * limits['s']
if limits['a'] is False:
varyAmp = False
aMin, aMax = -np.inf, np.inf
elif type(limits['a']) is tuple:
aMin = limits['a'][0]
aMax = limits['a'][1]
else:
aMin = a - a * limits['a']
aMax = a + a * limits['a']
g = GaussianModel(prefix=prefix)
pars.update(g.make_params())
if isinstance(c, str):
pars[prefix + 'center'].set(expr=c, min=cMin, max=cMax, vary=varyCentre)
else:
pars[prefix + 'center'].set(c, min=cMin, max=cMax, vary=varyCentre)
if isinstance(s, str):
pars[prefix + 'sigma'].set(expr=s, min=sMin, max=sMax, vary=varySigma)
else:
pars[prefix + 'sigma'].set(s, min=sMin, max=sMax, vary=varySigma)
if isinstance(a, str):
pars[prefix + 'amplitude'].set(expr=a, min=aMin, max=aMax, vary=varyAmp)
else:
pars[prefix + 'amplitude'].set(a, min=aMin, max=aMax, vary=varyAmp)
return g
def multiple_close_emission_lines(self, lineNames, cListInit, sListInit, lS, lI):
"""All lists should be the same length"""
gList = []
# Assume initial parameters are in velocity
lin = LinearModel(prefix='lin_')
self.linGaussParams = lin.guess(self.flux, x=self.x)
self.linGaussParams.update(lin.make_params())
self.linGaussParams['lin_slope'].set(lS, vary=True)
self.linGaussParams['lin_intercept'].set(lI, vary=True)
for j, lineName in enumerate(lineNames):
numComps = self.rp.emProfiles[lineName]['numComps']
restWave = self.rp.emProfiles[lineName]['restWavelength']
copyFrom = self.rp.emProfiles[lineName]['copyFrom']
if copyFrom is not None:
copyFromRestWave = self.rp.emProfiles[copyFrom]['restWavelength']
cList = ['g{0}{1}_center*{2}'.format(copyFrom.replace('-', ''), (i + 1), (restWave / copyFromRestWave)) for i in range(numComps)]
sList = ['g{0}{1}_sigma'.format(copyFrom.replace('-', ''), i + 1) for i in range(numComps)]
if type(self.rp.emProfiles[lineName]['ampList']) is list:
aList = self.rp.emProfiles[lineName]['ampList']
if self.xAxis == 'vel':
aList = vel_to_wave(restWave, vel=0, flux=np.array(aList))[1]
else:
ampRatio = self.rp.emProfiles[lineName]['ampList']
aList = ['g{0}{1}_amplitude*{2}'.format(copyFrom.replace('-', ''), i + 1, ampRatio) for i in range(numComps)]
else:
cList = vel_to_wave(restWave, vel=np.array(cListInit), flux=0)[0]
sList = vel_to_wave(restWave, vel=np.array(sListInit), flux=0, delta=True)[0]
aListInit = self.rp.emProfiles[lineName]['ampList']
aList = vel_to_wave(restWave, vel=0, flux=np.array(aListInit))[1]
limits = self.rp.emProfiles[lineName]['compLimits']
for i in range(numComps):
if type(limits['c']) is list:
cLimit = limits['c'][i]
else:
cLimit = limits['c']
if type(limits['s']) is list:
sLimit = limits['s'][i]
else:
sLimit = limits['s']
if type(limits['a']) is list:
aLimit = limits['a'][i]
else:
aLimit = limits['a']
lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
if len(lineNames) == 1:
prefix = 'g{0}_'.format(i + 1)
else:
prefix = 'g{0}{1}_'.format(lineName.replace('-', ''), i + 1)
gList.append(self._gaussian_component(self.linGaussParams, prefix, cList[i], sList[i], aList[i], lims))
gList = np.array(gList)
mod = lin + gList.sum()
init = mod.eval(self.linGaussParams, x=self.x)
out = mod.fit(self.flux, self.linGaussParams, x=self.x, weights=self.weights)
f = open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, "{0}_Log.txt".format(self.rp.regionName)), "a")
print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
print(out.fit_report())
f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
f.write(out.fit_report())
f.close()
components = out.eval_components()
if not hasattr(self.rp, 'plotResiduals'):
self.rp.plotResiduals = True
numComps = self.rp.emProfiles[lineName]['numComps']
self.plot_emission_line(numComps, components, out, self.rp.plotResiduals, lineNames, init=init, scaleFlux=self.rp.scaleFlux)
return out, components
def lin_and_multi_gaussian(self, numOfComponents, cList, sList, aList, lS, lI, limits):
"""All lists should be the same length"""
gList = []
if self.xAxis == 'wave' and self.initVals == 'vel':
cList = vel_to_wave(self.restWave, vel=np.array(cList), flux=0)[0]
sList = vel_to_wave(self.restWave, vel=np.array(sList), flux=0, delta=True)[0]
aList = vel_to_wave(self.restWave, vel=0, flux=np.array(aList))[1]
elif self.xAxis == 'vel' and self.initVals == 'wave':
cList = wave_to_vel(self.restWave, wave=np.array(cList), flux=0)[0]
sList = wave_to_vel(self.restWave, wave=np.array(sList), flux=0, delta=True)[0]
aList = wave_to_vel(self.restWave, wave=0, flux=np.array(aList))[1]
lin = LinearModel(prefix='lin_')
self.linGaussParams = lin.guess(self.flux, x=self.x)
self.linGaussParams.update(lin.make_params())
self.linGaussParams['lin_slope'].set(lS, vary=True)
self.linGaussParams['lin_intercept'].set(lI, vary=True)
for i in range(numOfComponents):
if type(limits['c']) is list:
cLimit = limits['c'][i]
else:
cLimit = limits['c']
if type(limits['s']) is list:
sLimit = limits['s'][i]
else:
sLimit = limits['s']
if type(limits['a']) is list:
aLimit = limits['a'][i]
else:
aLimit = limits['a']
lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
prefix = 'g{0}_'.format(i+1)
gList.append(self._gaussian_component(self.linGaussParams, prefix, cList[i], sList[i], aList[i], lims))
gList = np.array(gList)
mod = lin + gList.sum()
init = mod.eval(self.linGaussParams, x=self.x)
out = mod.fit(self.flux, self.linGaussParams, x=self.x, weights=self.weights)
f = open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, "{0}_Log.txt".format(self.rp.regionName)), "a")
print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
print(out.fit_report())
f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
f.write(out.fit_report())
f.close()
components = out.eval_components()
if not hasattr(self.rp, 'plotResiduals'):
self.rp.plotResiduals = True
self.plot_emission_line(numOfComponents, components, out, self.rp.plotResiduals, init=init, scaleFlux=self.rp.scaleFlux)
self._get_amplitude(numOfComponents, out)
return out, components
def plot_emission_line(self, numOfComponents, components, out, plotResiduals=True, lineNames=None, init=None, scaleFlux=1e14):
ion, lambdaZero = line_label(self.lineName, self.restWave)
fig = plt.figure("%s %s %s" % (self.rp.regionName, ion, lambdaZero))
if plotResiduals is True:
frame1 = fig.add_axes((.1, .3, .8, .6))
plt.title("%s %s" % (ion, lambdaZero))
if self.xAxis == 'wave':
x = self.x
xLabel = constants.WAVE_AXIS_LABEL
yLabel = constants.FluxUnitsLabels(scaleFlux).FLUX_WAVE_AXIS_LABEL
elif self.xAxis == 'vel':
if hasattr(self.rp, 'showSystemicVelocity') and self.rp.showSystemicVelocity is True:
x = self.x - self.rp.systemicVelocity
xLabel = constants.DELTA_VEL_AXIS_LABEL
else:
x = self.x
xLabel = constants.VEL_AXIS_LABEL
            if hasattr(self.rp, 'plottingXRange') and self.rp.plottingXRange is not None:
plt.xlim(self.rp.plottingXRange)
yLabel = constants.FluxUnitsLabels(scaleFlux).FLUX_VEL_AXIS_LABEL
else:
raise Exception("Invalid xAxis argument. Must be either 'wave' or 'vel'. ")
plt.plot(x, self.flux, label='Data')
for i in range(numOfComponents):
labelComp = self.rp.componentLabels
if lineNames is None:
plt.plot(x, components['g%d_' % (i+1)]+components['lin_'], color=self.rp.componentColours[i], linestyle=':', label=labelComp[i])
else:
for j, lineName in enumerate(lineNames):
plt.plot(x, components['g{0}{1}_'.format(lineName.replace('-', ''), i + 1)] + components['lin_'], color=self.rp.componentColours[i], linestyle=':', label=labelComp[i])
# plt.plot(x, components['lin_'], label='lin_')
plt.plot(x, out.best_fit, color='black', linestyle='--', label='Fit')
# plt.plot(x, init, label='init')
plt.legend(loc='upper left')
plt.ylabel(yLabel)
if plotResiduals is True:
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
frame2 = fig.add_axes((.1, .1, .8, .2))
plt.plot(x, self.flux - out.best_fit)
plt.axhline(y=0, linestyle='--', color='black')
plt.ylabel('Residuals')
# plt.locator_params(axis='y', nbins=3)
# nbins = len(frame2.get_yticklabels())
frame2.yaxis.set_major_locator(MaxNLocator(nbins=3, prune='upper'))
plt.xlabel(xLabel)
plt.savefig(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, self.lineName + " {0} Component Linear-Gaussian Model".format(numOfComponents)), bbox_inches='tight')
def plot_profiles(lineNames, rp, nameForComps='', title='', sortedIndex=None, plotAllComps=False, xAxis='vel', logscale=False, ymin=None):
try:
plt.figure(title)
ax = plt.subplot(1, 1, 1)
plt.title(title) # Recombination Emission Lines")
if xAxis == 'wave':
xLabel = constants.WAVE_AXIS_LABEL
yLabel = constants.FluxUnitsLabels(rp.scaleFlux).FLUX_WAVE_AXIS_LABEL
elif xAxis == 'vel':
if hasattr(rp, 'showSystemicVelocity') and rp.showSystemicVelocity is True:
xLabel = constants.DELTA_VEL_AXIS_LABEL
else:
xLabel = constants.VEL_AXIS_LABEL
            if hasattr(rp, 'plottingXRange'):
plt.xlim(rp.plottingXRange)
yLabel = constants.FluxUnitsLabels(rp.scaleFlux).FLUX_VEL_AXIS_LABEL
else:
raise Exception("Invalid xAxis argument. Must be either 'wave' or 'vel'. ")
plt.xlabel(xLabel)
plt.ylabel(yLabel)
for i in range(len(lineNames)):
name, x, flux, mod, col, comps, lab = rp.emProfiles[lineNames[i]]['plotInfo']
if xAxis == 'vel' and hasattr(rp, 'showSystemicVelocity') and rp.showSystemicVelocity is True:
x = x - rp.systemicVelocity
ax.plot(x, flux, color=col, label=lab)
ax.plot(x, mod, color=col, linestyle='--')
if plotAllComps is True:
for idx in range(rp.emProfiles[lineNames[i]]['numComps']):
plt.plot(x, comps['g%d_' % (idx + 1)] + comps['lin_'], color=rp.componentColours[idx], linestyle=':')
else:
if name == nameForComps:
for idx in range(rp.emProfiles[lineNames[i]]['numComps']):
plt.plot(x, comps['g%d_' % (idx + 1)] + comps['lin_'], color=rp.componentColours[idx], linestyle=':')
if sortedIndex is not None:
handles, labels = ax.get_legend_handles_labels()
handles2 = [handles[idx] for idx in sortedIndex]
labels2 = [labels[idx] for idx in sortedIndex]
ax.legend(handles2, labels2)
else:
ax.legend()
if logscale is True:
ax.set_yscale('log')
if ymin is not None:
ax.set_ylim(bottom=ymin)
plt.savefig(os.path.join(constants.OUTPUT_DIR, rp.regionName, title.strip(' ') + '.png'), bbox_inches='tight')
except KeyError:
print("SOME IONS IN {0} HAVE NOT BEEN DEFINED.".format(lineNames))
| mit | -2,569,111,299,552,671,000 | 43.456757 | 187 | 0.565627 | false |
droundy/deft | papers/contact/figs/plot-gnn-walls.py | 1 | 2967 | #!/usr/bin/python
import matplotlib
matplotlib.use('Agg')
import pylab, numpy, sys
if len(sys.argv) != 6:
print(("Usage: " + sys.argv[0] + " mc-filename.dat wb-filename.dat wbt-filename.dat wb-m2.dat out-filename.pdf"))
exit(1)
mcdata = numpy.loadtxt(sys.argv[1])
dftdata = numpy.loadtxt(sys.argv[2])
wbtdata = numpy.loadtxt(sys.argv[3])
wbm2data = numpy.loadtxt(sys.argv[4])
dft_len = len(dftdata[:, 0])
dft_dr = dftdata[2, 0] - dftdata[1, 0]
mcoffset = 13
n0 = dftdata[:, 6]
nA = dftdata[:, 8]
nAmc = mcdata[:, 11]
n0mc = mcdata[:, 10]
stop_here = int(dft_len - 1/dft_dr)
print(stop_here)
start_here = int(2.5/dft_dr)
off = 0
me = 30
n = len(mcdata[:, 0])
pylab.figure(figsize=(8, 6))
pylab.subplots_adjust(hspace=0.001)
Agnn_plt = pylab.subplot(2, 1, 1)
Agnn_plt.plot(dftdata[start_here:stop_here, 0], dftdata[start_here:stop_here, 1]*nA[start_here:stop_here]*(4*numpy.pi/3)**2*dftdata[start_here:stop_here, 5],
"g+--", markevery=me, label="$nn_Ag_\sigma^A$ (White Bear)")
Agnn_plt.plot(mcdata[:, 0]+mcoffset, mcdata[:, 2+2*off]*mcdata[:, 1]*(4*numpy.pi/3)**2, "g-", label="$nn_Ag_\sigma^A$ MC")
Agnn_plt.plot(dftdata[start_here:stop_here, 0], (dftdata[start_here:stop_here, 1]*4*numpy.pi/3)**2*dftdata[start_here:stop_here, 7],
"rx--", markevery=me, label="$nn_Ag_\sigma^A$ (Gross)")
Agnn_plt.legend(loc=1, bbox_to_anchor=[1.0, 1.0], ncol=1).get_frame().set_alpha(0.5)
#It seems like with the Gross here we do n*n*g and not n*nA*g, why?
# if ((mcdata[int(n/2),1]*4*numpy.pi/3<0.45) & (mcdata[int(n/2),1]*4*numpy.pi/3>0.35)):
# pylab.ylim(0.3,0.980)
# if ((mcdata[int(n/2),1]*4*numpy.pi/3<0.35) & (mcdata[int(n/2),1]*4*numpy.pi/3>0.25)):
# pylab.ylim(0.090,0.360)
# if ((mcdata[int(n/2),1]*4*numpy.pi/3<0.15) & (mcdata[int(n/2),1]*4*numpy.pi/3>0.05)):
# pylab.ylim(0.000,0.034)
Sgnn_plt = pylab.subplot(2, 1, 2)
Sgnn_plt.plot(dftdata[start_here:stop_here, 0], (n0[start_here:stop_here]*4*numpy.pi/3)**2*dftdata[start_here:stop_here, 3],
"g+--", markevery=me, label="$n_0^2g_\sigma^S$ (White Bear)")
Sgnn_plt.plot(mcdata[:, 0]+mcoffset, mcdata[:, 3+2*off]*n0mc*(4*numpy.pi/3)**2, "g-", label="$n_0^2g_\sigma^S$ MC")
Sgnn_plt.plot(dftdata[start_here:stop_here, 0], (n0[start_here:stop_here]*4*numpy.pi/3)**2*dftdata[start_here:stop_here, 4],
"rx--", markevery=me, label="$n_0^2g_\sigma^S$ (YuWu)")
Sgnn_plt.legend(loc=1, bbox_to_anchor=[1.0, 1.0], ncol=1).get_frame().set_alpha(0.5)
pylab.xlim(0, 12)
# if ((mcdata[int(n/2),1]*4*numpy.pi/3<0.45) & (mcdata[int(n/2),1]*4*numpy.pi/3>0.35)):
# pylab.ylim(0.270,0.980)
# if ((mcdata[int(n/2),1]*4*numpy.pi/3<0.35) & (mcdata[int(n/2),1]*4*numpy.pi/3>0.25)):
# pylab.ylim(0.120,0.380)
# if ((mcdata[int(n/2),1]*4*numpy.pi/3<0.15) & (mcdata[int(n/2),1]*4*numpy.pi/3>0.05)):
# pylab.ylim(0.000,0.030)
#xticklabels = A_plt.get_xticklabels() + S_plt.get_xticklabels()
#pylab.setp(xticklabels, visible=False)
pylab.savefig(sys.argv[5])
| gpl-2.0 | -2,023,563,158,121,514,500 | 41.385714 | 157 | 0.629592 | false |
architecture-building-systems/CityEnergyAnalyst | cea/interfaces/cli/excel_to_shapefile.py | 2 | 2759 | """
Implements the CEA script
``excel-to-shapefile``
Similar to how ``excel-to-dbf`` takes a dBase database file (example.dbf) and converts that to Excel format,
this does the same with a Shapefile.
It uses the ``geopandas.GeoDataFrame`` class to read in the shapefile.
The geometry column is serialized to a nested list of coordinates using the JSON notation.
"""
import os
import shapely
import json
import pandas as pd
import geopandas as gpd
import cea.config
import cea.inputlocator
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def excel_to_shapefile(excel_file, shapefile, index, crs, polygon=True):
"""
Expects the Excel file to be in the format created by
``cea shapefile-to-excel``
:param polygon: Set this to ``False`` if the Excel file contains polyline data in the
``geometry`` column instead of the default polygon data. (polylines are used for representing streets etc.)
:type polygon: bool
"""
df = pd.read_excel(excel_file)
if polygon:
geometry = [shapely.geometry.polygon.Polygon(json.loads(g)) for g in df.geometry]
else:
geometry = [shapely.geometry.LineString(json.loads(g)) for g in df.geometry]
    df = df.drop('geometry', axis=1)  # drop the serialized text column before attaching the parsed shapes
gdf = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
gdf.to_file(shapefile, driver='ESRI Shapefile', encoding='ISO-8859-1')
def main(config):
"""
Run :py:func:`excel_to_shapefile` with the values from the configuration file, section ``[shapefile-tools]``.
:param config: the configuration object to use
:type config: cea.config.Configuration
:return:
"""
assert os.path.exists(config.shapefile_tools.excel_file), (
        'Excel file not found: %s' % config.shapefile_tools.excel_file)
# print out all configuration variables used by this script
print("Running excel-to-shapefile with excel-file = %s" % config.shapefile_tools.excel_file)
print("Running excel-to-shapefile with shapefile = %s" % config.shapefile_tools.shapefile)
print("Running excel-to-shapefile with crs = %s" % config.shapefile_tools.crs)
print("Running excel-to-shapefile with polygon = %s" % config.shapefile_tools.polygon)
excel_to_shapefile(excel_file=config.shapefile_tools.excel_file, shapefile=config.shapefile_tools.shapefile,
index=config.shapefile_tools.index, crs=config.shapefile_tools.crs,
polygon=config.shapefile_tools.polygon)
print("done.")
if __name__ == '__main__':
main(cea.config.Configuration())
| mit | 8,380,056,851,555,585,000 | 33.061728 | 115 | 0.694817 | false |
LCAS/zoidbot | vrep_teleop/scripts/teleop_data_logging.py | 1 | 7445 | #!/usr/bin/env python
# run the baxterTeleopRecording.ttt file on Vrep before running this
import rospy
from baxter_core_msgs.msg import DigitalIOState
from sensor_msgs.msg import JointState
from vrep_teleop.msg import Joints
import numpy as np
import matplotlib.pyplot as plt
class RecordData:
def __init__(self):
self.left_button_pressed = 0
self.left_cuff_pressed = 0
self.right_cuff_pressed = 0
self.record = 0
self.start = 0
self.end = 0
self.n = 14
self.z = 0
self.currentPos = []
self.targetPos = []
self.newTargetPos = []
self.errorValue = []
self.targetVel = []
self.currentVel = []
self.newTargetVel = []
self.effort = []
self.ts = []
self.f1 = open("/home/user/turnDemoMaster_1.txt", "w+")
self.f2 = open("/home/user/turnDemoSlave_1.txt", "w+")
rospy.Subscriber('robot/digital_io/left_lower_button/state', DigitalIOState, self.left_button)
rospy.Subscriber('robot/digital_io/left_lower_cuff/state', DigitalIOState, self.left_cuff)
rospy.Subscriber('robot/digital_io/right_lower_cuff/state', DigitalIOState, self.right_cuff)
rospy.Subscriber("/robot/joint_states", JointState, self.master_state)
rospy.Subscriber("/vrep/joints", Joints, self.slave_state)
def left_button(self, data):
if data.state == 1:
self.left_button_pressed = 1
self.start = 1
else:
self.left_button_pressed = 0
if self.start == 1 and self.record == 1:
self.record = 0
self.end = 1
def left_cuff(self, data):
if data.state == 1:
self.left_cuff_pressed = 1
else:
self.left_cuff_pressed = 0
def right_cuff(self, data):
if data.state == 1:
self.right_cuff_pressed = 1
else:
self.right_cuff_pressed = 0
if (self.left_cuff_pressed == 1 or self.right_cuff_pressed == 1) and self.start == 1:
self.record = 1
self.start = 0
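    # Recording workflow implied by the three callbacks above: pressing the left lower
    # button arms the logger (start = 1); squeezing either lower cuff while armed starts
    # recording (record = 1, start = 0); pressing the button again ends the recording
    # (record = 0, end = 1), after which the state callbacks close both log files.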
def master_state(self, data):
if self.record == 1:
self.f1.write("%s \n" % data)
elif self.end == 1:
self.f1.close()
def slave_state(self, data):
if self.record == 1:
self.currentPos.extend(data.currentPos)
self.targetPos.extend(data.targetPos)
self.newTargetPos.extend(data.newTargetPos)
self.errorValue.extend(data.errorValue)
self.targetVel.extend(data.targetVel)
self.currentVel.extend(data.currentVel)
self.newTargetVel.extend(data.newTargetVel)
self.effort.extend(data.effort)
self.ts.extend(data.simTime)
self.z = self.z + 1
rospy.loginfo("master %s", data.seq)
self.f2.write("seq ")
self.f2.write("%d " % data.seq)
self.f2.write("\n")
self.f2.write("timeStamp ")
self.f2.write("%f " % data.timeStamp)
self.f2.write("\n")
self.f2.write("currentPos ")
for i in range(0, self.n):
self.f2.write("%f " % (data.currentPos[i]))
self.f2.write("\n")
self.f2.write("targetPos ")
for i in range(0, self.n):
self.f2.write("%f " % (data.targetPos[i]))
self.f2.write("\n")
self.f2.write("newTargetPos ")
for i in range(0, self.n):
self.f2.write("%f " % (data.newTargetPos[i]))
self.f2.write("\n")
self.f2.write("errorValue ")
for i in range(0, self.n):
self.f2.write("%f " % (data.errorValue[i]))
self.f2.write("\n")
self.f2.write("targetVel ")
for i in range(0, self.n):
self.f2.write("%f " % (data.targetVel[i]))
self.f2.write("\n")
self.f2.write("currentVel ")
for i in range(0, self.n):
self.f2.write("%f " % (data.currentVel[i]))
self.f2.write("\n")
self.f2.write("newTargetVel ")
for i in range(0, self.n):
self.f2.write("%f " % (data.newTargetVel[i]))
self.f2.write("\n")
self.f2.write("effort ")
for i in range(0, self.n):
self.f2.write("%f " % (data.effort[i]))
self.f2.write("\n")
self.f2.write("simTime ")
for i in range(0, self.n):
self.f2.write("%f " % (data.simTime[i]))
self.f2.write("\n")
self.f2.write("boxPosition ")
for i in range(0, 3):
self.f2.write("%f " % (data.boxPosition[i]))
self.f2.write("\n")
self.f2.write("boxOrientation ")
for i in range(0, 3):
self.f2.write("%f " % (data.boxOrientation[i]))
self.f2.write("\n")
elif self.end == 1:
self.f2.close()
if __name__ == '__main__':
rospy.init_node('trajectory_listener', anonymous=True)
try:
startRecord = RecordData()
while startRecord.end == 0:
pass
totalRead = startRecord.z
currentPos = np.array(startRecord.currentPos).reshape((totalRead, startRecord.n))
targetPos = np.array(startRecord.targetPos).reshape((totalRead, startRecord.n))
newTargetPos = np.array(startRecord.newTargetPos).reshape((totalRead, startRecord.n))
errorValue = np.array(startRecord.errorValue).reshape((totalRead, startRecord.n))
targetVel = np.array(startRecord.targetVel).reshape((totalRead, startRecord.n))
currentVel = np.array(startRecord.currentVel).reshape((totalRead, startRecord.n))
newTargetVel = np.array(startRecord.newTargetVel).reshape((totalRead, startRecord.n))
effort = np.array(startRecord.effort).reshape((totalRead, startRecord.n))
ts = np.array(startRecord.ts).reshape((totalRead, startRecord.n))
for i in range(0, startRecord.n):
plt.figure(i+1)
plt.figure(i+1).suptitle('Joint'+str(i+1))
plt.subplot(311)
plt.plot(ts[:, i]-ts[0, 0], targetPos[:, i], '.', label='Master')
plt.subplot(311)
plt.plot(ts[:, i]-ts[0, 0], newTargetPos[:, i], '. r', label='Master_Corrected')
plt.subplot(311)
plt.plot(ts[:, i]-ts[0, 0], currentPos[:, i], '. g', label='Slave')
plt.legend()
plt.xlabel('Time(in sec)')
plt.ylabel('Joint Angles(in Radians)')
plt.subplot(312)
plt.plot(ts[:, i]-ts[0, 0], errorValue[:, i], '.')
plt.xlabel('Time(in sec)')
plt.ylabel('Position Error(in Radians)')
plt.subplot(313)
plt.plot(ts[:, i]-ts[0, 0], targetVel[:, i], '.', label='Master_Velocity')
plt.subplot(313)
plt.plot(ts[:, i]-ts[0, 0], newTargetVel[:, i], '. r', label='Master_Velocity_Corrected')
plt.subplot(313)
plt.plot(ts[:, i]-ts[0, 0], currentVel[:, i], '. g', label='Slave_Velocity')
plt.legend()
plt.xlabel('Time(in sec)')
plt.ylabel('Joint Velocities(in Radians/sec)')
# plt.figure(i+1).savefig("jn"+str(i+1)+".png")
plt.show()
except rospy.ROSInterruptException:
pass
| mit | -7,452,702,805,220,498,000 | 36.41206 | 102 | 0.540094 | false |
choderalab/MSMs | attic/src/code/hmsm/trim_hmsm.py | 3 | 1243 | import shutil
import numpy as np
import pandas as pd
import mdtraj as md
from mixtape.utils import iterobjects
import mixtape.ghmm
import mixtape.featurizer
import os
name = "atomindices"
json_filename = "./%s.jsonlines" % name
feature_filename = "./%s.pkl" % name
models = list(iterobjects(json_filename))
df = pd.DataFrame(models)
x = df.ix[0]
T = np.array(x["transmat"])
p = np.array(x["populations"])
featurizer = mixtape.featurizer.load(feature_filename)
model = mixtape.ghmm.GaussianFusionHMM(3, featurizer.n_features)
model.means_ = x["means"]
model.vars_ = x["vars"]
model.transmat_ = x["transmat"]
model.populations_ = x["populations"]
trj0 = md.load("./system.subset.pdb")
atom_indices = np.loadtxt("./AtomIndices.dat", "int")
n_traj = 348
#n_traj = 131
scores = np.zeros(n_traj)
for i in range(n_traj):
print(i)
traj = md.load("./Trajectories/trj%d.h5" % i)
features = featurizer.featurize(traj)
scores[i] = model.score([features]) / float(len(features))
cutoff = 500.0 # atomindices
#cutoff = 0.0 # atompairs
k = 0
for i in range(n_traj):
if scores[i] > cutoff:
print(i, k)
shutil.copy("./Trajectories/trj%d.h5" % i, "./subset_%s/Trajectories/trj%d.h5" % (name, k))
k += 1
| gpl-2.0 | 8,470,554,097,710,361,000 | 22.903846 | 99 | 0.672566 | false |
ghl3/bamboo | tests/test_plotting.py | 1 | 1540 |
import numpy as np
import pandas
import bamboo.groups
import bamboo.frames
import bamboo.addons
from helpers import *
from bamboo import hist
group = ['A', 'A', 'A', 'A',
'B', 'B',
'C']
feature1 = [1, 1, 1, 1,
2, 2,
3]
feature2 = [10.0, 10.5, 9.5, 11.0,
20.0, 20.0,
0.0]
df = pandas.DataFrame({'group':group,
'feature1':feature1,
'feature2':feature2})
@plotting
def test_boxplot():
bamboo.frames.boxplot(df, 'feature1', 'feature2')
@plotting
def test_hexbin():
try:
from sklearn.datasets import make_blobs
except:
assert(False)
X1, Y1 = make_blobs(n_features=2, centers=3, n_samples=10000)
df = pandas.DataFrame(X1, columns=['x', 'y'])
bamboo.frames.hexbin(df, 'x', 'y')
@plotting
def test_hist_df():
bamboo.core.hist(df.groupby('group'))
@plotting
def test_hist_var():
bamboo.core.hist(df.groupby('group'), 'feature1')
@plotting
def test_hist():
dfgb = create_test_df_v3().groupby('group')
hist(dfgb['feature1'])
#hist(dfgb['feature2'])
@plotting
def test_summary_table():
df = pandas.DataFrame({'group': ['GOOD', 'GOOD', 'GOOD', 'GOOD', 'BAD', 'BAD', 'BAD'],
'x': [1, 2, 1, 3.4, 2, 5.6, 3],
'y':[10, 50, 10, 20, 20, 40, -10]})
bamboo.hist(df.groupby('group'), 'x',
addons=[bamboo.addons.summary_table],
bins=np.arange(0, 5, 1),
alpha=0.5)
| mit | -799,099,104,008,905,000 | 21.647059 | 90 | 0.534416 | false |
amaurywalbert/twitter | graphs/n3/n7_co_likes_creating_network_with_v1.3.py | 1 | 11313 | # -*- coding: latin1 -*-
################################################################################################
#
#
import datetime, sys, time, json, os, os.path, shutil, time, struct, random
import networkx as nx
import matplotlib.pyplot as plt
from math import*
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Version 1 - Build the N7 (co-likes) network from the collected data, following the instructions below:
##                  Version 1.1 - Try to fix the problem of high memory consumption while building the networks.
##                          - Fixed - Clear on the graph
##                  Version 1.2 - Use the data set with 500 random egos.
##                  Version 1.3 - remove the part that records missing files... "partial missing"
##                              Load the alters' data into memory (authors_set, likes_set)
##
##                  WARNING - AT LEAST 8GB OF RAM REQUIRED
##
## # INPUT:
##          - List of Egos (egos)
##          - Set of Like authors (alters) of each Ego - builds the set of Alters
##          - Set of likes of the ego and of each Alter - used to check the CSJ between the sets of each pair of vertices
##
## # ALGORITHM
##          0 - For each ego[i]:
##              1 - Initialise ego_[i] and all of the ego's like authors (alters[i][n]) as vertices of a graph - (hash table - ego+alters - vertices)
##                  2 - For each element i in the vertex set (v[i]):
##                      3 - For each element j in the vertex set (v[j]):
##                          4 - With i != j:
##                              5 - If there is no edge (v[i],v[j]) yet:
##                                  6 - Create an edge between (v[i],v[j]) with weight equal to the CSJ between their sets of alters
##          7 - Remove edges whose weight is zero
##
######################################################################################################################################################################
################################################################################################
# Reads the binary files containing the users' data (likes and their authors)
################################################################################################
def read_arq_bin(file):
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
likes_set = set()
authors_set = set()
while f.tell() < tamanho:
buffer = f.read(favorites_struct.size)
like, author = favorites_struct.unpack(buffer)
likes_set.add(long(like))
authors_set.add(long(author))
status = {'likes':likes_set, 'authors':authors_set}
return status
################################################################################################
# Function to compute the CSJ (Jaccard similarity coefficient) between two data sets
################################################################################################
def csj(a,b):
intersection = len(a.intersection(b))
union = len(a.union(b))
	# Compute the CSJ between the two sets, assigning 0 when the union of the sets is empty
if union != 0:
result = intersection/float(union) # float(uniao) para resultado no intervalo [0,1
else:
result = 0
# print ("União: "+str(union)+" --- Interseção: "+str(intersection)+" --- CSJ: "+str(result))
return result
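# Worked example: csj({1, 2, 3}, {2, 3, 4}) = |{2, 3}| / |{1, 2, 3, 4}| = 2/4 = 0.5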
################################################################################################
# Função para salvar os grafos em formato padrão para entrada nos algoritmos de detecção
################################################################################################
def save_graph(ego, G): # Função recebe o id do ego corrente e o grafo (lista de arestas)
with open(output_dir+str(ego)+".edge_list", 'wb') as graph:
nx.write_weighted_edgelist(G,graph) # Imprimir lista de arestas COM PESO
G.clear()
################################################################################################
# Builds the networks - graphs
################################################################################################
def ego_net(ego,status_ego,l): # Receives the ego id, the set of alters and the ordinal number of the current ego
G=nx.Graph() # Inicia um grafo NÂO DIRECIONADO
G.clear()
ti = datetime.datetime.now() # Tempo do inicio da construção do grafo
	########################################### # Build a hash table with the data sets (likes_ids) of the vertices (ego and all alters)
vertices = {}
vertices[ego] = status_ego['likes']
for alter in status_ego['authors']:
if not alter == ego: # Se não for o ego...
try:
status_alter = read_arq_bin(alters_dir+str(alter)+".dat") # Chama função para converter o conjunto de autores de retweets do ego do formato Binário para uma lista do python
vertices[alter] = status_alter['likes'] # Adiciona conjunto de dados (likes_ids) do alter à tabela hash
except IOError: # Tratamento de exceção - caso falte algum arquivo do alter,
pass
###########################################
print ("Construindo grafo do ego n: "+str(l)+" - Quantidade de vertices: "+str(len(vertices)))
indice = 0
	########################################### # Creating edges
for i in vertices:
indice +=1
print ("Ego: "+str(l)+" - Verificando arestas para alter: "+str(indice)+"/"+str(len(vertices)))
for j in vertices:
if i != j:
if not G.has_edge(i,j): ### Se ainda não existe uma aresta entre os dois vértices
csj_i_j = csj(vertices[i],vertices[j]) # Calcula o CSJ entre os dois conjuntos
G.add_edge(i,j,weight=csj_i_j) # Cria aresta
	########################################### # Remove edges whose CSJ is zero.
	########################################### # Removal is left until this point because creating those edges is useful while the networks are being generated...
for (u,v,d) in G.edges(data='weight'):
if d==0:
G.remove_edge(u,v)
###########################################
tf = datetime.datetime.now() # Tempo final da construção do grafo do ego corrente
tp = tf - ti # Cálculo do tempo gasto para a construção do grafo
print ("Lista de arestas do grafo "+str(l)+" construído com sucesso. EGO: "+str(ego))
print("Tempo para construir o grafo: "+str(tp))
return G
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
# Runs the test and collects the data of each user specified in the file.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
missing = set() # Conjunto de usuários faltando...
l = 0 # Variável para exibir o número ordinal do ego que está sendo usado para a construção do grafo
ti = datetime.datetime.now() # Tempo de início do processo de criação de todos os grafos
for file in os.listdir(egos_dir): # Para cada arquivo de Ego no diretório
l+=1 # Incrementa contador do número do Ego
ego = file.split(".dat") # Separa a extensão do id do usuário no nome do arquivo
ego = long(ego[0]) # recebe o id do usuário em formato Long
if not dictionary.has_key(ego):
status_ego = read_arq_bin(egos_dir+file) # Chama função para converter o conjunto de autores de retweets do ego do formato Binário para uma lista do python
n_alters = len(status_ego['authors']) # Variável que armazena o tamanho do conjunto de alters do usuário corrente
print("######################################################################")
print ("Construindo grafo do ego n: "+str(l)+" - Quantidade de alters: "+str(n_alters))
G = ego_net(ego,status_ego,l) # Inicia função de criação do grafo (lista de arestas) para o ego corrente
print
print("Salvando o grafo...")
save_graph(ego,G)
G.clear()
tp = datetime.datetime.now()
tp = tp - ti
print ("Tempo decorrido: "+str(tp))
print("######################################################################")
else:
print ("Lista de arestas já criada para o ego "+str(l)+": "+str(ego))
print
tf = datetime.datetime.now() # Recebe tempo final do processo de construção dos grafos
t = tf - ti # Calcula o tempo gasto com o processo de criação dos grafos
print("Tempo total do script: "+str(t))
print("Quantidade total de usuários faltando: "+str(len(missing)))
print("######################################################################")
print("Networks created!")
print("######################################################################\n")
######################################################################################################################################################################
#
# START OF THE PROGRAM
#
######################################################################################################################################################################
######################################################################################################################
egos_dir = "/home/amaury/dataset/n3/egos/bin/"################# Directory containing the Ego files
alters_dir = "/home/amaury/dataset/n3/alters/bin/" ############ Directory containing the Alter files
output_dir = "/home/amaury/graphs/n7/graphs_with/" ################# Directory for storing the edge-list files
formato = 'll' ####################################### A long for the tweet id and another long for the author
favorites_struct = struct.Struct(formato) ##################### Initialise the struct object so the specific format can be stored in the binary file
######################################################################################################################
#Create the directories for storing the files
if not os.path.exists(output_dir):
os.makedirs(output_dir)
###### Initialising the dictionary - hash table from the files already created.
print
print("######################################################################")
print ("Criando tabela hash...")
dictionary = {} #################################################### {key:value} table to speed up lookups of users already processed
for file in os.listdir(output_dir):
user_id = file.split(".edge_list")
user_id = long(user_id[0])
dictionary[user_id] = user_id
print ("Tabela hash criada com sucesso...")
print("######################################################################\n")
#Run the main method
if __name__ == "__main__": main() | gpl-3.0 | -8,057,541,321,551,177,000 | 55.575758 | 176 | 0.468546 | false |
toobaz/pandas | pandas/tests/tseries/offsets/test_ticks.py | 2 | 9347 | """
Tests for offsets.Tick and subclasses
"""
from datetime import datetime, timedelta
from hypothesis import assume, example, given, settings, strategies as st
import numpy as np
import pytest
from pandas import Timedelta, Timestamp
import pandas.util.testing as tm
from pandas.tseries import offsets
from pandas.tseries.offsets import Hour, Micro, Milli, Minute, Nano, Second
from .common import assert_offset_equal
# ---------------------------------------------------------------------
# Test Helpers
tick_classes = [Hour, Minute, Second, Milli, Micro, Nano]
# ---------------------------------------------------------------------
def test_apply_ticks():
result = offsets.Hour(3).apply(offsets.Hour(4))
exp = offsets.Hour(7)
assert result == exp
def test_delta_to_tick():
delta = timedelta(3)
tick = offsets._delta_to_tick(delta)
assert tick == offsets.Day(3)
td = Timedelta(nanoseconds=5)
tick = offsets._delta_to_tick(td)
assert tick == Nano(5)
@pytest.mark.parametrize("cls", tick_classes)
@settings(deadline=None) # GH 24641
@example(n=2, m=3)
@example(n=800, m=300)
@example(n=1000, m=5)
@given(n=st.integers(-999, 999), m=st.integers(-999, 999))
def test_tick_add_sub(cls, n, m):
# For all Tick subclasses and all integers n, m, we should have
# tick(n) + tick(m) == tick(n+m)
# tick(n) - tick(m) == tick(n-m)
left = cls(n)
right = cls(m)
expected = cls(n + m)
assert left + right == expected
assert left.apply(right) == expected
expected = cls(n - m)
assert left - right == expected
@pytest.mark.parametrize("cls", tick_classes)
@settings(deadline=None)
@example(n=2, m=3)
@given(n=st.integers(-999, 999), m=st.integers(-999, 999))
def test_tick_equality(cls, n, m):
assume(m != n)
# tick == tock iff tick.n == tock.n
left = cls(n)
right = cls(m)
assert left != right
assert not (left == right)
right = cls(n)
assert left == right
assert not (left != right)
if n != 0:
assert cls(n) != cls(-n)
# ---------------------------------------------------------------------
def test_Hour():
assert_offset_equal(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assert_offset_equal(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assert_offset_equal(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
assert_offset_equal(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assert Hour(3) + Hour(2) == Hour(5)
assert Hour(3) - Hour(2) == Hour()
assert Hour(4) != Hour(1)
def test_Minute():
assert_offset_equal(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))
assert_offset_equal(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assert_offset_equal(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2))
assert_offset_equal(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assert Minute(3) + Minute(2) == Minute(5)
assert Minute(3) - Minute(2) == Minute()
assert Minute(5) != Minute()
def test_Second():
assert_offset_equal(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1))
assert_offset_equal(Second(-1), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
assert_offset_equal(
2 * Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 2)
)
assert_offset_equal(
-1 * Second(), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1)
)
assert Second(3) + Second(2) == Second(5)
assert Second(3) - Second(2) == Second()
def test_Millisecond():
assert_offset_equal(
Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1000)
)
assert_offset_equal(
Milli(-1), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1)
)
assert_offset_equal(
Milli(2), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000)
)
assert_offset_equal(
2 * Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000)
)
assert_offset_equal(
-1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1)
)
assert Milli(3) + Milli(2) == Milli(5)
assert Milli(3) - Milli(2) == Milli()
def test_MillisecondTimestampArithmetic():
assert_offset_equal(
Milli(), Timestamp("2010-01-01"), Timestamp("2010-01-01 00:00:00.001")
)
assert_offset_equal(
Milli(-1), Timestamp("2010-01-01 00:00:00.001"), Timestamp("2010-01-01")
)
def test_Microsecond():
assert_offset_equal(Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1))
assert_offset_equal(
Micro(-1), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1)
)
assert_offset_equal(
2 * Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2)
)
assert_offset_equal(
-1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1)
)
assert Micro(3) + Micro(2) == Micro(5)
assert Micro(3) - Micro(2) == Micro()
def test_NanosecondGeneric():
timestamp = Timestamp(datetime(2010, 1, 1))
assert timestamp.nanosecond == 0
result = timestamp + Nano(10)
assert result.nanosecond == 10
reverse_result = Nano(10) + timestamp
assert reverse_result.nanosecond == 10
def test_Nanosecond():
timestamp = Timestamp(datetime(2010, 1, 1))
assert_offset_equal(Nano(), timestamp, timestamp + np.timedelta64(1, "ns"))
assert_offset_equal(Nano(-1), timestamp + np.timedelta64(1, "ns"), timestamp)
assert_offset_equal(2 * Nano(), timestamp, timestamp + np.timedelta64(2, "ns"))
assert_offset_equal(-1 * Nano(), timestamp + np.timedelta64(1, "ns"), timestamp)
assert Nano(3) + Nano(2) == Nano(5)
assert Nano(3) - Nano(2) == Nano()
# GH9284
assert Nano(1) + Nano(10) == Nano(11)
assert Nano(5) + Micro(1) == Nano(1005)
assert Micro(5) + Nano(1) == Nano(5001)
@pytest.mark.parametrize(
"kls, expected",
[
(Hour, Timedelta(hours=5)),
(Minute, Timedelta(hours=2, minutes=3)),
(Second, Timedelta(hours=2, seconds=3)),
(Milli, Timedelta(hours=2, milliseconds=3)),
(Micro, Timedelta(hours=2, microseconds=3)),
(Nano, Timedelta(hours=2, nanoseconds=3)),
],
)
def test_tick_addition(kls, expected):
offset = kls(3)
result = offset + Timedelta(hours=2)
assert isinstance(result, Timedelta)
assert result == expected
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_division(cls):
off = cls(10)
assert off / cls(5) == 2
assert off / 2 == cls(5)
assert off / 2.0 == cls(5)
assert off / off.delta == 1
assert off / off.delta.to_timedelta64() == 1
assert off / Nano(1) == off.delta / Nano(1).delta
if cls is not Nano:
# A case where we end up with a smaller class
result = off / 1000
assert isinstance(result, offsets.Tick)
assert not isinstance(result, cls)
assert result.delta == off.delta / 1000
if cls._inc < Timedelta(seconds=1):
# Case where we end up with a bigger class
result = off / 0.001
assert isinstance(result, offsets.Tick)
assert not isinstance(result, cls)
assert result.delta == off.delta / 0.001
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_rdiv(cls):
off = cls(10)
delta = off.delta
td64 = delta.to_timedelta64()
with pytest.raises(TypeError):
2 / off
with pytest.raises(TypeError):
2.0 / off
assert (td64 * 2.5) / off == 2.5
if cls is not Nano:
# skip pytimedelta for Nano since it gets dropped
assert (delta.to_pytimedelta() * 2) / off == 2
result = np.array([2 * td64, td64]) / off
expected = np.array([2.0, 1.0])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("cls1", tick_classes)
@pytest.mark.parametrize("cls2", tick_classes)
def test_tick_zero(cls1, cls2):
assert cls1(0) == cls2(0)
assert cls1(0) + cls2(0) == cls1(0)
if cls1 is not Nano:
assert cls1(2) + cls2(0) == cls1(2)
if cls1 is Nano:
assert cls1(2) + Nano(0) == cls1(2)
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_equalities(cls):
assert cls() == cls(1)
@pytest.mark.parametrize("cls", tick_classes)
def test_tick_offset(cls):
assert not cls().isAnchored()
@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks(cls):
three = cls(3)
four = cls(4)
assert three < cls(4)
assert cls(3) < four
assert four > cls(3)
assert cls(4) > three
assert cls(3) == cls(3)
assert cls(3) != cls(4)
@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks_to_strs(cls):
# GH#23524
off = cls(19)
# These tests should work with any strings, but we particularly are
# interested in "infer" as that comparison is convenient to make in
# Datetime/Timedelta Array/Index constructors
assert not off == "infer"
assert not "foo" == off
for left, right in [("infer", off), (off, "infer")]:
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
| bsd-3-clause | 5,927,153,196,646,788,000 | 28.02795 | 88 | 0.596448 | false |
raghavrv/scikit-learn | examples/svm/plot_svm_anova.py | 33 | 2024 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an
SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
# #############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
# #############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
# #############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using 1 CPU
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause | 8,551,080,352,086,274,000 | 33.896552 | 79 | 0.615119 | false |
asford/depth | bin/apps_backend.py | 1 | 5022 | # This is part of DEPTH.
# DEPTH (Version: 2.0) computes the closest distance of a residue/atom to bulk solvent and predicts small molecule binding site of a protein.
# Copyright (C) 2013, Kuan Pern Tan, Nguyen Thanh Binh, Raghavan Varadarajan and M.S. Madhusudhan
#
# DEPTH is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# DEPTH is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with DEPTH. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import *
def read2floatarray(s):
w = []
for i in range(len(s)):
try:
t = float(s[i])
except ValueError:
t = None
# end try
w.append(t)
# end for
return array(w, dtype=float)
# end def
def isint(s):
if s - int(s) == 0:
return True
else:
return False
# end if
# end def
def read_table(fname, FS = None, label = [], check_num = True, check_int = True):
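    # Read a delimited file into a list of rows. Unless check_num is False,
    # numeric-looking fields are converted to float (and to int when integral),
    # except for the column indices listed in `label`.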
output = []
fin = open(fname)
for line in fin:
output.append(line.split(FS))
# end for
fin.close()
if check_num == False:
return output
# end if
if type(label) == int:
label = [label]
# end for
for i in range(len(output)):
for j in range(len(output[i])):
if j not in label:
try:
output[i][j] = float(output[i][j])
if isint(output[i][j]):
output[i][j] = int(output[i][j])
# end if
except ValueError:
pass
# end try
# end if
# end for
# end for
return output
# end def
class bookmarks:
def __init__(self):
self.prefix = '<li> <a href='
self.suffix = '</a> </li>'
self.record = []
# end def
def add(self, name, title):
line = self.prefix+'"#'+name+'"> '+title+self.suffix
self.record.append(line)
# end def
def generate(self):
return self.record
# end def
# end class
def matplotlib_1st_nan_bugfix(S):
# first number cannot be 'NaN', matplotlib bug. Bug fix here
start = 0
n = len(S[0])
for i in range(n):
if list(set([isnan(s[i]) for s in S])) == [False]:
start = i
break
# end if
# end for
W = [s[start:] for s in S]
return W
# end def
def plot_type_I(y_mean, y_std, xnames, title, xlabel, ylabel, figname, xlim='auto', ylim='auto'):
plt.cla()
y_upper = y_mean + y_std
y_lower = y_mean - y_std
x = range(len(y_mean))
x, y_mean, y_upper, y_lower = matplotlib_1st_nan_bugfix([x, y_mean, y_upper, y_lower])
plt_mean = plt.plot(x, y_mean , 'r-', linewidth=2)
plt_std = plt.plot(x, y_upper, 'g:')
plt_std = plt.plot(x, y_lower, 'g:')
plt.legend([plt_mean, plt_std], ["mean", "stdev"])
x_nums = [int(t) for t in linspace(0, len(y_mean)-1, 10)] # 10 intervals + last point
x_tics = [xnames[i] for i in x_nums]
plt.xticks(x_nums, x_tics, rotation=90)
if xlim == 'auto':
plt.xlim(0, len(y_mean)+1)
else:
plt.xlim(*zip(xlim))
# end if
if ylim == 'auto':
pass
elif ylim == 'reverse':
max_y = max(max(y_upper), max(y_lower))
min_y = min(min(y_upper), min(y_lower))
plt.ylim(max_y, min_y)
# end if
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig(figname)
# end def
def plot_type_II(y, xnames, hlines, title, xlabel, ylabel, figname, xlim='auto', ylim='auto'):
plt.cla()
x = range(len(y))
x, y = matplotlib_1st_nan_bugfix([x, y])
plt_y = plt.plot(x, y , 'r-', linewidth=2)
if hlines != None:
plt_hlines = []
hvalues, hlegends = hlines
for h in hvalues:
plt_hlines.append(plt.axhline(h))
# end for
plt.legend(plt_hlines, hlegends)
# end if
x_nums = [int(t) for t in linspace(0, len(y)-1, 10)] # 10 intervals + last point
x_tics = [xnames[i] for i in x_nums]
plt.xticks(x_nums, x_tics, rotation=90)
if xlim == 'auto':
plt.xlim(0, len(y)+1)
else:
plt.xlim(*zip(xlim))
# end if
if ylim == 'auto':
stdev = std(y)
plt.ylim(min(y)-stdev/3, max(y)+stdev/3)
elif ylim == 'reverse':
plt.ylim(max(y), min(y))
# end if
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig(figname)
# end def
class Summary:
def __init__(self):
self.title = None
self.out = []
# end def
def add_title(self, title):
self.title = title
# end def
def add(self, pair):
name, par = pair
self.out.append('<tr><td>'+name+'</td><td>'+par+'</td><tr>')
# end def
def generate(self):
self.lines = []
self.lines.append('<table>')
self.lines.append('<tbody><tr><td><b>'+self.title+'</td></tr>')
for i in range(len(self.out)):
self.lines.append('\t'+self.out[i])
# end for
self.lines.append('</table>')
output = '\n'.join(self.lines)
return output
# end def
# end class
| gpl-3.0 | 6,149,589,249,083,286,000 | 22.35814 | 241 | 0.639785 | false |
rlzijdeman/nlgis2 | web/api/api.py | 1 | 21490 | # Copyright (C) 2014 International Institute of Social History.
# @author Vyacheslav Tykhonov <vty@iisg.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the GNU Affero General Public License in all respects
# for all of the code used other than as permitted herein. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you do not
# wish to do so, delete this exception statement from your version. If you
# delete this exception statement from all source files in the program,
# then also delete it in the license file.
from flask import Flask, Response, request, send_from_directory
from twisted.web import http
import json
import simplejson
import urllib2
import glob
import csv
import xlwt
import os
import sys
import psycopg2
import psycopg2.extras
import pprint
import collections
import getopt
import numpy as np
import pandas as pd
import random
import ConfigParser
from subprocess import Popen, PIPE, STDOUT
from random import randint
import brewer2mpl
import string
import re
def connect():
cparser = ConfigParser.RawConfigParser()
cpath = "/etc/apache2/nlgiss2.config"
cparser.read(cpath)
options = {}
dataoptions = cparser.items( "dataoptions" )
for key, value in dataoptions:
options[key] = value
database = cparser.get('config', 'dbname')
if request.args.get('custom'):
database = cparser.get('config', 'customdbname')
conn_string = "host='%s' dbname='%s' user='%s' password='%s'" % (cparser.get('config', 'dbhost'), database, cparser.get('config', 'dblogin'), cparser.get('config', 'dbpassword'))
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string)
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
return (cursor, options)
def json_generator(c, jsondataname, data):
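    # Build {jsondataname: [ {column: value, ...}, ... ]} from the fetched rows,
    # taking column names from the cursor description, and return it as a JSON string.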
sqlnames = [desc[0] for desc in c.description]
jsonlist = []
jsonhash = {}
for valuestr in data:
datakeys = {}
for i in range(len(valuestr)):
name = sqlnames[i]
value = valuestr[i]
datakeys[name] = value
#print "%s %s", (name, value)
jsonlist.append(datakeys)
jsonhash[jsondataname] = jsonlist;
json_string = json.dumps(jsonhash, encoding="utf-8", sort_keys=True, indent=4)
return json_string
def load_years(cursor):
data = {}
sql = "select * from datasets.years where 1=1";
sql = "select year, count(*) as count from datasets.data where 1=1"
sql = sqlfilter(sql)
sql = sql + ' group by year order by year asc';
# execute
cursor.execute(sql)
# retrieve the records from the database
data = cursor.fetchall()
jsondata = json_generator(cursor, 'years', data)
return jsondata
def sqlfilter(sql):
    # Append an "AND <column> in (...)" clause for every request parameter that
    # names a database column; the keys below only control the output format.
    control_keys = ('datarange', 'output', 'custom', 'scales', 'categories', 'csv')
    for key, value in request.args.items():
        items = request.args.get(key, '')
        itemlist = items.split(",")
        # quote every comma-separated item so multi-valued filters work
        sqlparams = ",".join("'%s'" % item for item in itemlist)
        if key not in control_keys:
            sql += " AND %s in (%s)" % (key, sqlparams)
    return sql
def load_locations(cursor, year, indicator):
data = {}
sql = "select naam, amsterdam_code, year, count(*) from datasets.data where 1=1 "
limit = 0
sql = sqlfilter(sql)
sql = sql + ' group by naam, year, amsterdam_code'
# execute
cursor.execute(sql)
# retrieve the records from the database
data = cursor.fetchall()
jsondata = json_generator(cursor, 'locations', data)
return jsondata
def list_topics(cursor):
data = {}
# Before the list of topics will be available a few sql statements should be run
# update datasets.topics set count=subquery.as_count from (select code as as_code, count(*) as as_count from datasets.data group by as_code) as subquery where topic_code=subquery.as_code;
#update datasets.topics set startyear=subquery.startyear from (select code as as_code, min(year) as startyear from datasets.data group by as_code) as subquery where topic_code=subquery.as_code;
# update datasets.topics set totalyears=subquery.total from (select count(DISTINCT year) as total, code as as_code from datasets.data group by as_code) as subquery where topic_code=subquery.as_code;
sql = "select topic_name, topic_code, count, startyear, totalyears from datasets.topics where startyear > 0 order by count desc"
# execute
cursor.execute(sql)
# retrieve the records from the database
data = cursor.fetchall()
columns = [i[0] for i in cursor.description]
topics = {}
maxvalue = -1
for topic in data:
topicdata = {}
letter = 'A'
for i, field in enumerate(columns):
topicdata[field] = topic[i]
if field == 'topic_code':
mletter = re.match("^(\w)", topicdata[field])
letter = mletter.group(0)
if maxvalue == -1:
if field == 'count':
findindex = columns.index('totalyears')
if topic[findindex] < 10:
maxvalue = topicdata[field]
        topicdata['letter'] = letter
        topicdata['max'] = maxvalue
topics[topicdata['topic_code']] = topicdata
#jsondata = json_generator(cursor, 'topics', topics)
jsondata = json.dumps(topics, encoding="utf-8", sort_keys=True, indent=4)
return jsondata
def load_topics(cursor, year, indicator):
data = {}
sql = "select code, indicator, topic_name, count(*) as count from datasets.data as d, datasets.topics as t where d.code=t.topic_code "
limit = 0
sql = sqlfilter(sql)
try:
if limit:
sql = sql + ' limit ' + str(limit)
except:
limit = 0
sql = sql + ' group by code, indicator, t.topic_name'
# execute
cursor.execute(sql)
# retrieve the records from the database
data = cursor.fetchall()
jsondata = json_generator(cursor, 'codes', data)
return jsondata
def load_classes(cursor):
data = {}
sql = "select topic_code, topic_name from datasets.topics where 1=1"
sql = sqlfilter(sql)
# execute
cursor.execute(sql)
# retrieve the records from the database
data = cursor.fetchall()
jsondata = json_generator(cursor, 'indicators', data)
return jsondata
def load_regions(cursor):
data = {}
sql = "select * from datasets.regions where 1=1";
sql = sqlfilter(sql)
sql = sql + ';'
# execute
cursor.execute(sql)
# retrieve the records from the database
data = cursor.fetchall()
jsondata = json_generator(cursor, 'regions', data)
return jsondata
def medianlimits(dataframe):
scale = []
frame1 = []
frame2 = []
avg = dataframe.median()
for value in dataframe:
if value <= avg:
frame1.append(value)
else:
frame2.append(value)
avg1 = pd.DataFrame(frame1).median()
avg2 = pd.DataFrame(frame2).median()
return (dataframe.min(), int(avg1), int(avg), int(avg2), dataframe.max())
def combinerange(map):
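    # Turn the scale breakpoints into 'low-high' labels, walking from the top of
    # the scale downwards; returns both the list of labels and a comma-separated string.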
rangestr = ''
rangearr = []
for i in reversed(range(len(map))):
if i > 0:
id = i - 1
min = map[id]
max = map[i]
rangestr = rangestr + str(min) + '-' + str(max) + ', '
rangearr.append(str(min) + '-' + str(max))
rangestr = rangestr[:-2]
return (rangearr, rangestr)
def buildcategories(num):
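    # Evenly spaced percentile breakpoints for `num` bins,
    # e.g. buildcategories(4) -> [0, 25.0, 50.0, 75.0, 100.0].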
step = 100 / float(num)
print step
p = []
for i in range(num+1):
if i:
p.append(i * step)
else:
p.append(i)
return p
def meanlimits(dataframe):
scale = []
frame1 = []
frame2 = []
avg = dataframe.mean()
for value in dataframe:
if value <= avg:
frame1.append(value)
else:
frame2.append(value)
avg1 = pd.DataFrame(frame1).mean()
avg2 = pd.DataFrame(frame2).mean()
return (dataframe.min(), int(avg1), int(avg), int(avg2), dataframe.max())
def load_data(cursor, year, datatype, region, datarange, output, debug, dataframe, catnum, options, csvexport):
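    # Returns (rows, column names) for CSV export, (percentile breakpoints, colors)
    # when `dataframe` is set, and otherwise a (JSON payload, colors) pair.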
data = {}
colors = ['red', 'green', 'orange', 'brown', 'purple', 'blue', 'cyan']
colormap = 'Paired'
#colormap = 'Green'
if not catnum:
try:
catnumint = int(options['defaultcategories'])
if catnumint:
catnum = catnumint
except:
catnum = 8
bmap = brewer2mpl.get_map(colormap, 'Qualitative', catnum)
colors = bmap.hex_colors
# execute our Query
# for key, value in request.args.iteritems():
# extra = "%s<br>%s=%s<br>" % (extra, key, value)
query = "select * from datasets.data WHERE 1 = 1 ";
if output:
query = "select amsterdam_code, value from datasets.data WHERE 1 = 1 ";
query = sqlfilter(query)
if debug:
print "DEBUG " + query + " <br>\n"
query += ' order by id asc'
if debug:
return query
# execute
cursor.execute(query)
columns = [i[0] for i in cursor.description]
thiscount = 0
index = 0
for col in columns:
if col == 'value':
index = thiscount
thiscount = thiscount + 1
# retrieve the records from the database
records = cursor.fetchall()
if csvexport:
return (records, columns)
# Data upload
i = 0
values = []
index = 6
for row in records:
i = i + 1
values.append(row[index])
data[i] = row
# Calculate ranges based on percentile
qwranges = []
if values:
df = pd.DataFrame(values)
colormap = []
p = buildcategories(catnum)
for i in p:
val = round(np.percentile(df, i), 2)
qwranges.append(val)
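        # qwranges now holds the percentile breakpoints used further down to
        # assign each value to a color bin.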
fulldata = {}
#fulldata['data'] = []
fulldataarray = []
#for i in xrange(cursor.rowcount):
i = 0
for dataline in records:
dataset = {}
index = 0
amscode = ''
for item in dataline:
fieldname = columns[index]
#dataset[fieldname] = dataline[index]
#if fieldname == 'value':
# value = float(dataline[index])
if fieldname == 'amsterdam_code':
amscode = str(dataline[index])
else:
dataset[fieldname] = dataline[index]
k = item
index = index + 1
# Select colors
if datarange == 'random':
colorID = randint(0,4)
dataset['color'] = colors[colorID]
if datarange == 'binary':
colorID = 0
dataset['color'] = colors[colorID]
if not datarange:
datarange = 'calculate'
if datarange == 'calculate':
if dataset['value']:
colorID = 0
dataset['color'] = colors[colorID]
for i in qwranges:
if dataset['value'] > i:
dataset['r'] = i
dataset['color'] = colors[colorID]
colorID = colorID + 1
fulldata[amscode] = []
fulldata[amscode] = dataset
fulldataarray.append(dataset)
i = i + 1
#return json.dumps(fulldataarray)
jsondata = json.dumps(fulldata, ensure_ascii=False, sort_keys=True, indent=4)
row_count = 0
i = 0
values = []
index = 6
for row in records:
i = i + 1
#row['color'] = 'red'
values.append(row[index])
data[i] = row
# print row[0]
#jsondata = json_generator(fulldataarray)
if dataframe:
return (qwranges, colors)
df = pd.DataFrame(values)
colormap = []
p = buildcategories(catnum)
qw = []
for i in p:
val = round(np.percentile(df, i), 2)
qw.append(val)
if dataframe == 'mean':
colormap = meanlimits(df[0])
else:
colormap = medianlimits(df[0])
#colormap = [0, 1, 2, 3]
return qw
#return json_generator(cursor, 'ranges', colormap)
if year:
return (jsondata, colors)
else:
return (json_generator(cursor, 'data', records), colors)
app = Flask(__name__)
@app.route('/')
def test():
description = 'nlgis2 API Service v.0.1<br>/api/maps (map polygons)<br>/api/data (data services)<br>'
return description
@app.route('/demo')
def demo():
sql = "select * from datasets.topics where 1=1";
sql = sqlfilter(sql)
return sql
@app.route('/topicslist')
def topicslist():
(cursor, options) = connect()
data = list_topics(cursor)
return Response(data, mimetype='application/json')
@app.route('/topics')
def topics():
(cursor, options) = connect()
data = load_topics(cursor, 0, 0)
return Response(data, mimetype='application/json')
def load_province_data(apiurl, province):
jsondataurl = apiurl + province
req = urllib2.Request(jsondataurl)
opener = urllib2.build_opener()
f = opener.open(req)
dataframe = simplejson.load(f)
return dataframe
@app.route('/provincies')
def provincies():
thisprovince = ''
provinceurl = "http://www.gemeentegeschiedenis.nl/provincie/json/"
paramprovince = request.args.get('province');
if paramprovince:
thisprovince = paramprovince
provlist = ["Groningen", "Friesland", "Drenthe", "Overijssel", "Flevoland", "Gelderland", "Utrecht", "Noord-Holland", "Zuid-Holland", "Zeeland", "Noord-Brabant", "Limburg"]
provincies = {}
if thisprovince:
provlist = []
provlist.append(thisprovince)
for province in provlist:
data = load_province_data(provinceurl, province)
provincelist = []
for item in data:
locations = {}
#print item['amco'] + ' ' + item['provincie'] + ' ' + item['startjaar'] + ' ' + item['eindjaar'] + ' ' + item['naam']
locations['amsterdamcode'] = item['amco']
locations['name'] = item['naam']
locations['start'] = item['startjaar']
locations['end'] = item['eindjaar']
locations['cbscode'] = item['cbscode']
provincelist.append(locations)
provincies[province] = provincelist
jsondata = json.dumps(provincies, ensure_ascii=False, sort_keys=True, indent=4)
return Response(jsondata, mimetype='application/json')
@app.route('/locations')
def locations():
(cursor, options) = connect()
data = load_locations(cursor, 0, 0)
return Response(data, mimetype='application/json')
@app.route('/indicators')
def classes():
(cursor, options) = connect()
data = load_classes(cursor)
return Response(data, mimetype='application/json')
@app.route('/years')
def years():
(cursor, options) = connect()
data = load_years(cursor)
return Response(data, mimetype='application/json')
@app.route('/regions')
def regions():
(cursor, options) = connect()
data = load_regions(cursor)
return Response(data, mimetype='application/json')
@app.route('/scales')
def scales():
(cursor, options) = connect()
year = 0
datatype = '1.01'
region = 0
debug = 0
datarange = 'random'
output = ''
# Read parameters grom GET
paramrange = request.args.get('datarange');
paramyear = request.args.get('year')
paramoutput = request.args.get('output');
paramscales = request.args.get('scales');
paramcat = request.args.get('categories');
catnum = 8
if paramrange:
datarange = paramrange
if paramyear:
year = paramyear
if paramoutput:
output = paramoutput
if options['defaultcategories']:
catnumint = int(options['defaultcategories'])
if paramcat:
catnumint = int(paramcat)
catnum = catnumint
(data, colors) = load_data(cursor, year, datatype, region, datarange, output, debug, paramscales, catnum, options, '')
(rangearr, rangestr) = combinerange(data)
colormap = []
for color in reversed(colors):
colormap.append(color)
output = output + ' ' + color
output = ''
id = 0
scales = {}
for thisrange in rangearr:
output = output + ' ' + thisrange + '=' + str(id) + '<br>'
color = colormap[id]
savecolor = {}
savecolor['color'] = color
thisid = catnum - id
savecolor['range'] = thisrange
savecolor['max'] = data[thisid]
savecolor['sector'] = id
scales[id] = savecolor
id = id + 1
# Add no data in scale
if id:
savecolor = {}
savecolor['color'] = '#ffffff'
savecolor['range'] = 'no data'
scales[id] = savecolor
jsondata = json.dumps(scales, ensure_ascii=False, sort_keys=True, indent=4)
return Response(jsondata, mimetype='application/json')
@app.route('/data')
def data():
(cursor, options) = connect()
year = 0
csvexport = ''
datatype = '1.01'
code = ''
region = 0
debug = 0
datarange = 'random'
output = ''
paramrange = request.args.get('datarange');
paramyear = request.args.get('year')
paramoutput = request.args.get('output');
paramscales = request.args.get('scales');
paramcat = request.args.get('categories');
catnum = 8
if paramrange:
datarange = paramrange
if paramyear:
year = paramyear
if paramoutput:
output = paramoutput
if options['defaultcategories']:
catnumint = int(options['defaultcategories'])
#catnum = catnumint
if paramcat:
catnumint = int(paramcat)
catnum = catnumint
if request.args.get('csv'):
csvexport = 'yes'
if request.args.get('code'):
code = request.args.get('code')
(data, colors) = load_data(cursor, year, datatype, region, datarange, output, debug, paramscales, catnum, options, csvexport)
dataset = data
if csvexport:
cparser = ConfigParser.RawConfigParser()
cpath = "/etc/apache2/nlgiss2.config"
cparser.read(cpath)
imagepathloc = cparser.get('config', 'imagepathloc')
# CSV
localfile = 'dataset_' + code + '.csv'
fullpath = imagepathloc + '/' + localfile
f = csv.writer(open(fullpath, "wb+"))
f.writerow(colors)
#m = dataset['data']
for dataset in data:
f.writerow(dataset)
return send_from_directory(imagepathloc, localfile, as_attachment=True)
if paramscales:
#dataset = paramscales
(rangearr, rangestr) = combinerange(dataset)
output = ''
id = 0
for i in dataset:
if output:
output = output + ',' + str(i) #+ colors[id]
else:
output = str(i)
id = id + 1
json_response = rangestr
return Response(json_response) #, mimetype='application/json')
else:
return Response(dataset, mimetype='application/json')
#json_response = json.loads(data)
#return Response(data, mimetype='application/json;charset=utf-8')
return Response(dataset, mimetype='application/json')
@app.route('/maps')
def maps():
cparser = ConfigParser.RawConfigParser()
cpath = "/etc/apache2/nlgiss2.config"
cparser.read(cpath)
path = cparser.get('config', 'path')
geojson = cparser.get('config', 'geojson')
# Default year
year = cparser.get('config', 'year')
cmdgeo = ''
provcmd = ''
thisformat = 'topojson'
# get year from API call
paramyear = request.args.get('year');
# format for polygons: geojson, topojson, kml
paramformat = request.args.get('format');
paramprovince = request.args.get('province');
if paramyear:
year = paramyear
if paramformat == 'geojson':
cmdgeo = path + "/maps/bin/geojson.py " + str(year) + " " + geojson
thisformat = paramformat
if paramprovince:
provcmd = path + '/maps/bin/topoprovince.py ' + str(year) + " " + paramprovince + " " + thisformat
cmd = path + "/maps/bin/topojson.py " + str(year)
if cmdgeo:
cmd = cmdgeo
if provcmd:
cmd = provcmd
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
response = json.dumps(p.stdout.read())
#"objects":{"1812
#new_string = re.sub(r'"{\"1812"', r'{\"NLD', response)
json_response = json.loads(response)
return Response(json_response, mimetype='application/json;charset=utf-8')
if __name__ == '__main__':
app.run()
| gpl-3.0 | 3,016,022,400,229,428,700 | 29.439093 | 199 | 0.614472 | false |
mjvakili/gambly | code/tests/overlay.py | 1 | 1970 | import corner
import matplotlib.pyplot as plt
import numpy as np
ndim, nsamples = 3, 50000
# Generate some fake data.
np.random.seed(42)
data1 = np.random.randn(ndim * 4 * nsamples // 5).reshape([4 * nsamples // 5, ndim])
data2 = (4*np.random.rand(ndim)[None, :] + np.random.randn(ndim * nsamples // 5).reshape([nsamples // 5, ndim]))
data = np.vstack([data1, data2])
import matplotlib.lines as mlines
#blue_line = mlines.Line2D([], [], color='blue', label='SGM')
#red_line = mlines.Line2D([], [], color='red', label='GMM')
data2 = data.copy()
#data2[:,2] = data2[:,2] + 10.
data[:,2] = data[:,2] * 0.
print data[:,:2] - data2[:,:2]
#plt.legend(handles=[blue_line,red_line], bbox_to_anchor=(0., 1.0, 1., .0), loc=4)
prior_range = [[-4. , 10],[-3.,3.],[-3.,3.]]
# Plot it.
figure = corner.corner(data, labels=[r"$x$", r"$y$", r"$\log \alpha$", r"$\Gamma \, [\mathrm{parsec}]$"],
title_kwargs={"fontsize": 12},
range=prior_range,
quantiles=[0.16,0.5,0.84],
show_titles=False,
title_args={"fontsize": 12},
plot_datapoints=False,
fill_contours=True,
levels=[0.68, 0.95],
color='#ee6a50',
bins=50,
smooth=1.0)
corner.corner(data2, labels=[r"$x$", r"$y$", r"$\log \alpha$", r"$\Gamma \, [\mathrm{parsec}]$"],
title_kwargs={"fontsize": 12},
range=prior_range,
quantiles=[0.16,0.5,0.84],
show_titles=False,
title_args={"fontsize": 12},
plot_datapoints=False,
fill_contours=True,
levels=[0.68, 0.95],
color='blue',
bins=50,
smooth=1.0 , fig = figure)
plt.show()
| mit | -6,222,035,935,354,646,000 | 36.169811 | 112 | 0.471066 | false |
bradleyhd/netsim | 3d.py | 1 | 2087 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import json, argparse
from networkx.readwrite import json_graph as imports
parser = argparse.ArgumentParser(description='Draws a graph in 3d.')
parser.add_argument('graph_file', help='the name of the graph file')
parser.add_argument('--saveas', help='the name of the output file')
args = parser.parse_args()
config = {}
with open('config.json', 'r') as file:
config = json.load(file)
with open('data/' + args.graph_file + '.graph', 'r') as file:
data = json.load(file)
graph = imports.node_link_graph(data)
def draw_3d(G):
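  # Plot every edge in 3-D using (lon, lat, priority) as the endpoint coordinates;
  # shortcut edges are drawn in red, all others in blue. The figure is saved to test.pdf.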
print('Drawing graph...')
nodes = []
colors = []
positions = []
xs = []
ys = []
zs = []
priorities = {}
p = []
fig = plt.figure()
ax = fig.gca(projection='3d')
# plot edges
for node_1, node_2, data in G.edges(data = True):
# if 'real_arc' in data:
# #continue
x_1 = G.node[node_1]['lon']
y_1 = G.node[node_1]['lat']
x_2 = G.node[node_2]['lon']
y_2 = G.node[node_2]['lat']
z_1 = G.node[node_1]['priority']
z_2 = G.node[node_2]['priority']
# z_1 = 1
# z_2 = 1
color = 'b'
if data['is_shortcut']:
color = 'r'
# if data['highway'] in ['motorway', 'motorway_link', 'trunk', 'trunk_link', 'primary', 'primary_link']:
# z_1 = 10
# z_2 = 10
# color = 'g'
# elif data['highway'] in ['secondary', 'secondary_link', 'tertiary', 'tertiary_link', 'road']:
# z_1 = 5
# z_2 = 5
# color = 'y'
# else:
# z_1 = 0
# z_2 = 0
# color = 'r'
# xs.extend([x_1, x_2])
# ys.extend([y_1, y_2])
# zs.extend([z_1, z_2])
# colors.append(color)
ax.plot([x_1, x_2], [y_1, y_2], [z_1, z_2], color = color)
# ax.plot(xs, ys, zs=zs, color='r')
#plt.show()
plt.savefig('test.pdf')
draw_3d(graph) | gpl-3.0 | 783,887,631,444,302,500 | 24.777778 | 116 | 0.494011 | false |
teuben/masc | examples/mplot1.py | 3 | 7410 | #! /usr/bin/env python
#
# quick and dirty processing of the MD All Sky images
from astropy.io import fits
from scipy.misc import imsave
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import aplpy
import argparse as ap
import os.path
import logging
import time
def d(ff, box=[]):
#very specific for 16 bit data, since we want to keep the data in uint16
h = fits.open(ff, do_not_scale_image_data=True)
if len(box)==0:
return h[0].header, h[0].data
else:
# figure out 0 vs. 1 based offsets; box is 1 based
return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]]
def dsum(i0,i1,step = 1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = d(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
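            # keep every frame so the full stack can be written out later as cube.fits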
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
print (type(sum1), type(sum2))
return (h,sum1,np.sqrt(sum2),c)
def show(sum):
""" some native matplotlib display,
doesn't show pointsources well at all
"""
ip = plt.imshow(sum)
plt.show()
def show2(sum):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum)
#fig.show_grayscale()
fig.show_colorscale()
def show3(sum1,sum2):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum1,subplot=(2,2,1))
#fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1)
#fig.show_grayscale()
fig.show_colorscale()
# For some variations on this theme, e.g. time.time vs. time.clock, see
# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python
#
class Dtime(object):
""" Class to help measuring the wall clock time between tagged events
Typical usage:
dt = Dtime()
...
dt.tag('a')
...
dt.tag('b')
"""
def __init__(self, label=".", report=True):
self.start = self.time()
self.init = self.start
self.label = label
self.report = report
self.dtimes = []
dt = self.init - self.init
if self.report:
logging.info("Dtime: %s ADMIT " % self.label + str(self.start))
logging.info("Dtime: %s BEGIN " % self.label + str(dt))
def reset(self, report=True):
self.start = self.time()
self.report = report
self.dtimes = []
def tag(self, mytag):
t0 = self.start
t1 = self.time()
dt = t1 - t0
self.dtimes.append((mytag, dt))
self.start = t1
if self.report:
logging.info("Dtime: %s " % self.label + mytag + " " + str(dt))
return dt
def show(self):
if self.report:
for r in self.dtimes:
logging.info("Dtime: %s " % self.label + str(r[0]) + " " +
str(r[1]))
return self.dtimes
def end(self):
t0 = self.init
t1 = self.time()
dt = t1 - t0
if self.report:
logging.info("Dtime: %s END " % self.label + str(dt))
return dt
def time(self):
""" pick the actual OS routine that returns some kind of timer
time.time : wall clock time (include I/O and multitasking overhead)
time.clock : cpu clock time
"""
return np.array([time.clock(), time.time()])
if __name__ == '__main__':
logging.basicConfig(level = logging.INFO)
dt = Dtime("mplot1")
#--start, -s n
#--end, -e n
#--box x1 y1 x2 y2
parser = ap.ArgumentParser(description='Plotting .fits files.')
parser.add_argument('-f', '--frame', nargs = '*', type = int, help =
'Starting and ending parameters for the frames analyzed')
parser.add_argument('-b', '--box', nargs = 4, type = int, help =
'Coordinates for the bottom left corner and'
+ 'top right corner of a rectangle of pixels to be analyzed from the' +
' data. In the structure x1, y1, x2, y2 (1 based numbers)')
    parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = [0],  # list default so args['graphics'][0] works when the flag is omitted
help = 'Controls whether to display or save graphics. 0: no graphics,'
+ '1: display graphics, 2: save graphics as .png')
args = vars(parser.parse_args())
if args['frame'] == None:
count = 0
start = None
end = None
step = 1
#while we have yet to find an end
while end == None:
filename = 'IMG%05d.FIT' % count
#if start has not been found yet, and this file exists
if start == None and os.path.isfile(filename):
start = count
#if start has been found and we finally found a file that doesn't
#exist, set end to the last file that existed (count - 1.FIT)
elif start != None and not os.path.isfile(filename):
end = count - 1
count += 1
elif len(args['frame']) >= 2 and len(args['frame']) <= 3:
start = args['frame'][0] # starting frame (IMGnnnnn.FIT)
end = args['frame'][1] # ending frame
if len(args['frame']) == 3:
            step = args['frame'][2]  # optional third value is the frame step
else:
step = 1
else:
raise Exception("-f needs 0, 2, or 3 arguments.")
box = args['box'] # BLC and TRC
if box == None:
box = []
dt.tag("start")
# compute the average and dispersion of the series
h1,sum1,sum2,cube = dsum(start,end,step,box=box)
# end can be uninitialized here might throw an error?
dt.tag("dsum")
nz = cube.shape[0]
# delta X and Y images
dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis
dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis
# write them to FITS
fits.writeto('dsumx.fits', dsumx, h1, clobber=True)
fits.writeto('dsumy.fits', dsumy, h1, clobber=True)
fits.writeto('sum1.fits', sum1, h1, clobber=True)
fits.writeto('sum2.fits', sum2, h1, clobber=True)
dt.tag("write2d")
# 3D cube to
h1['NAXIS'] = 3
h1['NAXIS3'] = nz
fits.writeto('cube.fits', cube, h1, clobber=True)
dt.tag("write3d")
if args['graphics'][0] == 1:
# plot the sum1 and sum2 correllation (glueviz should do this)
s1 = sum1.flatten()
s2 = sum2.flatten()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(s1,s2)
plt.show()
show2(sum1)
show2(sum2)
if args['graphics'][0] == 2:
imsave('sum1.png', sum1)
imsave('sum2.png', sum2)
dt.tag("done")
dt.end()
| mit | 9,003,871,413,496,574,000 | 30.802575 | 80 | 0.543185 | false |
winklerand/pandas | asv_bench/benchmarks/io_bench.py | 1 | 7986 | import os
from .pandas_vb_common import *
from pandas import concat, Timestamp, compat
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import timeit
class _BenchTeardown(object):
"""
base class for teardown method implementation
"""
fname = None
def remove(self, f):
try:
os.remove(f)
except:
pass
def teardown(self):
self.remove(self.fname)
class frame_to_csv(_BenchTeardown):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
self.df = DataFrame(np.random.randn(3000, 30))
def time_frame_to_csv(self):
self.df.to_csv(self.fname)
class frame_to_csv2(_BenchTeardown):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
self.df = DataFrame({'A': range(50000), })
self.df['B'] = (self.df.A + 1.0)
self.df['C'] = (self.df.A + 2.0)
self.df['D'] = (self.df.A + 3.0)
def time_frame_to_csv2(self):
self.df.to_csv(self.fname)
class frame_to_csv_date_formatting(_BenchTeardown):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = DataFrame(self.rng, index=self.rng)
def time_frame_to_csv_date_formatting(self):
self.data.to_csv(self.fname, date_format='%Y%m%d')
class frame_to_csv_mixed(_BenchTeardown):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
self.df_float = DataFrame(np.random.randn(5000, 5), dtype='float64', columns=self.create_cols('float'))
self.df_int = DataFrame(np.random.randn(5000, 5), dtype='int64', columns=self.create_cols('int'))
self.df_bool = DataFrame(True, index=self.df_float.index, columns=self.create_cols('bool'))
self.df_object = DataFrame('foo', index=self.df_float.index, columns=self.create_cols('object'))
self.df_dt = DataFrame(Timestamp('20010101'), index=self.df_float.index, columns=self.create_cols('date'))
self.df_float.ix[30:500, 1:3] = np.nan
self.df = concat([self.df_float, self.df_int, self.df_bool, self.df_object, self.df_dt], axis=1)
def time_frame_to_csv_mixed(self):
self.df.to_csv(self.fname)
def create_cols(self, name):
return [('%s%03d' % (name, i)) for i in range(5)]
class read_csv_infer_datetime_format_custom(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%m/%d/%Y %H:%M:%S.%f'))))
def time_read_csv_infer_datetime_format_custom(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True)
class read_csv_infer_datetime_format_iso8601(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))))
def time_read_csv_infer_datetime_format_iso8601(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True)
class read_csv_infer_datetime_format_ymd(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y%m%d'))))
def time_read_csv_infer_datetime_format_ymd(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True)
class read_csv_skiprows(_BenchTeardown):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
self.index = tm.makeStringIndex(20000)
self.df = DataFrame({'float1': randn(20000), 'float2': randn(20000), 'string1': (['foo'] * 20000), 'bool1': ([True] * 20000), 'int1': np.random.randint(0, 200000, size=20000), }, index=self.index)
self.df.to_csv(self.fname)
def time_read_csv_skiprows(self):
read_csv(self.fname, skiprows=10000)
class read_csv_standard(_BenchTeardown):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
self.df.to_csv(self.fname)
def time_read_csv_standard(self):
read_csv(self.fname)
class read_parse_dates_iso8601(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=1000)
self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))))
def time_read_parse_dates_iso8601(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'])
class read_uint64_integers(object):
goal_time = 0.2
def setup(self):
self.na_values = [2**63 + 500]
self.arr1 = np.arange(10000).astype('uint64') + 2**63
self.data1 = '\n'.join(map(lambda x: str(x), self.arr1))
self.arr2 = self.arr1.copy().astype(object)
self.arr2[500] = -1
self.data2 = '\n'.join(map(lambda x: str(x), self.arr2))
def time_read_uint64(self):
read_csv(StringIO(self.data1), header=None)
def time_read_uint64_neg_values(self):
read_csv(StringIO(self.data2), header=None)
def time_read_uint64_na_values(self):
read_csv(StringIO(self.data1), header=None, na_values=self.na_values)
class write_csv_standard(_BenchTeardown):
goal_time = 0.2
fname = '__test__.csv'
def setup(self):
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_write_csv_standard(self):
self.df.to_csv(self.fname)
class read_csv_from_s3(object):
# Make sure that we can read part of a file from S3 without
# needing to download the entire thing. Use the timeit.default_timer
# to measure wall time instead of CPU time -- we want to see
# how long it takes to download the data.
timer = timeit.default_timer
params = ([None, "gzip", "bz2"], ["python", "c"])
param_names = ["compression", "engine"]
def setup(self, compression, engine):
if compression == "bz2" and engine == "c" and compat.PY2:
# The Python 2 C parser can't read bz2 from open files.
raise NotImplementedError
try:
import s3fs
except ImportError:
# Skip these benchmarks if `boto` is not installed.
raise NotImplementedError
self.big_fname = "s3://pandas-test/large_random.csv"
def time_read_nrows(self, compression, engine):
# Read a small number of rows from a huge (100,000 x 50) table.
ext = ""
if compression == "gzip":
ext = ".gz"
elif compression == "bz2":
ext = ".bz2"
pd.read_csv(self.big_fname + ext, nrows=10,
compression=compression, engine=engine)
class read_json_lines(_BenchTeardown):
goal_time = 0.2
fname = "__test__.json"
def setup(self):
self.N = 100000
self.C = 5
self.df = DataFrame({('float{0}'.format(i), randn(self.N)) for i in range(self.C)})
self.df.to_json(self.fname,orient="records",lines=True)
def time_read_json_lines(self):
pd.read_json(self.fname, lines=True)
def time_read_json_lines_chunk(self):
pd.concat(pd.read_json(self.fname, lines=True, chunksize=self.N//4))
def peakmem_read_json_lines(self):
pd.read_json(self.fname, lines=True)
def peakmem_read_json_lines_chunk(self):
pd.concat(pd.read_json(self.fname, lines=True, chunksize=self.N//4))
| bsd-3-clause | 5,833,311,090,910,208,000 | 32.136929 | 204 | 0.613699 | false |
Tejas-Khot/ConvAE-DeSTIN | scripts/convae_destin_1.py | 3 | 11425 | """Stacked fixed noise dConvAE test"""
import sys
sys.path.append("..")
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
import theano
import theano.tensor as T
import scae_destin.datasets as ds
from scae_destin.fflayers import ReLULayer
from scae_destin.fflayers import SoftmaxLayer
from scae_destin.convnet import ReLUConvLayer
from scae_destin.convnet import SigmoidConvLayer
from scae_destin.model import ConvAutoEncoder
from scae_destin.convnet import MaxPooling
from scae_destin.convnet import Flattener
from scae_destin.model import FeedForward
from scae_destin.optimize import gd_updates
from scae_destin.cost import mean_square_cost
from scae_destin.cost import categorical_cross_entropy_cost
from scae_destin.cost import L2_regularization
n_epochs=1
batch_size=100
nkerns=100
Xtr, Ytr, Xte, Yte=ds.load_CIFAR10("../cifar-10-batches-py/")
Xtr=np.mean(Xtr, 3)
Xte=np.mean(Xte, 3)
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])/255.0
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])/255.0
train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr))
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte))
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size
print "[MESSAGE] The data is loaded"
################################## FIRST LAYER #######################################
X=T.matrix("data")
y=T.ivector("label")
idx=T.lscalar()
corruption_level=T.fscalar()
images=X.reshape((batch_size, 1, 32, 32))
layer_0_en=ReLUConvLayer(filter_size=(7,7),
num_filters=50,
num_channels=1,
fm_size=(32,32),
batch_size=batch_size)
layer_0_de=SigmoidConvLayer(filter_size=(7,7),
num_filters=1,
num_channels=50,
fm_size=(26,26),
batch_size=batch_size,
border_mode="full")
layer_1_en=ReLUConvLayer(filter_size=(5,5),
num_filters=50,
num_channels=50,
fm_size=(26,26),
batch_size=batch_size)
layer_1_de=SigmoidConvLayer(filter_size=(5,5),
num_filters=50,
num_channels=50,
fm_size=(22,22),
batch_size=batch_size,
border_mode="full")
layer_2_en=ReLUConvLayer(filter_size=(5,5),
num_filters=50,
num_channels=50,
fm_size=(22,22),
batch_size=batch_size)
layer_2_de=SigmoidConvLayer(filter_size=(5,5),
num_filters=50,
num_channels=50,
fm_size=(18,18),
batch_size=batch_size,
border_mode="full")
layer_3_en=ReLUConvLayer(filter_size=(3,3),
num_filters=50,
num_channels=50,
fm_size=(18,18),
batch_size=batch_size)
layer_3_de=SigmoidConvLayer(filter_size=(3,3),
num_filters=50,
num_channels=50,
fm_size=(16,16),
batch_size=batch_size,
border_mode="full")
model_0=ConvAutoEncoder(layers=[layer_0_en, layer_0_de])
out_0=model_0.fprop(images, corruption_level=corruption_level)
cost_0=mean_square_cost(out_0[-1], images)+L2_regularization(model_0.params, 0.005)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.1)
model_1=ConvAutoEncoder(layers=[layer_1_en, layer_1_de])
out_1=model_1.fprop(out_0[0], corruption_level=corruption_level)
cost_1=mean_square_cost(out_1[-1], out_0[0])+L2_regularization(model_1.params, 0.005)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.1)
model_2=ConvAutoEncoder(layers=[layer_2_en, layer_2_de])
out_2=model_2.fprop(out_1[0], corruption_level=corruption_level)
cost_2=mean_square_cost(out_2[-1], out_1[0])+L2_regularization(model_2.params, 0.005)
updates_2=gd_updates(cost=cost_2, params=model_2.params, method="sgd", learning_rate=0.1)
model_3=ConvAutoEncoder(layers=[layer_3_en, layer_3_de])
out_3=model_3.fprop(out_2[0], corruption_level=corruption_level)
cost_3=mean_square_cost(out_3[-1], out_2[0])+L2_regularization(model_3.params, 0.005)
updates_3=gd_updates(cost=cost_3, params=model_3.params, method="sgd", learning_rate=0.1)
train_0=theano.function(inputs=[idx, corruption_level],
outputs=[cost_0],
updates=updates_0,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_1=theano.function(inputs=[idx, corruption_level],
outputs=[cost_1],
updates=updates_1,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_2=theano.function(inputs=[idx, corruption_level],
outputs=[cost_2],
updates=updates_2,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_3=theano.function(inputs=[idx, corruption_level],
outputs=[cost_3],
updates=updates_3,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
print "[MESSAGE] The 4-layer model is built"
corr={}
corr[0]=corr[1]=corr[2]=corr[3]=np.random.uniform(low=0.1, high=0.2, size=1).astype("float32")
min_cost={0:None,
1:None,
2:None,
3:None}
corr_best={0:corr[0].copy(),
           1:corr[1].copy(),
           2:corr[2].copy(),
           3:corr[3].copy()}
max_iter={0:0,
1:0,
2:0,
3:0}
epoch = 0
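# Adaptive corruption schedule: for each layer, whenever its mean reconstruction
# cost falls well below the previous minimum (or 20 epochs pass without that),
# the current corruption level is stored as the best one and a slightly larger
# level is sampled for the following epochs.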
while (epoch < n_epochs):
epoch = epoch + 1
    c_0, c_1, c_2, c_3 = [], [], [], []  # independent cost lists, one per layer
for batch_index in xrange(n_train_batches):
for rep in xrange(8):
train_cost=train_3(batch_index, corr_best[3][0])
c_3.append(train_cost)
train_cost=train_2(batch_index, corr_best[2][0])
c_2.append(train_cost)
train_cost=train_1(batch_index, corr_best[1][0])
c_1.append(train_cost)
train_cost=train_0(batch_index, corr_best[0][0])
c_0.append(train_cost)
if min_cost[0]==None:
min_cost[0]=np.mean(c_0)
else:
if (np.mean(c_0)<min_cost[0]*0.5) or (max_iter[0]>=20):
min_cost[0]=np.mean(c_0)
corr_best[0][0]=corr[0]
corr[0]=np.random.uniform(low=corr_best[0][0], high=corr_best[0][0]+0.1, size=1).astype("float32")
max_iter[0]=0
else:
max_iter[0]+=1
if min_cost[1]==None:
min_cost[1]=np.mean(c_1)
else:
if (np.mean(c_1)<min_cost[1]*0.5) or (max_iter[1]>=20):
min_cost[1]=np.mean(c_1)
corr_best[1][0]=corr[1]
corr[1]=np.random.uniform(low=corr_best[1][0], high=corr_best[1][0]+0.1, size=1).astype("float32")
max_iter[1]=0
else:
max_iter[1]+=1
if min_cost[2]==None:
min_cost[2]=np.mean(c_2)
else:
if (np.mean(c_2)<min_cost[2]*0.5) or (max_iter[2]>=20):
min_cost[2]=np.mean(c_2)
corr_best[2][0]=corr[2]
corr[2]=np.random.uniform(low=corr_best[2][0], high=corr_best[2][0]+0.1, size=1).astype("float32")
max_iter[2]=0
else:
max_iter[2]+=1
if min_cost[3]==None:
min_cost[3]=np.mean(c_3)
else:
if (np.mean(c_3)<min_cost[3]*0.5) or (max_iter[3]>=20):
min_cost[3]=np.mean(c_3)
corr_best[3][0]=corr[3]
corr[3]=np.random.uniform(low=corr_best[3][0], high=corr_best[3][0]+0.1, size=1).astype("float32")
max_iter[3]=0
else:
max_iter[3]+=1
print 'Training epoch %d, cost ' % epoch, np.mean(c_0), str(corr_best[0][0]), min_cost[0], max_iter[0]
print ' ', np.mean(c_1), str(corr_best[1][0]), min_cost[1], max_iter[1]
print ' ' , np.mean(c_2), str(corr_best[2][0]), min_cost[2], max_iter[2]
print ' ' , np.mean(c_3), str(corr_best[3][0]), min_cost[3], max_iter[3]
print "[MESSAGE] The model is trained"
################################## BUILD SUPERVISED MODEL #######################################
flattener=Flattener()
layer_5=ReLULayer(in_dim=50*16*16,
out_dim=1000)
layer_6=SoftmaxLayer(in_dim=1000,
out_dim=10)
model_sup=FeedForward(layers=[layer_0_en, layer_1_en, layer_2_en, layer_3_en, flattener, layer_5, layer_6])
out_sup=model_sup.fprop(images)
cost_sup=categorical_cross_entropy_cost(out_sup[-1], y)
updates=gd_updates(cost=cost_sup, params=model_sup.params, method="sgd", learning_rate=0.1)
train_sup=theano.function(inputs=[idx],
outputs=cost_sup,
updates=updates,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size],
y: train_set_y[idx * batch_size: (idx + 1) * batch_size]})
test_sup=theano.function(inputs=[idx],
outputs=model_sup.layers[-1].error(out_sup[-1], y),
givens={X: test_set_x[idx * batch_size: (idx + 1) * batch_size],
y: test_set_y[idx * batch_size: (idx + 1) * batch_size]})
print "[MESSAGE] The supervised model is built"
n_epochs=1
test_record=np.zeros((n_epochs, 1))
epoch = 0
while (epoch < n_epochs):
epoch+=1
for minibatch_index in xrange(n_train_batches):
mlp_minibatch_avg_cost = train_sup(minibatch_index)
iteration = (epoch - 1) * n_train_batches + minibatch_index
if (iteration + 1) % n_train_batches == 0:
print 'MLP MODEL'
test_losses = [test_sup(i) for i in xrange(n_test_batches)]
test_record[epoch-1] = np.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error %f %%') %
(epoch, minibatch_index + 1, n_train_batches, test_record[epoch-1] * 100.))
filters=[]
filters.append(model_sup.layers[0].filters.get_value(borrow=True))
filters.append(model_sup.layers[1].filters.get_value(borrow=True))
filters.append(model_sup.layers[2].filters.get_value(borrow=True))
filters.append(model_sup.layers[3].filters.get_value(borrow=True))
pickle.dump(test_record, open("convae_destin.pkl", "w"))
for i in xrange(n_epochs):
for j in xrange(4):
image_adr="convae_destin/layer_%d_filter_%d.eps" % (j,i)
plt.imshow(filters[j][i, 0, :, :], cmap = plt.get_cmap('gray'), interpolation='nearest')
plt.axis('off')
plt.savefig(image_adr , bbox_inches='tight', pad_inches=0) | apache-2.0 | -576,982,735,838,468,000 | 38.130137 | 110 | 0.536718 | false |
darrenabbey/ymap | scripts_seqModules/scripts_hapmaps/hapmap.expand_definitions.py | 2 | 3719 | # Input Arguments
#  1) user      : 'darren'
#  2) hapmap    : 'test_Ca'
#  3) main_dir  : '/home/bermanj/shared/links/'
# (logName is derived from user/hapmap/main_dir, not passed as an argument)
#
# Example input line:
# 2(b[1:495225] a[501338:3188548]); 3(a[1:1315157] b[1335699:2232035]);
#
# Example outputlines:
# >Ca_haploid.b.chr2 (1..495225) (1091bp) [*]
# NULL
# >Ca_haploid.a.chr2 (501338..3188548) (2687211bp) [*]
# NULL
# >Ca_haploid.a.chr3 (1..1315157) (1315157bp) [*]
# NULL
# >Ca_haploid.b.chr3 (1335699..2232035) (896337bp) [*]
import string, sys;
user = sys.argv[1];
hapmap = sys.argv[2];
main_dir = sys.argv[3];
hapmapDir = main_dir+"users/"+user+"/hapmaps/"+hapmap+"/";
logName = hapmapDir+"process_log.txt";
inputFile = hapmapDir+"haplotypeMap.txt";
with open(logName, "a") as myfile:
myfile.write("*===============================================================================*\n");
myfile.write("| Log of 'scripts_seqModules/scripts_hapmaps/hapmap.expand_definitions.py'. |\n");
myfile.write("*-------------------------------------------------------------------------------*\n");
#============================================================================================================
# Load haplotype map entries from 'haplotypeMap.txt' file.
#------------------------------------------------------------------------------------------------------------
# Initialize output file counter.
outCount = 0;
# Open 'haplotypeMap.txt'.
haplotypeMap_data = open(inputFile,'r');
# Process digested FASTA genome file, line by line.
while True:
# haplotype map entries are sets of three lines.
# 1) Parent dataset name.
# 2) Child dataset name.
# 3) LOH definitions.
parent = haplotypeMap_data.readline(); # C.albicans_SC5314
parent = parent[:-1];
child = haplotypeMap_data.readline(); # Ca_haploid
child = child[:-1];
LOH_entry = haplotypeMap_data.readline(); # 2(b[1:495225] a[501338:3188548]); 3(a[1:1315157] b[1335699:2232035]); 4(a[1:844690] b[854160:1799406]); 5(a[1:1603444]);
if not parent: # 6(a[1:1190929]); 7(a[1:1033531]); 8(a[1:949617]); 9(a[1:2286390]);
break; # EOF
# Define output file for this haplotype entry.
outputFile = hapmapDir+"haplotypeFragments."+str(outCount)+".txt";
output = open(outputFile, 'w');
# trim last two characters off the LOH entry string. ('; ')
LOH_entry = LOH_entry[:-2];
# break LOH string into per-chromosome definitions.
LOH_chr_list = string.split(LOH_entry,"; ");
for chr in LOH_chr_list:
# trim last character off the chr entry string. (')')
chr = chr[:-1];
# split string into chrID and LOH_list.
chr_parts = string.split(chr,"(");
chrID = chr_parts[0];
LOH_list = string.split(chr_parts[1]," ");
for LOH_item in LOH_list:
# trim last character off the string. (']')
LOH_item = LOH_item[:-1];
# split string into homologID and coordinates.
LOH_item_parts = string.split(LOH_item,"[");
homologID = LOH_item_parts[0];
coordinates = string.split(LOH_item_parts[1],":");
startbp = coordinates[0];
endbp = coordinates[1];
output.write(">"+child+".chr"+chrID+" ("+startbp+".."+endbp+") ("+str(int(endbp)-int(startbp)+1)+"bp) [*] "+homologID+"\n");
output.write("NULL\n");
# close output file.
output.close();
# increment output file counter.
outCount += 1
with open(logName, "a") as myfile:
myfile.write("*-------------------------------------------------------------------------------*\n");
myfile.write("| 'scripts_seqModules/scripts_hapmaps/hapmap.expand_definitions.py' completed. |\n");
myfile.write("*===============================================================================*\n");
| mit | 1,327,611,832,437,496,000 | 40.322222 | 165 | 0.541006 | false |
douglasbagnall/py_bh_tsne | test_radial.py | 1 | 1367 | #!/usr/bin/python
import gzip, cPickle
import numpy as np
import matplotlib.pyplot as plt
import sys
from fasttsne import fast_tsne
import random
random.seed(1)
def generate_angular_clusters(n, d, extra_d=10):
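    # Each point sits near a randomly scaled corner of the d-dimensional hypercube
    # centred at the origin (the corner's bit-string is the class label), padded
    # with extra_d near-zero noise dimensions.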
data = []
classes = []
for i in range(n):
scale = random.random() * 5 + 0.1
centre = [random.randrange(2) for x in range(d)]
_class = ''.join(str(x) for x in centre)
row = [scale * (random.random() * 0.05 + x - 0.5)
for x in centre]
row += [random.random() * 0.01 for x in range(extra_d)]
data.append(row)
classes.append(_class)
data = np.asarray(data)
return data, classes
def main():
data, classes = generate_angular_clusters(5000, 4)
#print data
if len(sys.argv) > 1:
mode = int(sys.argv[1])
else:
mode = 2
Y = fast_tsne(data, perplexity=10, mode=mode)
#print zip(classes, Y)[:50]
digits = set(classes)
fig = plt.figure()
colormap = plt.cm.spectral
plt.gca().set_color_cycle(colormap(i) for i in np.linspace(0, 0.6, 10))
ax = fig.add_subplot(111)
labels = []
for d in sorted(digits):
idx = np.array([x == d for x in classes], dtype=np.bool)
ax.plot(Y[idx, 0], Y[idx, 1], 'o')
labels.append(d)
ax.legend(labels, numpoints=1, fancybox=True)
plt.show()
main()
| bsd-3-clause | 2,999,283,102,789,160,000 | 26.34 | 75 | 0.585955 | false |
zfrenchee/pandas | pandas/tests/indexes/test_category.py | 1 | 44849 | # -*- coding: utf-8 -*-
import pytest
import pandas.util.testing as tm
from pandas.core.indexes.api import Index, CategoricalIndex
from pandas.core.dtypes.dtypes import CategoricalDtype
from .common import Base
from pandas.compat import range, PY3
import numpy as np
from pandas import Categorical, IntervalIndex, compat
from pandas.util.testing import assert_almost_equal
import pandas.core.config as cf
import pandas as pd
if PY3:
unicode = lambda x: x
class TestCategoricalIndex(Base):
_holder = CategoricalIndex
def setup_method(self, method):
self.indices = dict(catIndex=tm.makeCategoricalIndex(100))
self.setup_indices()
def create_index(self, categories=None, ordered=False):
if categories is None:
categories = list('cab')
return CategoricalIndex(
list('aabbca'), categories=categories, ordered=ordered)
def test_construction(self):
ci = self.create_index(categories=list('abcd'))
categories = ci.categories
result = Index(ci)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
result = Index(ci.values)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
# empty
result = CategoricalIndex(categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes, np.array([], dtype='int8'))
assert not result.ordered
# passing categories
result = CategoricalIndex(list('aabbca'), categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
c = pd.Categorical(list('aabbca'))
result = CategoricalIndex(c)
tm.assert_index_equal(result.categories, Index(list('abc')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(c, categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
ci = CategoricalIndex(c, categories=list('abcd'))
result = CategoricalIndex(ci)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(ci, categories=list('ab'))
tm.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, -1, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(ci, categories=list('ab'), ordered=True)
tm.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, -1, 0], dtype='int8'))
assert result.ordered
result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True)
expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True,
dtype='category')
tm.assert_index_equal(result, expected, exact=True)
# turn me to an Index
result = Index(np.array(ci))
assert isinstance(result, Index)
assert not isinstance(result, CategoricalIndex)
def test_construction_with_dtype(self):
# specify dtype
ci = self.create_index(categories=list('abc'))
result = Index(np.array(ci), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
result = Index(np.array(ci).tolist(), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
# these are generally only equal when the categories are reordered
ci = self.create_index()
result = Index(
np.array(ci), dtype='category').reorder_categories(ci.categories)
tm.assert_index_equal(result, ci, exact=True)
# make sure indexes are handled
expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2],
ordered=True)
idx = Index(range(3))
result = CategoricalIndex(idx, categories=idx, ordered=True)
tm.assert_index_equal(result, expected, exact=True)
def test_construction_with_categorical_dtype(self):
# construction with CategoricalDtype
# GH18109
data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True
dtype = CategoricalDtype(categories=cats, ordered=ordered)
result = pd.CategoricalIndex(data, dtype=dtype)
expected = pd.CategoricalIndex(data, categories=cats,
ordered=ordered)
tm.assert_index_equal(result, expected, exact=True)
# error to combine categories or ordered and dtype keywords args
with pytest.raises(ValueError, match="Cannot specify both `dtype` and "
"`categories` or `ordered`."):
pd.CategoricalIndex(data, categories=cats, dtype=dtype)
with pytest.raises(ValueError, match="Cannot specify both `dtype` and "
"`categories` or `ordered`."):
pd.CategoricalIndex(data, ordered=ordered, dtype=dtype)
def test_create_categorical(self):
# https://github.com/pandas-dev/pandas/pull/17513
# The public CI constructor doesn't hit this code path with
# instances of CategoricalIndex, but we still want to test the code
ci = CategoricalIndex(['a', 'b', 'c'])
# First ci is self, second ci is data.
result = CategoricalIndex._create_categorical(ci, ci)
expected = Categorical(['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
def test_disallow_set_ops(self):
# GH 10039
# set ops (+/-) raise TypeError
idx = pd.Index(pd.Categorical(['a', 'b']))
pytest.raises(TypeError, lambda: idx - idx)
pytest.raises(TypeError, lambda: idx + idx)
pytest.raises(TypeError, lambda: idx - ['a', 'b'])
pytest.raises(TypeError, lambda: idx + ['a', 'b'])
pytest.raises(TypeError, lambda: ['a', 'b'] - idx)
pytest.raises(TypeError, lambda: ['a', 'b'] + idx)
def test_method_delegation(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.set_categories(list('cab'))
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.rename_categories(list('efg'))
tm.assert_index_equal(result, CategoricalIndex(
list('ffggef'), categories=list('efg')))
# GH18862 (let rename_categories take callables)
result = ci.rename_categories(lambda x: x.upper())
tm.assert_index_equal(result, CategoricalIndex(
list('AABBCA'), categories=list('CAB')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.add_categories(['d'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabd')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.remove_categories(['c'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabb') + [np.nan] + ['a'], categories=list('ab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_unordered()
tm.assert_index_equal(result, ci)
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_ordered()
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabdef'), ordered=True))
# invalid
pytest.raises(ValueError, lambda: ci.set_categories(
list('cab'), inplace=True))
def test_contains(self):
ci = self.create_index(categories=list('cabdef'))
assert 'a' in ci
assert 'z' not in ci
assert 'e' not in ci
assert np.nan not in ci
# assert codes NOT in index
assert 0 not in ci
assert 1 not in ci
ci = CategoricalIndex(
list('aabbca') + [np.nan], categories=list('cabdef'))
assert np.nan in ci
def test_min_max(self):
ci = self.create_index(ordered=False)
pytest.raises(TypeError, lambda: ci.min())
pytest.raises(TypeError, lambda: ci.max())
ci = self.create_index(ordered=True)
assert ci.min() == 'c'
assert ci.max() == 'b'
def test_map(self):
ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'),
ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list('ababc'), categories=list('cba'),
ordered=True)
tm.assert_index_equal(result, exp)
ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
ordered=False, name='XXX')
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list('ababc'), categories=list('bac'),
ordered=False, name='XXX')
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(ci.map(lambda x: 1),
Index(np.array([1] * 5, dtype=np.int64),
name='XXX'))
# change categories dtype
ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
ordered=False)
def f(x):
return {'A': 10, 'B': 20, 'C': 30}.get(x)
result = ci.map(f)
exp = pd.CategoricalIndex([10, 20, 10, 20, 30],
categories=[20, 10, 30],
ordered=False)
tm.assert_index_equal(result, exp)
result = ci.map(pd.Series([10, 20, 30], index=['A', 'B', 'C']))
tm.assert_index_equal(result, exp)
result = ci.map({'A': 10, 'B': 20, 'C': 30})
tm.assert_index_equal(result, exp)
def test_map_with_categorical_series(self):
# GH 12756
a = pd.Index([1, 2, 3, 4])
b = pd.Series(["even", "odd", "even", "odd"],
dtype="category")
c = pd.Series(["even", "odd", "even", "odd"])
exp = CategoricalIndex(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(b), exp)
exp = pd.Index(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(c), exp)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = CategoricalIndex([np.nan] + i[1:].tolist(),
categories=i.categories)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_append(self):
ci = self.create_index()
categories = ci.categories
# append cats with the same categories
result = ci[:3].append(ci[3:])
tm.assert_index_equal(result, ci, exact=True)
foos = [ci[:1], ci[1:3], ci[3:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, ci, exact=True)
# empty
result = ci.append([])
tm.assert_index_equal(result, ci, exact=True)
        # appending with different categories or reordered is not ok
pytest.raises(
TypeError,
lambda: ci.append(ci.values.set_categories(list('abcd'))))
pytest.raises(
TypeError,
lambda: ci.append(ci.values.reorder_categories(list('abc'))))
# with objects
result = ci.append(Index(['c', 'a']))
expected = CategoricalIndex(list('aabbcaca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid objects
pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd'])))
# GH14298 - if base object is not categorical -> coerce to object
result = Index(['c', 'a']).append(ci)
expected = Index(list('caaabbca'))
tm.assert_index_equal(result, expected, exact=True)
def test_insert(self):
ci = self.create_index()
categories = ci.categories
# test 0th element
result = ci.insert(0, 'a')
expected = CategoricalIndex(list('aaabbca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test Nth element that follows Python list behavior
result = ci.insert(-1, 'a')
expected = CategoricalIndex(list('aabbcaa'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test empty
result = CategoricalIndex(categories=categories).insert(0, 'a')
expected = CategoricalIndex(['a'], categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid
pytest.raises(TypeError, lambda: ci.insert(0, 'd'))
# GH 18295 (test missing)
expected = CategoricalIndex(['a', np.nan, 'a', 'b', 'c', 'b'])
for na in (np.nan, pd.NaT, None):
result = CategoricalIndex(list('aabcb')).insert(1, na)
tm.assert_index_equal(result, expected)
def test_delete(self):
ci = self.create_index()
categories = ci.categories
result = ci.delete(0)
expected = CategoricalIndex(list('abbca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
result = ci.delete(-1)
expected = CategoricalIndex(list('aabbc'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
with pytest.raises((IndexError, ValueError)):
# Either depending on NumPy version
ci.delete(10)
def test_astype(self):
ci = self.create_index()
result = ci.astype(object)
tm.assert_index_equal(result, Index(np.array(ci)))
# this IS equal, but not the same class
assert result.equals(ci)
assert isinstance(result, Index)
assert not isinstance(result, CategoricalIndex)
# interval
ii = IntervalIndex.from_arrays(left=[-0.001, 2.0],
right=[2, 4],
closed='right')
ci = CategoricalIndex(Categorical.from_codes(
[0, 1, -1], categories=ii, ordered=True))
result = ci.astype('interval')
expected = ii.take([0, 1, -1])
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(result.values)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('name', [None, 'foo'])
@pytest.mark.parametrize('dtype_ordered', [True, False])
@pytest.mark.parametrize('index_ordered', [True, False])
def test_astype_category(self, name, dtype_ordered, index_ordered):
# GH 18630
index = self.create_index(ordered=index_ordered)
if name:
index = index.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = index.astype(dtype)
expected = CategoricalIndex(index.tolist(),
name=name,
categories=index.categories,
ordered=dtype_ordered)
tm.assert_index_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered)
result = index.astype(dtype)
expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype)
tm.assert_index_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = index.astype('category')
expected = index
tm.assert_index_equal(result, expected)
def test_reindex_base(self):
# Determined by cat ordering.
idx = CategoricalIndex(list("cab"), categories=list("cab"))
expected = np.arange(len(idx), dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with tm.assert_raises_regex(ValueError, "Invalid fill method"):
idx.get_indexer(idx, method="invalid")
def test_reindexing(self):
np.random.seed(123456789)
ci = self.create_index()
oidx = Index(np.array(ci))
for n in [1, 2, 5, len(ci)]:
finder = oidx[np.random.randint(0, len(ci), size=n)]
expected = oidx.get_indexer_non_unique(finder)[0]
actual = ci.get_indexer(finder)
tm.assert_numpy_array_equal(expected, actual)
# see gh-17323
#
# Even when indexer is equal to the
# members in the index, we should
# respect duplicates instead of taking
# the fast-track path.
for finder in [list("aabbca"), list("aababca")]:
expected = oidx.get_indexer_non_unique(finder)[0]
actual = ci.get_indexer(finder)
tm.assert_numpy_array_equal(expected, actual)
def test_reindex_dtype(self):
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(['a', 'c'])
tm.assert_index_equal(res, Index(['a', 'a', 'c']), exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(['a', 'c'])
exp = Index(['a', 'a', 'c'], dtype='object')
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
def test_reindex_empty_index(self):
# See GH16770
c = CategoricalIndex([])
res, indexer = c.reindex(['a', 'b'])
tm.assert_index_equal(res, Index(['a', 'b']), exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([-1, -1], dtype=np.intp))
def test_is_monotonic(self):
c = CategoricalIndex([1, 2, 3])
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([1, 2, 3], ordered=True)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
assert not c.is_monotonic_increasing
assert c.is_monotonic_decreasing
c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1])
assert not c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True)
assert not c.is_monotonic_increasing
assert c.is_monotonic_decreasing
# non lexsorted categories
categories = [9, 0, 1, 2, 3]
c = CategoricalIndex([9, 0], categories=categories)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([0, 1], categories=categories)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
def test_duplicates(self):
idx = CategoricalIndex([0, 0, 0], name='foo')
assert not idx.is_unique
assert idx.has_duplicates
expected = CategoricalIndex([0], name='foo')
tm.assert_index_equal(idx.drop_duplicates(), expected)
tm.assert_index_equal(idx.unique(), expected)
def test_get_indexer(self):
idx1 = CategoricalIndex(list('aabcde'), categories=list('edabc'))
idx2 = CategoricalIndex(list('abf'))
for indexer in [idx2, list('abf'), Index(list('abf'))]:
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='pad'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='backfill'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='nearest'))
def test_get_loc(self):
# GH 12531
cidx1 = CategoricalIndex(list('abcde'), categories=list('edabc'))
idx1 = Index(list('abcde'))
assert cidx1.get_loc('a') == idx1.get_loc('a')
assert cidx1.get_loc('e') == idx1.get_loc('e')
for i in [cidx1, idx1]:
with pytest.raises(KeyError):
i.get_loc('NOT-EXIST')
# non-unique
cidx2 = CategoricalIndex(list('aacded'), categories=list('edabc'))
idx2 = Index(list('aacded'))
# results in bool array
res = cidx2.get_loc('d')
tm.assert_numpy_array_equal(res, idx2.get_loc('d'))
tm.assert_numpy_array_equal(res, np.array([False, False, False,
True, False, True]))
# unique element results in scalar
res = cidx2.get_loc('e')
assert res == idx2.get_loc('e')
assert res == 4
for i in [cidx2, idx2]:
with pytest.raises(KeyError):
i.get_loc('NOT-EXIST')
# non-unique, slicable
cidx3 = CategoricalIndex(list('aabbb'), categories=list('abc'))
idx3 = Index(list('aabbb'))
# results in slice
res = cidx3.get_loc('a')
assert res == idx3.get_loc('a')
assert res == slice(0, 2, None)
res = cidx3.get_loc('b')
assert res == idx3.get_loc('b')
assert res == slice(2, 5, None)
for i in [cidx3, idx3]:
with pytest.raises(KeyError):
i.get_loc('c')
def test_repr_roundtrip(self):
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
str(ci)
tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
# formatting
if PY3:
str(ci)
else:
compat.text_type(ci)
# long format
# this is not reprable
ci = CategoricalIndex(np.random.randint(0, 5, size=100))
if PY3:
str(ci)
else:
compat.text_type(ci)
def test_isin(self):
ci = CategoricalIndex(
list('aabca') + [np.nan], categories=['c', 'a', 'b'])
tm.assert_numpy_array_equal(
ci.isin(['c']),
np.array([False, False, False, True, False, False]))
tm.assert_numpy_array_equal(
ci.isin(['c', 'a', 'b']), np.array([True] * 5 + [False]))
tm.assert_numpy_array_equal(
ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))
# mismatched categorical -> coerced to ndarray so doesn't matter
result = ci.isin(ci.set_categories(list('abcdefghi')))
expected = np.array([True] * 6)
tm.assert_numpy_array_equal(result, expected)
result = ci.isin(ci.set_categories(list('defghi')))
expected = np.array([False] * 5 + [True])
tm.assert_numpy_array_equal(result, expected)
def test_identical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
assert ci1.identical(ci1)
assert ci1.identical(ci1.copy())
assert not ci1.identical(ci2)
def test_ensure_copied_data(self):
# gh-12309: Check the "copy" argument of each
# Index.__new__ is honored.
#
# Must be tested separately from other indexes because
# self.value is not an ndarray.
_base = lambda ar: ar if ar.base is None else ar.base
for index in self.indices.values():
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
assert _base(index.values) is not _base(result.values)
result = CategoricalIndex(index.values, copy=False)
assert _base(index.values) is _base(result.values)
def test_equals_categorical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
assert ci1.equals(ci1)
assert not ci1.equals(ci2)
assert ci1.equals(ci1.astype(object))
assert ci1.astype(object).equals(ci1)
assert (ci1 == ci1).all()
assert not (ci1 != ci1).all()
assert not (ci1 > ci1).all()
assert not (ci1 < ci1).all()
assert (ci1 <= ci1).all()
assert (ci1 >= ci1).all()
assert not (ci1 == 1).all()
assert (ci1 == Index(['a', 'b'])).all()
assert (ci1 == ci1.values).all()
# invalid comparisons
with tm.assert_raises_regex(ValueError, "Lengths must match"):
ci1 == Index(['a', 'b', 'c'])
pytest.raises(TypeError, lambda: ci1 == ci2)
pytest.raises(
TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))
pytest.raises(
TypeError,
lambda: ci1 == Categorical(ci1.values, categories=list('abc')))
# tests
# make sure that we are testing for category inclusion properly
ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca'))
# Same categories, but different order
# Unordered
assert ci.equals(CategoricalIndex(list('aabca')))
# Ordered
assert not ci.equals(CategoricalIndex(list('aabca'), ordered=True))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca'))
assert not ci.equals(CategoricalIndex(list('aabca')))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca') + [np.nan])
assert ci.equals(CategoricalIndex(list('aabca') + [np.nan]))
assert not ci.equals(CategoricalIndex(list('aabca') + [np.nan],
ordered=True))
assert ci.equals(ci.copy())
def test_string_categorical_index_repr(self):
# short
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',
u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list('abcdefghijklmmo'))
if PY3:
expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'm', 'o'],
categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',
u'k', u'l', u'm', u'm', u'o'],
categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',
u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',
u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
        # Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
'さ', 'し', 'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',
u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
def test_fillna_categorical(self):
# GH 11343
idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')
# fill by value in categories
exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x')
tm.assert_index_equal(idx.fillna(1.0), exp)
# fill by value not in categories raises ValueError
with tm.assert_raises_regex(ValueError,
'fill value must be in categories'):
idx.fillna(2.0)
def test_take_fill_value(self):
# GH 12631
# numeric category
idx = pd.CategoricalIndex([1, 2, 3], name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3],
name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# object category
idx = pd.CategoricalIndex(list('CBA'), categories=list('ABC'),
ordered=True, name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
ordered=True, name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.CategoricalIndex(['B', 'C', np.nan],
categories=list('ABC'), ordered=True,
name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
ordered=True, name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_fill_value_datetime(self):
# datetime category
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx')
idx = pd.CategoricalIndex(idx)
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
expected = pd.CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx')
exp_cats = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])
expected = pd.CategoricalIndex(expected, categories=exp_cats)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
expected = pd.CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
idx = pd.CategoricalIndex([1, 2, 3], name='foo')
indices = [1, 0, -1]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
| bsd-3-clause | -7,361,495,921,514,615,000 | 39.921992 | 148 | 0.52833 | false |
asoliveira/NumShip | scripts/plot/beta-velo-v-zz-plt.py | 1 | 3060 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Is the data non-dimensional?
adi = False
#Save the figures (True|False)?
save = True
#If saving, which file format?
formato = 'jpg'
#If saving, which directory should the figures be saved to?
dircg = 'fig-sen'
#If saving, which file name?
nome = 'beta-velo-v-zz'
#Which title for the plots?
titulo = ''#'Curva de ZigZag'
titulo2 = ''
#Which colors for the plots?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
#Line styles
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
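# Plot the v velocity component and the rudder angle of the zig-zag manoeuvre,
# comparing the default ship with three beta-perturbed runs (1.1, 1.2, 1.3).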
acelhis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/velo.dat')
acelhis2 = sp.genfromtxt('../entrada/beta/saida1.1/CurvaZigZag/velo.dat')
acelhis3 = sp.genfromtxt('../entrada/beta/saida1.2/CurvaZigZag/velo.dat')
acelhis4 = sp.genfromtxt('../entrada/beta/saida1.3/CurvaZigZag/velo.dat')
lemehis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/leme.dat')
lemehis2 = sp.genfromtxt('../entrada/beta/saida1.1/CurvaZigZag/leme.dat')
lemehis3 = sp.genfromtxt('../entrada/beta/saida1.2/CurvaZigZag/leme.dat')
lemehis4 = sp.genfromtxt('../entrada/beta/saida1.3/CurvaZigZag/leme.dat')
axl = [0, 1000, -1.5, 2.]
axl2 = [0, 1000, -25, 25]#do leme
#Plotting the zig-zag curve
if adi:
    ylabel = r'$v\prime$'
    xacellabel = r'$t\prime$'
else:
    ylabel = r'$v \quad m/s$'
    xacellabel = r'$t \quad seconds$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Default (baseline) run
plt.plot(acelhis[:, 0], acelhis[:, 2], color = pc, linestyle = ps,
linewidth = 2, label=ur'padrão')
plt.plot(acelhis2[:, 0], acelhis2[:, 2], color = r1c,linestyle = r1s,
linewidth = 2, label=ur'1.1--$\beta$')
plt.plot(acelhis3[:, 0], acelhis3[:, 2], color = r2c, linestyle = r2s,
linewidth = 2, label=ur'1.2--$\beta$')
plt.plot(acelhis4[:, 0], acelhis4[:, 2], color = r3c, linestyle = r3s,
linewidth = 2, label=ur'1.3--$\beta$')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacellabel)
plt.axis(axl)
plt.grid(True)
plt.twinx()
plt.plot(lemehis[:, 0], lemehis[:, 1] * (180/sp.pi), color = pc, linestyle = "--",
linewidth = 1, label=ur'leme--padrão')
plt.plot(lemehis2[:, 0], lemehis2[:, 1] * (180/sp.pi), color = r1c, linestyle = "--",
linewidth = 1, label=ur'leme--1.1$\beta$')
plt.plot(lemehis3[:, 0], lemehis3[:, 1] * (180/sp.pi), color = r2c, linestyle = "--",
linewidth = 1, label=ur'leme--1.2$\beta$')
plt.plot(lemehis4[:, 0], lemehis4[:, 1] * (180/sp.pi), color = r3c, linestyle = "--",
linewidth = 1, label=ur'leme--1.3$\beta$')
plt.title(titulo2)
plt.legend(bbox_to_anchor=(1.1, 0), loc=3, borderaxespad=0.)
plt.ylabel(r"$\delta_R$")
plt.axis(axl2)
plt.grid(False)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 | -6,526,457,012,097,562,000 | 29.79798 | 87 | 0.638242 | false |
yiori-s/fit_instagram_gender | instagram_collector.py | 1 | 2456 | import sys
from settings import instgram_access_token
from api import InstagramAPI, Alchemy
import pandas as pd
import csv
def following_users(api, user_name):
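    # Resolve the Instagram user id for user_name and return the accounts
    # that user follows.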
instgram_user_id = api.user_id(user_name=user_name)
following_users = api.follows_list(user_id=instgram_user_id)
return following_users
def userinfo_list(api, following_users):
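    # For every followed user, tag each media image with the Alchemy vision
    # API, sum the per-image tag scores into a single row and attach it to the
    # user dict; returns early with what has been collected if tagging fails.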
userinfo_list = []
for user in following_users:
entries = api.media_list(user["user_id"])
for entry in entries:
tag_list = Alchemy.tag_list(image_url=entry['url'])
if tag_list is None:
return userinfo_list
entry.update({'tag_list': tag_list})
tags = [entry['tag_list'] for entry in entries]
df = pd.DataFrame(tags).fillna(0)
user_summery = df.sum()
user_summery = user_summery.to_dict()
user.update(user_summery)
userinfo_list.append(user)
return userinfo_list
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
if len(argvs) != 2:
print('Usage: # python %s INSTAGRAM_USER_NAME' % argvs[0])
quit()
instgram_user_name = argvs[1]
api = InstagramAPI(access_token=instgram_access_token)
    # Load the already-collected user info from the CSV
f = open('gotten_user_tags.csv', 'r')
reader = csv.DictReader(f)
gotten_users = []
for user in reader:
gotten_users.append(user)
following_users = following_users(api, instgram_user_name)
    # Build the difference: followed users whose info has not been collected yet
userid_list = []
get_users = []
for g_user in gotten_users:
userid_list.append(g_user["user_id"])
for f_user in following_users:
if f_user["user_id"] in set(userid_list):
pass
else:
get_users.append(f_user)
get_users = get_users[0:40]
# following_users = following_users[0:40]
userinfo_list = userinfo_list(api, get_users)
users_df = pd.DataFrame(userinfo_list).fillna(0)
users_df.to_csv("user_tags.csv")
# for following_user in following_users:
# # entries = api.media_list(user_name=following_user)
# # for entry in entries:
# # image_url = entry["url"]
# # tag_list = Alchemy.tag_list(image_url=image_url)
# # entry.update({"tag_list": tag_list})
# # print(entry)
# # print(entries)
print(userinfo_list)
| mit | 1,824,728,866,409,863,000 | 26.604651 | 66 | 0.606992 | false |
LFPy/LFPy | examples/example_suppl.py | 1 | 8078 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Some plottin' functions used by the example scripts
Copyright (C) 2017 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import neuron
plt.rcParams.update({
'axes.xmargin': 0.0,
'axes.ymargin': 0.0,
})
def plot_ex1(cell, electrode, X, Y, Z):
'''
plot the morphology and LFP contours, synaptic current and soma trace
'''
# some plot parameters
t_show = 30 # time point to show LFP
tidx = np.where(cell.tvec == t_show)
# contour lines:
n_contours = 200
n_contours_black = 40
# This is the extracellular potential, reshaped to the X, Z mesh
LFP = np.arcsinh(electrode.data[:, tidx]).reshape(X.shape)
# figure object
fig = plt.figure(figsize=(12, 8))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0.4, hspace=0.4)
# Plot LFP around the cell with in color and with equipotential lines
ax1 = fig.add_subplot(121, aspect='equal', frameon=False)
# plot_morphology(plot_synapses=True)
for sec in neuron.h.allsec():
idx = cell.get_idx(sec.name())
ax1.plot(np.r_[cell.x[idx, 0], cell.x[idx, 1][-1]],
np.r_[cell.z[idx, 0], cell.z[idx, 1][-1]],
color='k')
for i in range(len(cell.synapses)):
ax1.plot([cell.synapses[i].x], [cell.synapses[i].z], '.',
markersize=10)
# contour lines
ct1 = ax1.contourf(X, Z, LFP, n_contours)
ct1.set_clim((-0.00007, 0.00002))
ax1.contour(X, Z, LFP, n_contours_black, colors='k')
# Plot synaptic input current
ax2 = fig.add_subplot(222)
ax2.plot(cell.tvec, cell.synapses[0].i)
# Plot soma potential
ax3 = fig.add_subplot(224)
ax3.plot(cell.tvec, cell.somav)
# Figure formatting and labels
fig.suptitle('example 1', fontsize=14)
ax1.set_title('LFP at t=' + str(t_show) + ' ms', fontsize=12)
ax1.set_xticks([])
ax1.set_xticklabels([])
ax1.set_yticks([])
ax1.set_yticklabels([])
ax2.set_title('synaptic input current', fontsize=12)
ax2.set_ylabel('(nA)')
ax2.set_xlabel('time (ms)')
ax3.set_title('somatic membrane potential', fontsize=12)
ax3.set_ylabel('(mV)')
ax3.set_xlabel('time (ms)')
return fig
def plot_ex2(cell, electrode):
'''example2.py plotting function'''
# creating array of points and corresponding diameters along structure
for i in range(cell.x.shape[0]):
if i == 0:
xcoords = np.array([cell.x[i].mean()])
ycoords = np.array([cell.y[i].mean()])
zcoords = np.array([cell.z[i].mean()])
diams = np.array([cell.d[i]])
else:
if cell.z[i].mean() < 100 and cell.z[i].mean() > -100 and \
cell.x[i].mean() < 100 and cell.x[i].mean() > -100:
xcoords = np.r_[xcoords,
np.linspace(cell.x[i, 0],
cell.x[i, 1],
int(cell.length[i] * 3))]
ycoords = np.r_[ycoords,
np.linspace(cell.y[i, 0],
cell.y[i, 1],
int(cell.length[i] * 3))]
zcoords = np.r_[zcoords,
np.linspace(cell.z[i, 0],
cell.z[i, 1],
int(cell.length[i] * 3))]
diams = np.r_[diams,
np.linspace(cell.d[i], cell.d[i],
int(cell.length[i] * 3))]
# sort along depth-axis
argsort = np.argsort(ycoords)
# plotting
fig = plt.figure(figsize=[12, 8])
ax = fig.add_axes([0.1, 0.1, 0.533334, 0.8], frameon=False)
ax.scatter(xcoords[argsort], zcoords[argsort], s=diams[argsort]**2 * 20,
c=ycoords[argsort], edgecolors='none', cmap='gray')
ax.plot(electrode.x, electrode.z, '.', marker='o', markersize=5, color='k')
i = 0
for LFP in electrode.data:
tvec = cell.tvec * 0.6 + electrode.x[i] + 2
if abs(LFP).max() >= 1:
factor = 2
color = 'r'
elif abs(LFP).max() < 0.25:
factor = 50
color = 'b'
else:
factor = 10
color = 'g'
trace = LFP * factor + electrode.z[i]
ax.plot(tvec, trace, color=color, lw=2)
i += 1
ax.plot([22, 28], [-60, -60], color='k', lw=3)
ax.text(22, -65, '10 ms')
ax.plot([40, 50], [-60, -60], color='k', lw=3)
ax.text(42, -65, r'10 $\mu$m')
ax.plot([60, 60], [20, 30], color='r', lw=2)
ax.text(62, 20, '5 mV')
ax.plot([60, 60], [0, 10], color='g', lw=2)
ax.text(62, 0, '1 mV')
ax.plot([60, 60], [-20, -10], color='b', lw=2)
ax.text(62, -20, '0.1 mV')
ax.set_xticks([])
ax.set_yticks([])
ax.axis([-61, 61, -61, 61])
ax.set_title('Location-dependent extracellular spike shapes')
# plotting the soma trace
ax = fig.add_axes([0.75, 0.55, 0.2, 0.35])
ax.plot(cell.tvec, cell.somav)
ax.set_title('Somatic action-potential')
ax.set_ylabel(r'$V_\mathrm{membrane}$ (mV)')
# plotting the synaptic current
ax = fig.add_axes([0.75, 0.1, 0.2, 0.35])
ax.plot(cell.tvec, cell.synapses[0].i)
ax.set_title('Synaptic current')
ax.set_ylabel(r'$i_\mathrm{synapse}$ (nA)')
ax.set_xlabel(r'time (ms)')
return fig
def plot_ex3(cell, electrode):
'''plotting function used by example3/4'''
fig = plt.figure(figsize=[12, 8])
# plot the somatic trace
ax = fig.add_axes([0.1, 0.7, 0.5, 0.2])
ax.plot(cell.tvec, cell.somav)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Soma pot. (mV)')
# plot the synaptic current
ax = fig.add_axes([0.1, 0.4, 0.5, 0.2])
for i in range(len(cell.synapses)):
ax.plot(cell.tvec, cell.synapses[i].i,
color='C0'
if cell.synapses[i].kwargs['e'] > cell.v_init else 'C1',
)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Syn. i (nA)')
# plot the LFP as image plot
ax = fig.add_axes([0.1, 0.1, 0.5, 0.2])
absmaxLFP = electrode.data.std() * 3
im = ax.pcolormesh(cell.tvec, electrode.z, electrode.data,
vmax=absmaxLFP, vmin=-absmaxLFP,
cmap='PRGn',
shading='auto')
rect = np.array(ax.get_position().bounds)
rect[0] += rect[2] + 0.01
rect[2] = 0.02
cax = fig.add_axes(rect)
cbar = plt.colorbar(im, cax=cax)
cbar.set_label('LFP (mV)')
ax.axis(ax.axis('tight'))
ax.set_xlabel('Time (ms)')
ax.set_ylabel(r'z ($\mu$m)')
# plot the morphology, electrode contacts and synapses
ax = fig.add_axes([0.65, 0.1, 0.25, 0.8], frameon=False)
for sec in neuron.h.allsec():
idx = cell.get_idx(sec.name())
ax.plot(np.r_[cell.x[idx, 0], cell.x[idx, 1][-1]],
np.r_[cell.z[idx, 0], cell.z[idx, 1][-1]],
color='k')
for i in range(len(cell.synapses)):
ax.plot([cell.synapses[i].x], [cell.synapses[i].z], marker='.',
color='C0'
if cell.synapses[i].kwargs['e'] > cell.v_init else 'C1',
)
for i in range(electrode.x.size):
ax.plot(electrode.x[i], electrode.z[i], color='C2', marker='o')
ax.set_xticks([])
ax.set_yticks([])
return fig
| gpl-3.0 | -1,700,147,359,224,970,800 | 32.518672 | 79 | 0.541842 | false |
JudoWill/glue | glue/tests/test_qglue.py | 1 | 6819 | from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from mock import patch, MagicMock
import pytest
from .. import qglue
from ..core.registry import Registry
from ..core.exceptions import IncompatibleAttribute
from ..core import Data
from ..qt.glue_application import GlueApplication
from .helpers import requires_astropy
@requires_astropy
class TestQGlue(object):
def setup_method(self, method):
from astropy.table import Table
from astropy.io.fits import HDUList, ImageHDU
Registry().clear()
x = [1, 2, 3]
y = [2, 3, 4]
u = [10, 20, 30, 40]
v = [20, 40, 60, 80]
self.xy = {'x': x, 'y': y}
self.dict_data = {'u': u, 'v': v}
self.recarray_data = np.rec.array([(0, 1), (2, 3)],
dtype=[(str('a'), int), (str('b'), int)])
self.astropy_table = Table({'x': x, 'y': y})
self.bad_data = {'x': x, 'u': u}
self.hdulist = HDUList([ImageHDU(x, name='PRIMARY')])
self.x = np.array(x)
self.y = np.array(y)
self.u = np.array(u)
self.v = np.array(v)
self._start = GlueApplication.start
GlueApplication.start = MagicMock()
def teardown_method(self, method):
GlueApplication.start = self._start
def check_setup(self, dc, expected):
# assert that the assembled data collection returned
        # from qglue matches expected structure
# test for expected data, components
for data in dc:
components = set(c.label for c in data.components)
e = expected.pop(data.label)
for component in e:
assert component in components
assert len(expected) == 0
def test_qglue_starts_application(self):
pandas_data = pd.DataFrame(self.xy)
app = qglue(data1=pandas_data)
app.start.assert_called_once_with()
def test_single_pandas(self):
dc = qglue(data1=self.xy).data_collection
self.check_setup(dc, {'data1': ['x', 'y']})
def test_single_pandas_nonstring_column(self):
dc = qglue(data1=pd.DataFrame({1: [1, 2, 3]})).data_collection
self.check_setup(dc, {'data1': ['1']})
def test_single_numpy(self):
dc = qglue(data1=np.array([1, 2, 3])).data_collection
self.check_setup(dc, {'data1': ['data1']})
def test_single_list(self):
dc = qglue(data1=[1, 2, 3]).data_collection
self.check_setup(dc, {'data1': ['data1']})
def test_single_dict(self):
dc = qglue(data2=self.dict_data).data_collection
self.check_setup(dc, {'data2': ['u', 'v']})
def test_recarray(self):
dc = qglue(data3=self.recarray_data).data_collection
self.check_setup(dc, {'data3': ['a', 'b']})
def test_astropy_table(self):
dc = qglue(data4=self.astropy_table).data_collection
self.check_setup(dc, {'data4': ['x', 'y']})
def test_multi_data(self):
dc = qglue(data1=self.dict_data, data2=self.xy).data_collection
self.check_setup(dc, {'data1': ['u', 'v'],
'data2': ['x', 'y']})
def test_hdulist(self):
dc = qglue(data1=self.hdulist).data_collection
self.check_setup(dc, {'data1': ['PRIMARY']})
def test_glue_data(self):
d = Data(x=[1, 2, 3])
dc = qglue(x=d).data_collection
assert d.label == 'x'
def test_simple_link(self):
using = lambda x: x * 2
links = [['data1.x', 'data2.u', using]]
dc = qglue(data1=self.xy, data2=self.dict_data,
links=links).data_collection
links = [[['x'], 'u', using]]
self.check_setup(dc, {'data1': ['x', 'y'],
'data2': ['u', 'v']})
d = dc[0] if dc[0].label == 'data1' else dc[1]
np.testing.assert_array_equal(d['x'], self.x)
np.testing.assert_array_equal(d['u'], self.x * 2)
d = dc[0] if dc[0].label == 'data2' else dc[1]
with pytest.raises(IncompatibleAttribute) as exc:
d['x']
def test_multi_link(self):
forwards = lambda *args: (args[0] * 2, args[1] * 3)
backwards = lambda *args: (args[0] / 2, args[1] / 3)
links = [[['Data1.x', 'Data1.y'],
['Data2.u', 'Data2.v'], forwards, backwards]]
dc = qglue(Data1=self.xy, Data2=self.dict_data,
links=links).data_collection
self.check_setup(dc, {'Data1': ['x', 'y'],
'Data2': ['u', 'v']})
for d in dc:
if d.label == 'Data1':
np.testing.assert_array_equal(d['x'], self.x)
np.testing.assert_array_equal(d['y'], self.y)
np.testing.assert_array_equal(d['u'], self.x * 2)
np.testing.assert_array_equal(d['v'], self.y * 3)
else:
np.testing.assert_array_equal(d['x'], self.u / 2)
np.testing.assert_array_equal(d['y'], self.v / 3)
np.testing.assert_array_equal(d['u'], self.u)
np.testing.assert_array_equal(d['v'], self.v)
def test_implicit_identity_link(self):
links = [('Data1.x', 'Data2.v'),
('Data1.y', 'Data2.u')]
dc = qglue(Data1=self.xy, Data2=self.dict_data,
links=links).data_collection
# currently, identity links rename the second link to first,
# so u/v disappear
for d in dc:
if d.label == 'Data1':
np.testing.assert_array_equal(d['x'], self.x)
np.testing.assert_array_equal(d['y'], self.y)
else:
np.testing.assert_array_equal(d['y'], self.u)
np.testing.assert_array_equal(d['x'], self.v)
def test_bad_link(self):
forwards = lambda *args: args
links = [(['Data1.a'], ['Data2.b'], forwards)]
with pytest.raises(ValueError) as exc:
dc = qglue(Data1=self.xy, Data2=self.dict_data,
links=links).data_collection
assert exc.value.args[0] == "Invalid link (no component named Data1.a)"
def test_bad_data_shape(self):
with pytest.raises(ValueError) as exc:
dc = qglue(d=self.bad_data).data_collection
assert exc.value.args[0].startswith("Invalid format for data 'd'")
def test_bad_data_format(self):
with pytest.raises(TypeError) as exc:
dc = qglue(d=5).data_collection
assert exc.value.args[0].startswith("Invalid data description")
def test_malformed_data_dict(self):
with pytest.raises(ValueError) as exc:
dc = qglue(d={'x': 'bad'}).data_collection
assert exc.value.args[0].startswith("Invalid format for data 'd'")
| bsd-3-clause | 6,128,743,806,767,828,000 | 35.079365 | 83 | 0.5514 | false |
enfeizhan/clusteror | clusteror/preprocessing_layer.py | 1 | 8211 | import os
import sys
import timeit
import theano
import theano.tensor as T
import numpy as np
import pandas as pd
import pickle as pk
from scipy.special import expit
from theano.tensor.shared_randomstreams import RandomStreams
from sklearn.preprocessing import StandardScaler
from discovery_layer import DataDiscovery
from dA import dA
# from SdA import SdA
ss = StandardScaler()
def _pretraining_early_stopping(
train_model,
n_train_batches,
n_epochs,
patience,
patience_increase,
improvement_threshold,
verbose,
):
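    # Mini-batch pretraining loop with patience-based early stopping: the mean
    # reconstruction cost is checked every `check_frequency` epochs, `patience`
    # (in epochs) is extended whenever the cost drops below
    # best_cost * improvement_threshold, and training stops once the epoch
    # counter exceeds the current patience.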
epoch = 0
done_looping = False
check_frequency = min(n_epochs, patience / 2)
best_cost = np.inf
start_time = timeit.default_timer()
while (epoch < n_epochs) and (not done_looping):
epoch += 1
# go through training set
c = []
for minibatch_index in range(n_train_batches):
c.append(train_model(minibatch_index))
cost = np.mean(c)
if verbose is True:
print(
'Training epoch {epoch}, cost {cost}.'
.format(epoch=epoch, cost=cost))
if epoch % check_frequency == 0:
if cost < best_cost:
much_better_cost = best_cost * improvement_threshold
if cost < much_better_cost:
patience = max(patience, (epoch + 1) * patience_increase)
print(
'Epoch {epoch}, patience increased to {patience}'.
format(epoch=epoch, patience=patience))
best_cost = cost
if epoch > patience:
done_looping = True
end_time = timeit.default_timer()
training_time = (end_time - start_time)
sys.stderr.write(
'The 30% corruption code for file ' +
os.path.split(__file__)[1] +
' ran for {time:.2f}m'.format(time=training_time / 60.))
class DataPreprocessing(DataDiscovery):
def da_reduce_dim(
self,
dat,
field_weights=None,
to_dim=1,
batch_size=50,
corruption_level=0.3,
learning_rate=0.002,
training_epochs=200,
verbose=True,
patience=60,
patience_increase=2,
improvement_threshold=0.9995,
random_state=123):
'''
        Reduce each record down to a single hidden dimension with a
        denoising autoencoder.
        verbose: boolean, default True
            If True, print out the progress of pretraining.
'''
rng = np.random.RandomState(random_state)
if isinstance(dat, pd.core.frame.DataFrame):
dat = dat.values
sys.stderr.write(
'Squeezing the data into [0 1] recommended!')
dat = np.asarray(dat, dtype=theano.config.floatX)
train_set_x = theano.shared(value=dat, borrow=True)
# compute number of minibatches for training, validation and testing
n_train_batches = (
train_set_x.get_value(borrow=True).shape[0] // batch_size)
# start-snippet-2
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x')
# end-snippet-2
#################################
# BUILDING THE MODEL CORRUPTION #
#################################
theano_rng = RandomStreams(rng.randint(2 ** 30))
da = dA(
numpy_rng=rng,
theano_rng=theano_rng,
input_dat=x,
field_weights=field_weights,
n_visible=dat.shape[1],
n_hidden=1
)
cost, updates = da.get_cost_updates(
corruption_level=corruption_level,
learning_rate=learning_rate
)
train_da = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size]
}
)
_pretraining_early_stopping(
train_model=train_da,
n_train_batches=n_train_batches,
n_epochs=training_epochs,
patience=patience,
patience_increase=patience_increase,
improvement_threshold=improvement_threshold,
verbose=verbose)
self.denoising_autoencoder = da
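        # Compile helper functions: `to_lower_dim` maps records to the 1-D
        # hidden code, `reconstruct` maps a hidden code back to the visible
        # space.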
x = T.dmatrix('x')
self.to_lower_dim = theano.function([x], da.get_hidden_values(x))
self.reconstruct = theano.function([x], da.get_reconstructed_input(x))
def save_da_reduce_dim(self, filename):
f = open(filename, 'wb')
pk.dump(self.to_lower_dim, f)
print(
'Denoising autoencoder model saved in {}.'.format(filename)
)
def sda_reduce_dim(
self,
dat,
to_dim=1,
batch_size=50,
corruption_level=0.3,
learning_rate=0.002,
training_epochs=100,
compress_to_zero_one=True,
random_state=123):
'''
        Reduce each record down to a single hidden dimension.
'''
rng = np.random.RandomState(random_state)
if isinstance(dat, pd.core.frame.DataFrame):
dat = dat.values
if compress_to_zero_one:
print('Transform data ...')
dat = expit(ss.fit_transform(dat))
print('Transform completed ...')
self.standard_scaler = ss
else:
sys.stderr.write('Better squeeze data into [0 1] range!')
dat = np.asarray(dat, dtype=theano.config.floatX)
train_set_x = theano.shared(value=dat, borrow=True)
# compute number of minibatches for training, validation and testing
n_train_batches = (
train_set_x.get_value(borrow=True).shape[0] // batch_size)
# start-snippet-2
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x')
# end-snippet-2
#####################################
# BUILDING THE MODEL CORRUPTION 30% #
#####################################
theano_rng = RandomStreams(rng.randint(2 ** 30))
da = dA(
numpy_rng=rng,
theano_rng=theano_rng,
input=x,
n_visible=dat.shape[1],
n_hidden=1
)
cost, updates = da.get_cost_updates(
corruption_level=corruption_level,
learning_rate=learning_rate
)
train_da = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size]
}
)
start_time = timeit.default_timer()
# ###########
# TRAINING #
# ###########
# go through training epochs
for epoch in range(training_epochs):
            # go through training set
c = []
for batch_index in range(n_train_batches):
c.append(train_da(batch_index))
print('Training epoch {}, cost '.format(epoch) +
'{cost}.'.format(cost=np.mean(c)))
end_time = timeit.default_timer()
training_time = (end_time - start_time)
sys.stderr.write(
'The 30% corruption code for file ' +
os.path.split(__file__)[1] +
' ran for {time:.2f}m'.format(time=training_time / 60.))
self.denoising_autoencoder = da
x = T.dmatrix('x')
self.to_lower_dim = theano.function([x], da.get_hidden_values(x))
self.reconstruct = theano.function([x], da.get_reconstructed_input(x))
# running this model will test the method defined here
if __name__ == '__main__':
dat = pd.read_csv('complete_cols_for_clustering.csv', index_col=0)
cols_to_read = [
'bookings',
'passengers',
'luggage_fee',
'infant_fee',
'unique_passengers']
dp = DataPreprocessing()
dp.da_reduce_dim(
dat.loc[:, cols_to_read],
to_dim=1,
batch_size=100000,
corruption_level=0.3,
learning_rate=0.02,
)
| mit | 1,165,009,514,918,314,000 | 32.514286 | 78 | 0.534892 | false |
christos-tsotskas/PhD_post_processing | src/post_process2.py | 1 | 2157 | '''
Created on 26 Sep 2016
@author: DefaultUser
'''
from pylab import scatter
import pylab
import matplotlib.pyplot as plt
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
def plot_quality():
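    # Read the number-of-variables vs. runtime results of five 20000-evaluation
    # test cases and scatter-plot them in one figure to compare scalability.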
f1 = 'all_tests_phd_corrections4_20000evals.txt'
f2 = 'all_tests_phd_corrections5_20000evals.txt'
f3 = 'all_tests_phd_corrections7_20000evals.txt'
f4 = 'all_tests_phd_corrections6_20000evals.txt'
f5 = 'all_tests_phd_corrections8_20000evals.txt'
all_files = [f1, f2, f3, f4, f5]
data1 = DataFrame(pd.read_csv(f1, delim_whitespace=True))
data2 = DataFrame(pd.read_csv(f2, delim_whitespace=True))
data3 = DataFrame(pd.read_csv(f3, delim_whitespace=True))
data4 = DataFrame(pd.read_csv(f4, delim_whitespace=True))
data5 = DataFrame(pd.read_csv(f5, delim_whitespace=True))
# data2 = DataFrame(pd.read_csv(f2))
# data3 = DataFrame(pd.read_csv(f3))
x1 = np.array(data1['#number_of_variables'], dtype='int')
y1 = np.array(data1['time(s)'], dtype='float32')
x2 = np.array(data2['#nVar'], dtype='int')
y2 = np.array(data2['time(s):'], dtype='float32')
x3 = np.array(data3['#nVar'], dtype='int')
y3 = np.array(data3['time(s):'], dtype='float32')
x4 = np.array(data4['#nVar'], dtype='int')
y4 = np.array(data4['time(s):'], dtype='float32')
x5 = np.array(data5['#nVar'], dtype='int')
y5 = np.array(data5['time(s):'], dtype='float32')
fig = plt.figure()
plt.scatter(x1, y1, s=10, c='r', marker="o", label = 'case4')
plt.scatter(x2, y2, s=13, c='b', marker="s", label = 'case5')
plt.scatter(x3, y3, marker="*", label = 'case6')
plt.scatter(x4, y4, marker="+", label = 'case7')
plt.scatter(x5, y5, marker=".", label = 'case8')
#markers: ',', '+', '.', 'o', '*'
plt.xlabel('Number of variables')
plt.ylabel('Time(s)')
plt.title('Variables - Time Scalability')
plt.grid(True)
plt.legend(loc='upper left')
plt.show()
if __name__ == "__main__":
plot_quality() | mit | 1,422,669,631,993,509,400 | 31.215385 | 65 | 0.584608 | false |
manulera/ModellingCourse | ReAct/Python/ColorLine.py | 1 | 1500 | # Function found on the internet
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.collections as mcoll
import matplotlib.path as mpath
def colorline(
x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),
linewidth=3, alpha=1.0):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
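# Usage sketch (added for illustration; assumes axis limits are then set to
# cover the data before showing the figure):
# x = np.linspace(0, 4 * np.pi, 500)
# y = np.sin(x)
# lc = colorline(x, y, cmap=plt.get_cmap('viridis'), linewidth=2)
# plt.xlim(0, 4 * np.pi)
# plt.ylim(-1.1, 1.1)
# plt.show()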
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments | gpl-3.0 | -1,122,759,915,787,861,800 | 30.270833 | 96 | 0.653333 | false |
znes/HESYSOPT | hesysopt/restore_results.py | 1 | 1428 | # -*- coding: utf-8 -*-
"""
This module is used to configure the plotting. At the momemt it reads for
the default all results path and creates a multiindex dataframe. This is
used by the different plotting-modules. Also, colors are set here.
Note: This is rather ment to illustrate, how hesysopt results can be plotted,
than to depict a complete plotting ready to use library.
"""
import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def restore(scenarios=['1HBP', '2HBP', '4HBP']):
homepath = os.path.expanduser('~')
main_df = pd.DataFrame()
for s in scenarios:
resultspath = os.path.join(homepath, 'hesysopt_simulation', s, 'results',
'all_results.csv')
tmp = pd.read_csv(resultspath)
tmp['Scenario'] = s
main_df = pd.concat([main_df, tmp])
# restore orginial df multiindex
main_df.set_index(['Scenario', 'bus_label', 'type', 'obj_label', 'datetime'],
inplace=True)
# set colors
colors = {}
components = main_df.index.get_level_values('obj_label').unique()
colors['components'] = dict(zip(components,
sns.color_palette("coolwarm_r", len(components))))
colors['scenarios'] = dict(zip(scenarios,
sns.color_palette("muted", len(scenarios))))
return main_df, scenarios, colors
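# Usage sketch (illustrative only, not part of the original module): the
# returned frame is indexed by (Scenario, bus_label, type, obj_label, datetime),
# so single scenarios/components can be selected before plotting, e.g.:
# df, scenarios, colors = restore()
# one_scenario = df.xs('1HBP', level='Scenario')
# per_component = one_scenario.groupby(level='obj_label').sum()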
| gpl-3.0 | -5,618,960,233,635,005,000 | 33.829268 | 81 | 0.630952 | false |
tritemio/pybroom | setup.py | 1 | 2252 | from setuptools import setup
#import versioneer
def get_version():
# http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
from ast import parse
with open('pybroom.py') as f:
version = parse(next(filter(
lambda line: line.startswith('__version__'), f))).body[0].value.s
return version
long_description = r"""
pybroom
=======
**Pybroom** is a small python 3+ library for converting collections of
fit results (curve fitting or other optimizations)
to `Pandas <http://pandas.pydata.org/>`__
`DataFrame <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe>`__
in tidy format (or long-form)
`(Wickham 2014) <http://dx.doi.org/10.18637/jss.v059.i10>`__.
Once fit results are in tidy DataFrames, it is possible to leverage
`common patterns <http://tomaugspurger.github.io/modern-5-tidy.html>`__
for tidy data analysis. Furthermore powerful visual
explorations using multi-facet plots becomes easy thanks to libraries
like `seaborn <https://pypi.python.org/pypi/seaborn/>`__ natively
supporting tidy DataFrames.
See the `pybroom homepage <http://pybroom.readthedocs.io/>`__ for more info.
"""
setup(
name='pybroom',
version=get_version(),
py_modules=['pybroom'],
#version=versioneer.get_version(),
#cmdclass=versioneer.get_cmdclass(),
author='Antonino Ingargiola',
author_email='tritemio@gmail.com',
url='http://pybroom.readthedocs.io/',
download_url='https://github.com/tritemio/pybroom',
install_requires=['pandas', 'lmfit'],
include_package_data=True,
license='MIT',
description=("Make tidy DataFrames from messy fit/model results."),
long_description=long_description,
platforms=('Windows', 'Linux', 'Mac OS X'),
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License'],
keywords=('dataframe tidy-data long-form model fitting tidyverse'))
| mit | 2,977,026,090,203,476,500 | 38.508772 | 119 | 0.677176 | false |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/heat_flux/plot_from_pp_3234_regrid_3_hourly.py | 1 | 9160 | """
Load pp, plot and save
"""
import os, sys
import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file = '3234_mean_by_hour_regrid'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
min_contour = 0
max_contour = 400
tick_interval=40
figprops = dict(figsize=(8,8), dpi=360)
clevs = np.linspace(min_contour, max_contour,32)
#cmap=cm.s3pcpn_l
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
def main():
experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['djzny', 'djzns', 'djznu', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkhgu']
#experiment_ids = ['djzns', 'djznu', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkhgu']
#experiment_ids = ['dklzq', 'dkmbq', 'dkjxq', 'dklwu', 'dklyu', 'djzns']
#experiment_ids = ['djzns' ]
#experiment_ids = ['dkhgu','dkjxq']
for experiment_id in experiment_ids:
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
#pc = iris(pfile)
pcube = iris.load_cube(pfile)
#pcube=iris.analysis.maths.multiply(pcube,3600)
# For each hour in cube
# Get min and max latitude/longitude and unrotate to get min/max corners to crop plot automatically - otherwise end with blank bits on the edges
lats = pcube.coord('grid_latitude').points
lons = pcube.coord('grid_longitude').points
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
#print 'Rotated CS %s' % cs
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lon_corner_u,lat_corner_u = unrotate.unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
else:
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
#lon_low= 62
#lon_high = 102
#lat_low = -7
#lat_high = 33
#lon_high_box = 101.866
#lon_low_box = 64.115
#lat_high_box = 33.
#lat_low_box =-6.79
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
#print lat_high_tick
#print lat_low_tick
for t, time_cube in enumerate(pcube.slices(['grid_latitude', 'grid_longitude'])):
#print time_cube
# Get mid-point time of averages
h_max = u.num2date(time_cube.coord('time').bounds[0].max()).strftime('%H%M')
h_min = u.num2date(time_cube.coord('time').bounds[0].min()).strftime('%H%M')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_max_utc = u.num2date(time_cube.coord('time').bounds[0].max()).replace(tzinfo=from_zone)
h_min_utc = u.num2date(time_cube.coord('time').bounds[0].min()).replace(tzinfo=from_zone)
h_max_local = h_max_utc.astimezone(to_zone).strftime('%H%M')
h_min_local = h_min_utc.astimezone(to_zone).strftime('%H%M')
#m = u.num2date(time_cube.coord('time').bounds[0].mean()).minute
#h = u.num2date(time_cube.coord('time').bounds[0].mean()).hour
#if t==0:
fig = plt.figure(**figprops)
cmap=plt.cm.YlOrRd
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
#ax = fig.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low,lat_high))
#ax = fig.axes(projection=ccrs.PlateCarree())
cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
#del time_cube
#fig.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('mm/h', fontsize=10, color='#262626')
#cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
cbar.ax.tick_params(labelsize=10, color='#262626')
# fig.canvas.draw()
# background = fig.canvas.copy_from_bbox(fig.bbox)
# fig = plt.figure(frameon=False,**figprops)
# make sure frame is off, or everything in existing background
# will be obliterated.
# ax = fig.add_subplot(111,frameon=False)
# restore previous background.
# fig.canvas.restore_region(background)
# time_cube=iris.analysis.maths.multiply(time_cube,3600)
# cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
#print cont.collections()
#################################################################
## Bug fix for Quad Contour set not having attribute 'set_visible'
# def setvisible(self,vis):
# for c in self.collections: c.set_visible(vis)
# cont.set_visible = types.MethodType(setvisible,)
# cont.axes = plt.gca()
# cont.figure=fig
####################################################################
#ims.append([im])
main_title='Mean Rainfall for EMBRACE Period -%s-%s UTC (%s-%s IST)' % (h_min, h_max, h_min_local, h_max_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
#fig.show()
fig.savefig('%s%s/%s/%s_%s_%s-%s_notitle.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file, h_min, h_max), format='png', bbox_inches='tight')
plt.title('%s-%s UTC %s-%s IST' % (h_min,h_max, h_min_local, h_max_local))
fig.savefig('%s%s/%s/%s_%s_%s-%s_short_title.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file, h_min, h_max), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
fig.savefig('%s%s/%s/%s_%s_%s-%s.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file, h_min, h_max), format='png', bbox_inches='tight')
fig.clf()
plt.close()
del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
| mit | -3,145,939,962,526,941,700 | 30.916376 | 164 | 0.648144 | false |
a-holm/MachinelearningAlgorithms | Classification/SupportVectorMachine/howItWorksSupportVectorMachine.py | 1 | 7665 | # -*- coding: utf-8 -*-
"""Support Vector Machine (SVM) classification for machine learning.
SVM is a binary classifier. The objective of the SVM is to find the best
separating hyperplane in vector space, which is also referred to as the
decision boundary. The 'best' separating hyperplane is the one with the
greatest distance (margin) to the nearest data points of the classes it
separates. The SVM then classifies a sample as lying on either the positive
or the negative side of the hyperplane.
This is the file where I create the algorithm from scratch. The algorithm is
for linearly separable 2D data.
Example:
$ python howItWorksSupportVectorMachine.py
Todo:
*
"""
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')
class SupportVectorMachine:
"""Support Vector Machine (SVM) class.
This class is for creating an instance of a SVM. To avoid retraining or
refitting (as it's also called) it all the time.
"""
def __init__(self, visualization=True):
"""The __init__ method of the SupportVectorMachine class.
Args:
visualization (bool): Default set to True due to debugging
"""
self.visualization = visualization
self.colors = {1: 'r', -1: 'c'}
if visualization:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1, 1, 1)
def fit(self, data):
"""Method to train the SVM object as a convex optimization problem.
Uses a simple method to solve convex optimization problems. There are
more sophisticated methods, but for the sake of simplicity (because I
want to do it from scratch) I use a simpler method.
Return:
(int)
Args:
data (:obj:`list` of :obj:`int`:): Data to be fitted/trained.
"""
self.data = data
opt_dict = {} # will be populated like { ||w||: [w,b]}
# Everytime we get a value we will transform them by these multipliers
# to see what works on each step we 'step down' the convex area.
transforms = [[1, 1], [-1, 1], [-1, -1], [1, -1]]
# all_data is used to find max and min values.
all_data = []
for yi in self.data:
for featureset in self.data[yi]:
for feature in featureset:
all_data.append(feature)
self.max_feature_value = max(all_data)
self.min_feature_value = min(all_data)
all_data = None # to avoid holding it in memory
# starting values for training algorithm
step_sizes = [self.max_feature_value * 0.1,
self.max_feature_value * 0.01,
self.max_feature_value * 0.001]
# b is expensive and does not need to be as precise.
b_range_multiple = 5
# we do not need to take as small steps with b as we do with w.
# we could do it, but it would make it too complex for just a portfolio
b_multiple = 5
# to cut a few memory corners all w = [latest_optimum, latest_optimum]
optimum_multiplier = 10
latest_optimum = self.max_feature_value * optimum_multiplier
for step in step_sizes:
w = np.array([latest_optimum, latest_optimum])
optimized = False # We can use this because convex problem.
while not optimized: # following code can probably be threaded
for b in np.arange(
-1 * (self.max_feature_value * b_range_multiple),
self.max_feature_value * b_range_multiple,
step * b_multiple):
for transformation in transforms:
w_t = w * transformation
found_option = True
"""
                        Here is the point that could probably be sped up;
                        this is the weakest part of the algorithm. I have
                        more algorithms to show off, so I am not going to
                        spend time making this more efficient. There are
                        libraries that are more efficient anyway.
"""
# TODO: Possibly add a break later
for i in self.data:
for xi in self.data[i]:
yi = i
if not yi * (np.dot(w_t, xi) + b) >= 1:
found_option = False
if found_option:
opt_dict[np.linalg.norm(w_t)] = [w_t, b]
if w[0] < 0:
optimized = True
print('Optimized a step')
else:
w = w - step
norms = sorted([n for n in opt_dict])
opt_choice = opt_dict[norms[0]]
self.w = opt_choice[0]
self.b = opt_choice[1]
latest_optimum = opt_choice[0][0] + step * 2
def predict(self, features):
"""Method to predict features based on the SVM object.
Return:
(int) an element-wise indication of the classification of the
features.
Args:
features (:obj:`list` of :obj:`int`:): Features to be predicted.
"""
# just see if (x.w+b) is negative or positive
classification = np.sign(np.dot(np.array(features), self.w) + self.b)
if classification != 0 and self.visualization:
self.ax.scatter(features[0], features[1], marker="*",
s=80, c=self.colors[classification])
return classification
def visualize(self):
"""Method for visualization and plotting."""
[[self.ax.scatter(x[0], x[1], s=100, color=self.colors[i])
for x in data_dict[i]] for i in data_dict]
def hyperplane(x, w, b, v): # hyperplane v = x.w+b
"""Method to return values of hyperplanes for visualization.
Args:
x, w, b (int): values to figure out the hyperplane = x.w+b
v (int): value of the hyperplane, either the positive support
vector (1), the negative support vector (-1) or the
decision boundary (0).
"""
return (-w[0] * x - b + v) / w[1]
datarng = (self.min_feature_value * 0.9, self.max_feature_value * 1.1)
hyp_x_min = datarng[0]
hyp_x_max = datarng[1]
# plot positive support vector hyperplane. (w.x+b) = 1
psv1 = hyperplane(hyp_x_min, self.w, self.b, 1)
psv2 = hyperplane(hyp_x_max, self.w, self.b, 1)
self.ax.plot([hyp_x_min, hyp_x_max], [psv1, psv2], 'k')
# plot negative support vector hyperplane. (w.x+b) = -1
nsv1 = hyperplane(hyp_x_min, self.w, self.b, -1)
nsv2 = hyperplane(hyp_x_max, self.w, self.b, -1)
self.ax.plot([hyp_x_min, hyp_x_max], [nsv1, nsv2], 'k')
# plot decision boundary hyperplane. (w.x+b) = 0
db1 = hyperplane(hyp_x_min, self.w, self.b, 0)
db2 = hyperplane(hyp_x_max, self.w, self.b, 0)
self.ax.plot([hyp_x_min, hyp_x_max], [db1, db2], 'y--')
plt.show()
# get training data
negative_array = np.array([[1, 7], [2, 8], [3, 8]])
positive_array = np.array([[5, 1], [6, -1], [7, 3]])
data_dict = {-1: negative_array, 1: positive_array}
svm = SupportVectorMachine()
svm.fit(data=data_dict)
# prediction data and prediction
pred_test = [[0, 10], [1, 3], [3, 4], [3, 5], [5, 5], [5, 6], [6, -5], [5, 8]]
for p in pred_test:
svm.predict(p)
svm.visualize()
| mit | -5,089,693,739,234,152,000 | 38.715026 | 79 | 0.554468 | false |
jeffknupp/arrow | python/pyarrow/parquet.py | 1 | 28949 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
import six
import numpy as np
from pyarrow.filesystem import FileSystem, LocalFileSystem
from pyarrow._parquet import (ParquetReader, FileMetaData, # noqa
RowGroupMetaData, ParquetSchema,
ParquetWriter)
import pyarrow._parquet as _parquet # noqa
import pyarrow.lib as lib
# ----------------------------------------------------------------------
# Reading a single Parquet file
class ParquetFile(object):
"""
Reader interface for a single Parquet file
Parameters
----------
source : str or pyarrow.io.NativeFile
Readable source. For passing Python file objects or byte buffers,
see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader.
metadata : ParquetFileMetadata, default None
Use existing metadata object, rather than reading from file.
common_metadata : ParquetFileMetadata, default None
Will be used in reads for pandas schema metadata if not found in the
main file's metadata, no other uses at the moment
"""
def __init__(self, source, metadata=None, common_metadata=None):
self.reader = ParquetReader()
self.reader.open(source, metadata=metadata)
self.common_metadata = common_metadata
@property
def metadata(self):
return self.reader.metadata
@property
def schema(self):
return self.metadata.schema
@property
def num_row_groups(self):
return self.reader.num_row_groups
def read_row_group(self, i, columns=None, nthreads=1,
use_pandas_metadata=False):
"""
Read a single row group from a Parquet file
Parameters
----------
columns: list
If not None, only these columns will be read from the row group.
nthreads : int, default 1
Number of columns to read in parallel. If > 1, requires that the
underlying file source is threadsafe
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded
Returns
-------
pyarrow.table.Table
Content of the row group as a table (of columns)
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_group(i, column_indices=column_indices,
nthreads=nthreads)
def read(self, columns=None, nthreads=1, use_pandas_metadata=False):
"""
Read a Table from Parquet format
Parameters
----------
columns: list
If not None, only these columns will be read from the file.
nthreads : int, default 1
Number of columns to read in parallel. If > 1, requires that the
underlying file source is threadsafe
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded
Returns
-------
pyarrow.table.Table
Content of the file as a table (of columns)
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_all(column_indices=column_indices,
nthreads=nthreads)
def _get_column_indices(self, column_names, use_pandas_metadata=False):
if column_names is None:
return None
indices = list(map(self.reader.column_name_idx, column_names))
if use_pandas_metadata:
file_keyvalues = self.metadata.metadata
common_keyvalues = (self.common_metadata.metadata
if self.common_metadata is not None
else None)
if file_keyvalues and b'pandas' in file_keyvalues:
index_columns = _get_pandas_index_columns(file_keyvalues)
elif common_keyvalues and b'pandas' in common_keyvalues:
index_columns = _get_pandas_index_columns(common_keyvalues)
else:
index_columns = []
if indices is not None and index_columns:
indices += map(self.reader.column_name_idx, index_columns)
return indices
def _get_pandas_index_columns(keyvalues):
return (json.loads(keyvalues[b'pandas'].decode('utf8'))
['index_columns'])
# ----------------------------------------------------------------------
# Metadata container providing instructions about reading a single Parquet
# file, possibly part of a partitioned dataset
class ParquetDatasetPiece(object):
"""
A single chunk of a potentially larger Parquet dataset to read. The
arguments will indicate to read either a single row group or all row
groups, and whether to add partition keys to the resulting pyarrow.Table
Parameters
----------
path : str
Path to file in the file system where this piece is located
partition_keys : list of tuples
[(column name, ordinal index)]
row_group : int, default None
Row group to load. By default, reads all row groups
"""
def __init__(self, path, row_group=None, partition_keys=None):
self.path = path
self.row_group = row_group
self.partition_keys = partition_keys or []
def __eq__(self, other):
if not isinstance(other, ParquetDatasetPiece):
return False
return (self.path == other.path and
self.row_group == other.row_group and
self.partition_keys == other.partition_keys)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return ('{0}({1!r}, row_group={2!r}, partition_keys={3!r})'
.format(type(self).__name__, self.path,
self.row_group,
self.partition_keys))
def __str__(self):
result = ''
if len(self.partition_keys) > 0:
partition_str = ', '.join('{0}={1}'.format(name, index)
for name, index in self.partition_keys)
result += 'partition[{0}] '.format(partition_str)
result += self.path
if self.row_group is not None:
result += ' | row_group={0}'.format(self.row_group)
return result
def get_metadata(self, open_file_func=None):
"""
Given a function that can create an open ParquetFile object, return the
file's metadata
"""
return self._open(open_file_func).metadata
def _open(self, open_file_func=None):
"""
Returns instance of ParquetFile
"""
reader = open_file_func(self.path)
if not isinstance(reader, ParquetFile):
reader = ParquetFile(reader)
return reader
def read(self, columns=None, nthreads=1, partitions=None,
open_file_func=None, file=None, use_pandas_metadata=False):
"""
Read this piece as a pyarrow.Table
Parameters
----------
columns : list of column names, default None
nthreads : int, default 1
For multithreaded file reads
partitions : ParquetPartitions, default None
open_file_func : function, default None
A function that knows how to construct a ParquetFile object given
the file path in this piece
file : file-like object
passed to ParquetFile
Returns
-------
table : pyarrow.Table
"""
if open_file_func is not None:
reader = self._open(open_file_func)
elif file is not None:
reader = ParquetFile(file)
else:
# try to read the local path
reader = ParquetFile(self.path)
options = dict(columns=columns,
nthreads=nthreads,
use_pandas_metadata=use_pandas_metadata)
if self.row_group is not None:
table = reader.read_row_group(self.row_group, **options)
else:
table = reader.read(**options)
if len(self.partition_keys) > 0:
if partitions is None:
raise ValueError('Must pass partition sets')
# Here, the index is the categorical code of the partition where
# this piece is located. Suppose we had
#
# /foo=a/0.parq
# /foo=b/0.parq
# /foo=c/0.parq
#
# Then we assign a=0, b=1, c=2. And the resulting Table pieces will
# have a DictionaryArray column named foo having the constant index
# value as indicated. The distinct categories of the partition have
# been computed in the ParquetManifest
for i, (name, index) in enumerate(self.partition_keys):
# The partition code is the same for all values in this piece
indices = np.array([index], dtype='i4').repeat(len(table))
# This is set of all partition values, computed as part of the
# manifest, so ['a', 'b', 'c'] as in our example above.
dictionary = partitions.levels[i].dictionary
arr = lib.DictionaryArray.from_arrays(indices, dictionary)
col = lib.Column.from_array(name, arr)
table = table.append_column(col)
return table
def _is_parquet_file(path):
return path.endswith('parq') or path.endswith('parquet')
class PartitionSet(object):
"""A data structure for cataloguing the observed Parquet partitions at a
particular level. So if we have
/foo=a/bar=0
/foo=a/bar=1
/foo=a/bar=2
/foo=b/bar=0
/foo=b/bar=1
/foo=b/bar=2
Then we have two partition sets, one for foo, another for bar. As we visit
levels of the partition hierarchy, a PartitionSet tracks the distinct
values and assigns categorical codes to use when reading the pieces
"""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or []
self.key_indices = {k: i for i, k in enumerate(self.keys)}
self._dictionary = None
def get_index(self, key):
"""
Get the index of the partition value if it is known, otherwise assign
one
"""
if key in self.key_indices:
return self.key_indices[key]
else:
index = len(self.key_indices)
self.keys.append(key)
self.key_indices[key] = index
return index
@property
def dictionary(self):
if self._dictionary is not None:
return self._dictionary
if len(self.keys) == 0:
raise ValueError('No known partition keys')
# Only integer and string partition types are supported right now
try:
integer_keys = [int(x) for x in self.keys]
dictionary = lib.array(integer_keys)
except ValueError:
dictionary = lib.array(self.keys)
self._dictionary = dictionary
return dictionary
@property
def is_sorted(self):
return list(self.keys) == sorted(self.keys)
class ParquetPartitions(object):
def __init__(self):
self.levels = []
self.partition_names = set()
def __len__(self):
return len(self.levels)
def __getitem__(self, i):
return self.levels[i]
def get_index(self, level, name, key):
"""
Record a partition value at a particular level, returning the distinct
code for that value at that level. Example:
partitions.get_index(1, 'foo', 'a') returns 0
partitions.get_index(1, 'foo', 'b') returns 1
partitions.get_index(1, 'foo', 'c') returns 2
partitions.get_index(1, 'foo', 'a') returns 0
Parameters
----------
level : int
The nesting level of the partition we are observing
name : string
The partition name
key : string or int
The partition value
"""
if level == len(self.levels):
if name in self.partition_names:
raise ValueError('{0} was the name of the partition in '
'another level'.format(name))
part_set = PartitionSet(name)
self.levels.append(part_set)
self.partition_names.add(name)
return self.levels[level].get_index(key)
def is_string(x):
return isinstance(x, six.string_types)
class ParquetManifest(object):
"""
"""
def __init__(self, dirpath, filesystem=None, pathsep='/',
partition_scheme='hive'):
self.filesystem = filesystem or LocalFileSystem.get_instance()
self.pathsep = pathsep
self.dirpath = dirpath
self.partition_scheme = partition_scheme
self.partitions = ParquetPartitions()
self.pieces = []
self.common_metadata_path = None
self.metadata_path = None
self._visit_level(0, self.dirpath, [])
def _visit_level(self, level, base_path, part_keys):
fs = self.filesystem
_, directories, files = next(fs.walk(base_path))
filtered_files = []
for path in files:
full_path = self.pathsep.join((base_path, path))
if _is_parquet_file(path):
filtered_files.append(full_path)
elif path.endswith('_common_metadata'):
self.common_metadata_path = full_path
elif path.endswith('_metadata'):
self.metadata_path = full_path
elif not self._should_silently_exclude(path):
print('Ignoring path: {0}'.format(full_path))
# ARROW-1079: Filter out "private" directories starting with underscore
filtered_directories = [self.pathsep.join((base_path, x))
for x in directories
if not _is_private_directory(x)]
filtered_files.sort()
filtered_directories.sort()
if len(files) > 0 and len(filtered_directories) > 0:
raise ValueError('Found files in an intermediate '
'directory: {0}'.format(base_path))
elif len(filtered_directories) > 0:
self._visit_directories(level, filtered_directories, part_keys)
else:
self._push_pieces(filtered_files, part_keys)
def _should_silently_exclude(self, file_name):
return (file_name.endswith('.crc') or
file_name in EXCLUDED_PARQUET_PATHS)
def _visit_directories(self, level, directories, part_keys):
for path in directories:
head, tail = _path_split(path, self.pathsep)
name, key = _parse_hive_partition(tail)
index = self.partitions.get_index(level, name, key)
dir_part_keys = part_keys + [(name, index)]
self._visit_level(level + 1, path, dir_part_keys)
def _parse_partition(self, dirname):
if self.partition_scheme == 'hive':
return _parse_hive_partition(dirname)
else:
raise NotImplementedError('partition schema: {0}'
.format(self.partition_scheme))
def _push_pieces(self, files, part_keys):
self.pieces.extend([
ParquetDatasetPiece(path, partition_keys=part_keys)
for path in files
])
def _parse_hive_partition(value):
if '=' not in value:
raise ValueError('Directory name did not appear to be a '
'partition: {0}'.format(value))
return value.split('=', 1)
def _is_private_directory(x):
_, tail = os.path.split(x)
return tail.startswith('_') and '=' not in tail
def _path_split(path, sep):
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return head, tail
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
class ParquetDataset(object):
"""
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories
Parameters
----------
path_or_paths : str or List[str]
A directory name, single file name, or list of file names
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
metadata : pyarrow.parquet.FileMetaData
Use metadata obtained elsewhere to validate file schemas
schema : pyarrow.parquet.Schema
Use schema obtained elsewhere to validate file schemas. Alternative to
metadata parameter
split_row_groups : boolean, default False
Divide files into pieces for each row group in the file
validate_schema : boolean, default True
Check that individual file schemas are all the same / compatible
"""
def __init__(self, path_or_paths, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True):
if filesystem is None:
self.fs = LocalFileSystem.get_instance()
else:
self.fs = _ensure_filesystem(filesystem)
self.paths = path_or_paths
(self.pieces, self.partitions,
self.metadata_path) = _make_manifest(path_or_paths, self.fs)
if self.metadata_path is not None:
self.common_metadata = ParquetFile(self.metadata_path).metadata
else:
self.common_metadata = None
self.metadata = metadata
self.schema = schema
self.split_row_groups = split_row_groups
if split_row_groups:
raise NotImplementedError("split_row_groups not yet implemented")
if validate_schema:
self.validate_schemas()
def validate_schemas(self):
open_file = self._get_open_file_func()
if self.metadata is None and self.schema is None:
if self.metadata_path is not None:
self.schema = open_file(self.metadata_path).schema
else:
self.schema = self.pieces[0].get_metadata(open_file).schema
elif self.schema is None:
self.schema = self.metadata.schema
# Verify schemas are all equal
for piece in self.pieces:
file_metadata = piece.get_metadata(open_file)
if not self.schema.equals(file_metadata.schema):
raise ValueError('Schema in {0!s} was different. '
'{1!s} vs {2!s}'
.format(piece, file_metadata.schema,
self.schema))
def read(self, columns=None, nthreads=1, use_pandas_metadata=False):
"""
Read multiple Parquet files as a single pyarrow.Table
Parameters
----------
columns : List[str]
Names of columns to read from the file
nthreads : int, default 1
Number of columns to read in parallel. Requires that the underlying
file source is threadsafe
use_pandas_metadata : bool, default False
Passed through to each dataset piece
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
open_file = self._get_open_file_func()
tables = []
for piece in self.pieces:
table = piece.read(columns=columns, nthreads=nthreads,
partitions=self.partitions,
open_file_func=open_file,
use_pandas_metadata=use_pandas_metadata)
tables.append(table)
all_data = lib.concat_tables(tables)
if use_pandas_metadata:
# We need to ensure that this metadata is set in the Table's schema
# so that Table.to_pandas will construct pandas.DataFrame with the
# right index
common_metadata = self._get_common_pandas_metadata()
current_metadata = all_data.schema.metadata or {}
if common_metadata and b'pandas' not in current_metadata:
all_data = all_data.replace_schema_metadata({
b'pandas': common_metadata})
return all_data
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
return self.read(use_pandas_metadata=True, **kwargs)
def _get_common_pandas_metadata(self):
if self.common_metadata is None:
return None
keyvalues = self.common_metadata.metadata
return keyvalues.get(b'pandas', None)
def _get_open_file_func(self):
if self.fs is None or isinstance(self.fs, LocalFileSystem):
def open_file(path, meta=None):
return ParquetFile(path, metadata=meta,
common_metadata=self.common_metadata)
else:
def open_file(path, meta=None):
return ParquetFile(self.fs.open(path, mode='rb'),
metadata=meta,
common_metadata=self.common_metadata)
return open_file
def _ensure_filesystem(fs):
if not isinstance(fs, FileSystem):
if type(fs).__name__ == 'S3FileSystem':
from pyarrow.filesystem import S3FSWrapper
return S3FSWrapper(fs)
else:
raise IOError('Unrecognized filesystem: {0}'
.format(type(fs)))
else:
return fs
def _make_manifest(path_or_paths, fs, pathsep='/'):
partitions = None
metadata_path = None
if len(path_or_paths) == 1:
# Dask passes a directory as a list of length 1
path_or_paths = path_or_paths[0]
if is_string(path_or_paths) and fs.isdir(path_or_paths):
manifest = ParquetManifest(path_or_paths, filesystem=fs,
pathsep=fs.pathsep)
metadata_path = manifest.metadata_path
pieces = manifest.pieces
partitions = manifest.partitions
else:
if not isinstance(path_or_paths, list):
path_or_paths = [path_or_paths]
# List of paths
if len(path_or_paths) == 0:
raise ValueError('Must pass at least one file path')
pieces = []
for path in path_or_paths:
if not fs.isfile(path):
raise IOError('Passed non-file path: {0}'
.format(path))
piece = ParquetDatasetPiece(path)
pieces.append(piece)
return pieces, partitions, metadata_path
def read_table(source, columns=None, nthreads=1, metadata=None,
use_pandas_metadata=False):
"""
Read a Table from Parquet format
Parameters
----------
source: str or pyarrow.io.NativeFile
Location of Parquet dataset. If a string passed, can be a single file
name or directory name. For passing Python file objects or byte
buffers, see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader.
columns: list
If not None, only these columns will be read from the file.
nthreads : int, default 1
Number of columns to read in parallel. Requires that the underlying
file source is threadsafe
metadata : FileMetaData
If separately computed
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
if is_string(source):
fs = LocalFileSystem.get_instance()
if fs.isdir(source):
return fs.read_parquet(source, columns=columns,
metadata=metadata)
pf = ParquetFile(source, metadata=metadata)
return pf.read(columns=columns, nthreads=nthreads,
use_pandas_metadata=use_pandas_metadata)
def read_pandas(source, columns=None, nthreads=1, metadata=None):
"""
Read a Table from Parquet format, also reading DataFrame index values if
known in the file metadata
Parameters
----------
source: str or pyarrow.io.NativeFile
Location of Parquet dataset. If a string passed, can be a single file
name. For passing Python file objects or byte buffers,
see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader.
columns: list
If not None, only these columns will be read from the file.
nthreads : int, default 1
Number of columns to read in parallel. Requires that the underlying
file source is threadsafe
metadata : FileMetaData
If separately computed
Returns
-------
pyarrow.Table
Content of the file as a Table of Columns, including DataFrame indexes
as Columns.
"""
return read_table(source, columns=columns, nthreads=nthreads,
metadata=metadata, use_pandas_metadata=True)
def write_table(table, where, row_group_size=None, version='1.0',
use_dictionary=True, compression='snappy',
use_deprecated_int96_timestamps=False, **kwargs):
"""
Write a Table to Parquet format
Parameters
----------
table : pyarrow.Table
where: string or pyarrow.io.NativeFile
row_group_size : int, default None
The maximum number of rows in each Parquet RowGroup. As a default,
we will write a single RowGroup per file.
version : {"1.0", "2.0"}, default "1.0"
The Parquet format version, defaults to 1.0
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
"""
row_group_size = kwargs.get('chunk_size', row_group_size)
options = dict(
use_dictionary=use_dictionary,
compression=compression,
version=version,
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps)
writer = None
try:
writer = ParquetWriter(where, table.schema, **options)
writer.write_table(table, row_group_size=row_group_size)
except:
if writer is not None:
writer.close()
if isinstance(where, six.string_types):
try:
os.remove(where)
except os.error:
pass
raise
else:
writer.close()
def write_metadata(schema, where, version='1.0',
use_deprecated_int96_timestamps=False):
"""
Write metadata-only Parquet file from schema
Parameters
----------
schema : pyarrow.Schema
where: string or pyarrow.io.NativeFile
version : {"1.0", "2.0"}, default "1.0"
The Parquet format version, defaults to 1.0
"""
options = dict(
version=version,
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps
)
writer = ParquetWriter(where, schema, **options)
writer.close()
def read_metadata(where):
"""
Read FileMetadata from footer of a single Parquet file
Parameters
----------
where : string (filepath) or file-like object
Returns
-------
metadata : FileMetadata
"""
return ParquetFile(where).metadata
def read_schema(where):
"""
Read effective Arrow schema from Parquet file metadata
Parameters
----------
where : string (filepath) or file-like object
Returns
-------
schema : pyarrow.Schema
"""
return ParquetFile(where).schema.to_arrow_schema()
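# Minimal round-trip sketch (added for illustration; not part of the library
# source). The file name and the DataFrame `df` are hypothetical.
# import pyarrow as pa
# import pyarrow.parquet as pq
# table = pa.Table.from_pandas(df)
# pq.write_table(table, 'example.parquet', compression='snappy')
# restored = pq.read_table('example.parquet').to_pandas()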
| apache-2.0 | -3,083,835,975,163,887,600 | 33.057647 | 79 | 0.594839 | false |
abonil91/ncanda-data-integration | scripts/redcap/scoring/pds/__init__.py | 1 | 2566 | #!/usr/bin/env python
##
## Copyright 2016 SRI International
## See COPYING file distributed along with the package for the copyright and license terms.
##
import re
import pandas
import RwrapperNew
#
# Variables from surveys needed for PDS
#
# LimeSurvey field names
lime_fields = [ "pdsf1", "pdsf2", "pdsf3", "pdsf4", "pdsf5a", "pdsf5b",
"pdsm1", "pdsm2", "pdsm3", "pdsm4", "pdsm5" ]
# Dictionary to recover LimeSurvey field names from REDCap names
rc2lime = dict()
for field in lime_fields:
rc2lime[RwrapperNew.label_to_sri( 'youthreport2', field )] = field
# REDCap fields names
input_fields = { 'youthreport2' : [ 'youth_report_2_complete', 'youthreport2_missing' ] + rc2lime.keys() }
#
# This determines the name of the form in REDCap where the results are posted.
#
output_form = 'clinical'
#
# PDS field names mapping from R to REDCap
#
R2rc = { 'PDSS' : 'pds_score', 'pubcat' : 'pds_pubcat' }
#
# Scoring function - take requested data (as requested by "input_fields") for each (subject,event), and demographics (date of birth, gender) for each subject.
#
def compute_scores( data, demographics ):
# Get rid of all records that don't have YR2
data.dropna( axis=1, subset=['youth_report_2_complete'] )
data = data[ data['youth_report_2_complete'] > 0 ]
data = data[ ~(data['youthreport2_missing'] > 0) ]
# If no records to score, return empty DF
if len( data ) == 0:
return pandas.DataFrame()
# Set "ydi2" ("sex" field in LimeSurvey) based on what is in REDCap - this is chacked against subject ID and should avoid mis-entered data
data['ydi2'] = data.index.map( lambda key: demographics['sex'][key[0]] )
# Replace all column labels with the original LimeSurvey names
data.columns = RwrapperNew.map_labels( data.columns, rc2lime )
# Call the scoring function for all table rows
scores = data.apply( RwrapperNew.runscript, axis=1, Rscript='pds/PDS.R' )
# Replace all score columns with REDCap field names
scores.columns = RwrapperNew.map_labels( scores.columns, R2rc )
# Simply copy completion status from the input surveys
scores['pds_complete'] = data['youth_report_2_complete'].map( int )
# Make a proper multi-index for the scores table
scores.index = pandas.MultiIndex.from_tuples(scores.index)
scores.index.names = ['study_id', 'redcap_event_name']
# Return the computed scores - this is what will be imported back into REDCap
outfield_list = [ 'pds_complete' ] + R2rc.values()
return scores[ outfield_list ]
| bsd-3-clause | 8,270,537,386,764,752,000 | 33.675676 | 158 | 0.68901 | false |
datapythonista/pandas | pandas/tests/series/methods/test_item.py | 4 | 1622 | """
Series.item method, mainly testing that we get python scalars as opposed to
numpy scalars.
"""
import pytest
from pandas import (
Series,
Timedelta,
Timestamp,
date_range,
)
class TestItem:
def test_item(self):
# We are testing that we get python scalars as opposed to numpy scalars
ser = Series([1])
result = ser.item()
assert result == 1
assert result == ser.iloc[0]
assert isinstance(result, int) # i.e. not np.int64
ser = Series([0.5], index=[3])
result = ser.item()
assert isinstance(result, float)
assert result == 0.5
ser = Series([1, 2])
msg = "can only convert an array of size 1"
with pytest.raises(ValueError, match=msg):
ser.item()
dti = date_range("2016-01-01", periods=2)
with pytest.raises(ValueError, match=msg):
dti.item()
with pytest.raises(ValueError, match=msg):
Series(dti).item()
val = dti[:1].item()
assert isinstance(val, Timestamp)
val = Series(dti)[:1].item()
assert isinstance(val, Timestamp)
tdi = dti - dti
with pytest.raises(ValueError, match=msg):
tdi.item()
with pytest.raises(ValueError, match=msg):
Series(tdi).item()
val = tdi[:1].item()
assert isinstance(val, Timedelta)
val = Series(tdi)[:1].item()
assert isinstance(val, Timedelta)
# Case where ser[0] would not work
ser = Series(dti, index=[5, 6])
val = ser[:1].item()
assert val == dti[0]
| bsd-3-clause | -6,302,737,722,346,336,000 | 26.491525 | 79 | 0.564735 | false |
timothy1191xa/project-epsilon-1 | code/utils/functions/stimuli.py | 4 | 3069 | from __future__ import print_function # print('me') instead of print 'me'
from __future__ import division # 1/2 == 0.5, not 0
from __future__ import absolute_import
import scipy.stats
from scipy.stats import gamma
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
def hrf(times):
""" Return values for HRF at given times """
# Gamma pdf for the peak
peak_values = gamma.pdf(times, 6)
# Gamma pdf for the undershoot
undershoot_values = gamma.pdf(times, 12)
# Combine them
values = peak_values - 0.35 * undershoot_values
# Scale max to 0.6
return values / np.max(values) * 0.6
"""
Functions to work with standard OpenFMRI stimulus files
The functions have docstrings according to the numpy docstring standard - see:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
"""
def events2neural(task, tr, n_trs):
""" Return predicted neural time course from event file `task_fname`
Parameters
----------
task_fname : str
Filename of event file
tr : float
TR in seconds
n_trs : int
Number of TRs in functional run
Returns
-------
time_course : array shape (n_trs,)
Predicted neural time course, one value per TR
"""
if task.ndim != 2 or task.shape[1] != 3:
raise ValueError("Is {0} really a task file?", task_fname)
# Convert onset, duration seconds to TRs
task[:, :2] = task[:, :2] / tr
# Neural time course from onset, duration, amplitude for each event
time_course = np.zeros(n_trs)
for onset, duration, amplitude in task:
        # onsets/durations are in TRs (floats) after the division above; cast
        # to int so they can be used as slice indices
        time_course[int(onset):int(onset + duration)] = amplitude
return time_course
def events2neural_high(cond_data, TR=2, n_trs=240, tr_div=100):
"""Return predicted neural time course in the case when onsets are not equally spaced and do not start on a TR.
Parameters:
----------
cond_data : np.array
np.array of the condition
TR: float (default: 2)
TR in seconds
n_trs: int (default: 240)
number of TRs
tr_div: int (default value is 10)
step per TR
(We want a resolution to the 10th between each TR)
Return
-------
high_res_neural: np.array
predicted neural time course
"""
onsets_seconds = cond_data[:, 0] # the time when a task starts
durations_seconds = cond_data[:, 1] # duration of a task
amplitudes = cond_data[:, 2] # amplitudes for each different task
onsets_in_scans = onsets_seconds / TR
high_res_times = np.arange(0, n_trs, 1.0/tr_div) * TR
high_res_neural = np.zeros(high_res_times.shape)
high_res_onset_indices = onsets_in_scans * tr_div
high_res_durations = durations_seconds / TR * tr_div
for hr_onset, hr_duration, amplitude in list(zip(high_res_onset_indices,high_res_durations,amplitudes)):
hr_onset = int(round(hr_onset))
hr_duration = int(round(hr_duration))
high_res_neural[hr_onset:hr_onset+hr_duration] = amplitude
return high_res_times, high_res_neural
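# Usage sketch (added for illustration): a predicted BOLD signal is commonly
# obtained by convolving the neural time course with the HRF and trimming the
# convolution tail; `cond_data`, `TR` and `n_trs` below are assumed inputs.
# tr_times = np.arange(0, 30, TR)
# hrf_at_trs = hrf(tr_times)
# neural = events2neural(cond_data, TR, n_trs)
# bold_signal = np.convolve(neural, hrf_at_trs)[:n_trs]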
| bsd-3-clause | 2,075,919,208,005,406,200 | 33.1 | 115 | 0.647768 | false |
opikalo/pyfire | planning/astar/local_graph.py | 1 | 11347 | from __future__ import division
import json
import os
from collections import OrderedDict
import numpy
import matplotlib.pyplot as plt
import networkx as nx
from scipy.misc import imread
from utils import root
import scipy.spatial
from global_map import plot_map
from waypoints import graph_from_waypoints
#the density of the local unconstrained grid graph
LOCAL_GRAPH_DIM = 20
#the size of the local unconstrained grid graph
LOCAL_GRAPH_WIDTH = 1000
def plot_waypoints():
filename = os.path.join(root, 'flash', 'fft2',
'export', 'binaryData', '910.bin')
with open(filename) as f:
car_graph = json.loads(f.read())
G = nx.DiGraph()
x = []
y = []
for p in car_graph['waypoints']:
n_id = p['id']
n_x = p['x']
n_y = p['y']
G.add_node(n_id, pos=(n_x, n_y))
for c in p['connectionIDs']:
G.add_edge(n_id, c)
x.append(p['x'])
y.append(p['y'])
#find the nearest node
current_p = [ 2650,2650 ]
goal_p = [1900, 400]
c_x = numpy.array(zip(x,y))
tree = scipy.spatial.cKDTree(c_x)
dist, indexes = tree.query([current_p, goal_p])
G.add_node('start', pos=current_p)
G.add_edge('start', indexes[0])
G.add_node('goal', pos=goal_p)
G.add_edge(indexes[1], 'goal')
print dist, indexes
pos = nx.get_node_attributes(G,'pos')
return G, pos
def grid_graph(center_pos, dim, width):
""" creates a grid graph centered around particular point with center_pos,
with dimension dim and particular width.
For dim = 10, there will be 10 graph nodes, and grid will be 9x9. Because
we normally want to keep the center node, dim should always be odd. For even
dimensions, the size of the grid is increased by one.
"""
#keep center_pos as actual node and expand from it
if not dim % 2:
dim += 1
c_x = center_pos[0]
c_y = center_pos[1]
x_offset = c_x - width/2
y_offset = c_y - width/2
#for dimension of 10 x 10 (10 poles) the width is 10 - 1 = 9
step = width / (dim - 1)
L = nx.grid_2d_graph(dim, dim)
#iterate through the nodes and set position
for n in L.nodes():
index_x = n[0]
index_y = n[1]
n_x = x_offset + index_x * step
n_y = y_offset + index_y * step
L.node[n]['pos'] = [n_x, n_y]
if L.node[n]['pos'] == center_pos:
center = n
return L, center
DEFAULT_COLOR = (255, 255, 255, 0)
GREEN_COLOR = (0, 128, 0, 255)
RED_COLOR = (255, 0, 0, 255)
BLUE_COLOR = (0, 0, 255, 255)
MIN_UNCONTRAINED_PENALTY = 1
def add_weights(graph, data):
""" Helper utility to add weights to the graph edges,
based on the bitmap data. Modifies the graph in place and returns
custom labels for edges (useful for plotting weights).
If either edge is in the forbidden region, mark edge with penalty weight
Note: grid must be fine enough not to skip the features of the terrain.
"""
penalty = {
DEFAULT_COLOR: MIN_UNCONTRAINED_PENALTY,
GREEN_COLOR: 10,
RED_COLOR: 100,
BLUE_COLOR: 1000
}
#TODO: figure out why we have other colors
OTHER_PENALTY = 10
color_map = {
DEFAULT_COLOR: 'w',
GREEN_COLOR: 'g',
RED_COLOR: 'r',
BLUE_COLOR: 'b'
}
custom_labels={}
for e in graph.edges():
weight = 0
for node in e:
n_pos = graph.node[node]['pos']
d = DEFAULT_COLOR
try:
#interesting, x,y seem to be swapped for image data
d = data[n_pos[1]][n_pos[0]]
except IndexError:
#out of bounds for bitmap
pass
custom_labels[node] = color_map.get(tuple(d), 'o')
weight += penalty.get(tuple(d), OTHER_PENALTY)
graph[e[0]][e[1]]['weight'] = weight
return custom_labels
def stitch(local_graph, global_graph, kd_tree, tolerance, target, rename_string):
""" stitch local unconstrained graph with global graph,
as long as the distance to nearest global graph node is within certain
tolerance. Requires pre-generated kd-tree for the global graph. """
path_candidates = []
for node, d in local_graph.nodes(data=True):
node_pos = d['pos']
dist, indexes = kd_tree.query([node_pos])
if dist[0] < tolerance:
#find astar path to the selected close proximity node
#TODO: compute node path here, and save it, extract length like this:
# path = astar_path(G, source, target, heuristic)
# length = sum(G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
path_length = nx.astar_path_length(local_graph, target, node)
entry_node = indexes[0]
path_candidates.append((path_length, target, node, entry_node))
#chose best way to join to global graph
path_candidates.sort()
best_candidate = path_candidates[0]
(path_length, target, node, entry_node) = best_candidate
astar_path = nx.astar_path(local_graph, target, node)
h = local_graph.subgraph(astar_path)
route = h.to_directed()
# because local_graphs have the same naming, aka (1,2) we have to rename
# to join properly
global_graph = nx.union(global_graph, route, rename=(None, rename_string))
#connect two graphs
global_graph.add_edge(rename_string + str(node), entry_node)
global_graph.add_edge( entry_node, rename_string + str(node))
return global_graph
def plan_path(start_pos, goal_pos):
""" Actual path planneer that integrates local/global graphs and finds path
"""
#for now, just hard code this
filename = os.path.join(root, 'flash', 'fft2', 'processed', 'map.png')
img_data = imread(filename)
#make local unconstrained motion graph
#create unconstrained local graph at the start
start_local_graph, start_center = grid_graph(start_pos,
dim=LOCAL_GRAPH_DIM,
width=LOCAL_GRAPH_WIDTH)
add_weights(start_local_graph, img_data)
#create unconstrained local graph at the goal
goal_local_graph, goal_center = grid_graph(goal_pos,
dim=LOCAL_GRAPH_DIM,
width=LOCAL_GRAPH_WIDTH)
add_weights(goal_local_graph, img_data)
#make global graph based on waypoints
filename = os.path.join(root, 'flash', 'fft2',
'export', 'binaryData', '910.bin')
global_graph = graph_from_waypoints(filename)
#make kd-tree from the global graph
pos = nx.get_node_attributes(global_graph, 'pos')
#sorted by keys
d_x = OrderedDict(sorted(pos.items(), key=lambda t: t[0])).values()
c_x = numpy.array(d_x)
global_tree = scipy.spatial.cKDTree(c_x)
#stitch together unconstrained local with global
u_graph = stitch(start_local_graph, global_graph, global_tree, 100, start_center, 'S-')
u_graph = stitch(goal_local_graph, u_graph, global_tree, 100, goal_center, 'G-')
astar_path = nx.astar_path(u_graph, 'S-' + str(start_center), 'G-' + str(goal_center))
#rename node labels from '0' to final node, i.e. '35'
count = 0
mapping = {}
for node in astar_path:
mapping[node] = count
count += 1
planned_path = u_graph.subgraph(astar_path)
planned_path = nx.relabel_nodes(planned_path, mapping)
return planned_path
def test_planner():
start_pos = [2650, 2650]
goal_pos = [1900, 400]
planned_path = plan_path(start_pos, goal_pos)
planned_path_pos = nx.get_node_attributes(planned_path, 'pos')
plot_map()
nx.draw(planned_path, planned_path_pos, node_size=5, edge_color='r')
plt.show()
def test_grid():
plot_map()
start_pos = [ 2650, 2650 ]
L, c = grid_graph(start_pos, dim=10, width=1000)
pos = nx.get_node_attributes(L,'pos')
nx.draw(L, pos, node_size=5)
plt.show()
def test_weights():
plot_map()
start_pos = [ 2650, 2650 ]
L, c = grid_graph(start_pos, dim=10, width=1000)
filename = os.path.join(root, 'flash', 'fft2', 'processed', 'map.png')
img_data = imread(filename)
custom_labels = add_weights(L, img_data)
pos = nx.get_node_attributes(L,'pos')
#nx.draw(L, pos, node_size=5)
edge_weight=dict([((u,v,),int(d['weight'])) for u,v,d in L.edges(data=True)])
nx.draw_networkx_edge_labels(L,pos,edge_labels=edge_weight)
nx.draw_networkx_nodes(L,pos, node_size=0)
nx.draw_networkx_edges(L,pos)
nx.draw_networkx_labels(L,pos, labels=custom_labels)
plt.show()
def test_weights_planning():
plot_map()
start_pos = [ 2650, 2650 ]
L, c = grid_graph(start_pos, dim=10, width=1000)
filename = os.path.join(root, 'flash', 'fft2', 'processed', 'map.png')
img_data = imread(filename)
custom_labels = add_weights(L, img_data)
astar_path = nx.astar_path(L, (5, 5), (0, 4))
H = L.subgraph(astar_path)
h_pos = nx.get_node_attributes(H, 'pos')
pos = nx.get_node_attributes(L,'pos')
nx.draw(L, pos, node_size=5)
edge_weight=dict([((u,v,),int(d['weight'])) for u,v,d in L.edges(data=True)])
nx.draw_networkx_edge_labels(L,pos,edge_labels=edge_weight)
nx.draw_networkx_nodes(L,pos, node_size=0)
nx.draw_networkx_edges(L,pos)
nx.draw_networkx_labels(L,pos, labels=custom_labels)
nx.draw(H,h_pos, node_size=5, edge_color='r')
plt.show()
def test_stitch():
#make local unconstrained motion graph
start_pos = [2650, 2650]
goal_pos = [1900, 400]
#create unconstrained local graph at the start
start_local_graph, start_center = grid_graph(start_pos, dim=10, width=1000)
filename = os.path.join(root, 'flash', 'fft2', 'processed', 'map.png')
img_data = imread(filename)
add_weights(start_local_graph, img_data)
#create unconstrained local graph at the goal
goal_local_graph, goal_center = grid_graph(goal_pos, dim=10, width=1000)
add_weights(goal_local_graph, img_data)
#make global graph based on waypoints
filename = os.path.join(root, 'flash', 'fft2',
'export', 'binaryData', '910.bin')
global_graph = graph_from_waypoints(filename)
#make a tree from the global graph
pos = nx.get_node_attributes(global_graph, 'pos')
#sorted by keys
d_x = OrderedDict(sorted(pos.items(), key=lambda t: t[0])).values()
c_x = numpy.array(d_x)
global_tree = scipy.spatial.cKDTree(c_x)
#stitch together unconstrained local with global
u_graph = stitch(start_local_graph, global_graph, global_tree, 100, start_center, 'S-')
u_graph = stitch(goal_local_graph, u_graph, global_tree, 100, goal_center, 'G-')
u_pos = nx.get_node_attributes(u_graph, 'pos')
plot_map()
nx.draw(u_graph, u_pos, node_size=5)
astar_path = nx.astar_path(u_graph, 'S-' + str(start_center), 'G-' + str(goal_center))
H = u_graph.subgraph(astar_path)
h_pos = nx.get_node_attributes(H, 'pos')
nx.draw(H, h_pos, node_size=5, edge_color='r')
plt.show()
if __name__ == '__main__':
test_planner()
| mit | 1,927,484,057,707,326,500 | 26.342169 | 91 | 0.604124 | false |
GuessWhoSamFoo/pandas | pandas/io/pickle.py | 2 | 5048 | """ pickle compat """
import warnings
from numpy.lib.format import read_array
from pandas.compat import PY3, BytesIO, cPickle as pkl, pickle_compat as pc
from pandas.io.common import _get_handle, _stringify_path
def to_pickle(obj, path, compression='infer', protocol=pkl.HIGHEST_PROTOCOL):
"""
Pickle (serialize) object to file.
Parameters
----------
obj : any object
Any python object.
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
.. versionadded:: 0.20.0
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For Python
2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value.
For Python >= 3.4, 4 is a valid value. A negative value for the
protocol parameter is equivalent to setting its value to
HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
path = _stringify_path(path)
f, fh = _get_handle(path, 'wb',
compression=compression,
is_text=False)
if protocol < 0:
protocol = pkl.HIGHEST_PROTOCOL
try:
f.write(pkl.dumps(obj, protocol=protocol))
finally:
f.close()
for _f in fh:
_f.close()
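# A short usage sketch (file names are hypothetical): compression is inferred
# from the extension when compression='infer', so
#
#   >>> df = pd.DataFrame({"a": range(3)})
#   >>> to_pickle(df, "data.pkl.gz")             # gzip-compressed pickle
#   >>> to_pickle(df, "data.pkl", protocol=2)    # protocol readable by Python 2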
def read_pickle(path, compression='infer'):
"""
Load pickled pandas object (or any object) from file.
.. warning::
Loading pickled data received from untrusted sources can be
unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.
Parameters
----------
path : str
File path where the pickled object will be loaded.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, xz or zip if path ends in '.gz', '.bz2', '.xz',
or '.zip' respectively, and no decompression otherwise.
Set to None for no decompression.
.. versionadded:: 0.20.0
Returns
-------
unpickled : same type as object stored in file
See Also
--------
DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.
Series.to_pickle : Pickle (serialize) Series object to file.
read_hdf : Read HDF5 file into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
read_parquet : Load a parquet object, returning a DataFrame.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
path = _stringify_path(path)
f, fh = _get_handle(path, 'rb', compression=compression, is_text=False)
# 1) try with cPickle
# 2) try with the compat pickle to handle subclass changes
    # 3) pass encoding only if it's not None as py2 doesn't handle the param
try:
with warnings.catch_warnings(record=True):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
return pkl.load(f)
except Exception: # noqa: E722
try:
return pc.load(f, encoding=None)
except Exception: # noqa: E722
if PY3:
return pc.load(f, encoding='latin1')
raise
finally:
f.close()
for _f in fh:
_f.close()
# compat with sparse pickle / unpickle
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
return arr
| bsd-3-clause | -5,027,144,982,533,614,000 | 28.694118 | 78 | 0.57706 | false |
nlholdem/icodoom | ICO1/deep_feedback_learning/vizdoom/ico.py | 1 | 9225 | #!/usr/bin/python3
from __future__ import print_function
from vizdoom import *
import sys
import threading
import math
from random import choice
from time import sleep
from matplotlib import pyplot as plt
sys.path.append('./deep_feedback_learning')
import numpy as np
import cv2
import deep_feedback_learning
# Create DoomGame instance. It will run the game and communicate with you.
game = DoomGame()
# Now it's time for configuration!
# load_config could be used to load configuration instead of doing it here with code.
# If load_config is used in-code configuration will also work - most recent changes will add to previous ones.
# game.load_config("../../scenarios/basic.cfg")
# Sets path to additional resources wad file which is basically your scenario wad.
# If not specified default maps will be used and it's pretty much useless... unless you want to play good old Doom.
game.set_doom_scenario_path("./basic.wad")
# Sets map to start (scenario .wad files can contain many maps).
game.set_doom_map("map01")
# Sets resolution. Default is 320X240
game.set_screen_resolution(ScreenResolution.RES_640X480)
# create masks for left and right visual fields - note that these only cover the upper half of the image
# this is to help prevent the tracking getting confused by the floor pattern
width = 640
widthNet = 320
height = 480
heightNet = 240
# Sets the screen buffer format. Not used here but now you can change it. Default is CRCGCB.
game.set_screen_format(ScreenFormat.RGB24)
# Enables depth buffer.
game.set_depth_buffer_enabled(True)
# Enables labeling of in-game objects.
game.set_labels_buffer_enabled(True)
# Enables buffer with top down map of the current episode/level.
game.set_automap_buffer_enabled(True)
# Sets other rendering options
game.set_render_hud(False)
game.set_render_minimal_hud(False) # If hud is enabled
game.set_render_crosshair(True)
game.set_render_weapon(False)
game.set_render_decals(False)
game.set_render_particles(False)
game.set_render_effects_sprites(False)
game.set_render_messages(False)
game.set_render_corpses(False)
# Adds buttons that will be allowed.
# game.add_available_button(Button.MOVE_LEFT)
# game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.MOVE_LEFT_RIGHT_DELTA, 50)
game.add_available_button(Button.ATTACK)
game.add_available_button(Button.TURN_LEFT_RIGHT_DELTA)
# Adds game variables that will be included in state.
game.add_available_game_variable(GameVariable.AMMO2)
# Causes episodes to finish after 200 tics (actions)
game.set_episode_timeout(500)
# Makes episodes start after 10 tics (~after raising the weapon)
game.set_episode_start_time(10)
# Makes the window appear (turned on by default)
game.set_window_visible(True)
# Turns on the sound. (turned off by default)
game.set_sound_enabled(True)
# Sets the living reward (for each move) to -1
game.set_living_reward(-1)
# Sets ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR, ASYNC_SPECTATOR, PLAYER mode is default)
game.set_mode(Mode.PLAYER)
# Enables engine output to console.
#game.set_console_enabled(True)
nFiltersInput = 2
nFiltersHidden = 2
minT = 2
maxT = 10
nHidden0 = 4
nHidden1 = 2
learningRate = 0.00005
net = deep_feedback_learning.DeepFeedbackLearning(widthNet*heightNet,[nHidden0*nHidden0], 1, nFiltersInput, nFiltersHidden, minT,maxT)
#net = deep_feedback_learning.DeepFeedbackLearning(widthNet*heightNet,[nHidden0*nHidden0,nHidden1*nHidden1], 1)
net.getLayer(0).setConvolution(widthNet,heightNet)
net.getLayer(1).setConvolution(nHidden0,nHidden0)
#net.getLayer(2).setConvolution(nHidden1,nHidden1)
net.initWeights(1,0,deep_feedback_learning.Neuron.MAX_OUTPUT_RANDOM);
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.ico);
net.setLearningRate(learningRate)
net.setUseDerivative(0)
net.setBias(0)
net.setLearningRateDiscountFactor(1)
#net.getLayer(0).setActivationFunction(deep_feedback_learning.Neuron.RELU)
#net.getLayer(1).setActivationFunction(deep_feedback_learning.Neuron.RELU)
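# Sketch of the resulting topology (derived from the constants above, not from
# the library's documentation): widthNet*heightNet = 320*240 inputs feed one
# hidden layer of nHidden0*nHidden0 = 16 convolutional neurons and a single
# output neuron; learning uses the ICO rule with learningRate = 0.00005.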
# Initialize the game. Further configuration won't take any effect from now on.
game.init()
# Run this many episodes
episodes = 1000
# Sets time that will pause the engine after each action (in seconds)
# Without this everything would go too fast for you to keep track of what's happening.
sleep_time = 1.0 / DEFAULT_TICRATE # = 0.028
delta2 = 0
dontshoot = 1
inp = np.zeros(widthNet*heightNet)
sharpen = np.array((
[0, 1, 0],
[1, 4, 1],
[0, 1, 0]), dtype="int")
edge = np.array((
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]), dtype="int")
plt.ion()
plt.show()
ln1 = False
ln2 = [False,False,False,False]
def getWeights2D(neuron):
n_neurons = net.getLayer(0).getNneurons()
n_inputs = net.getLayer(0).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
if net.getLayer(0).getNeuron(neuron).getMask(i):
weights[i] = net.getLayer(0).getNeuron(neuron).getAvgWeight(i)
else:
weights[i] = np.nan
return weights.reshape(heightNet,widthNet)
def getWeights1D(layer,neuron):
n_neurons = net.getLayer(layer).getNneurons()
n_inputs = net.getLayer(layer).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
weights[i] = net.getLayer(layer).getNeuron(neuron).getAvgWeight(i)
return weights
def plotWeights():
global ln1
global ln2
while True:
if ln1:
ln1.remove()
plt.figure(1)
w1 = getWeights2D(0)
for i in range(1,net.getLayer(0).getNneurons()):
w2 = getWeights2D(i)
w1 = np.where(np.isnan(w2),w1,w2)
ln1 = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
for j in range(1,net.getNumHidLayers()+1):
if ln2[j]:
ln2[j].remove()
plt.figure(j+1)
w1 = np.zeros( (net.getLayer(j).getNneurons(),net.getLayer(j).getNeuron(0).getNinputs()) )
for i in range(net.getLayer(j).getNneurons()):
w1[i,:] = getWeights1D(j,i)
ln2[j] = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
t1 = threading.Thread(target=plotWeights)
t1.start()
for i in range(episodes):
print("Episode #" + str(i + 1))
# Starts a new episode. It is not needed right after init() but it doesn't cost much. At least the loop is nicer.
game.new_episode()
tc = 0
while not game.is_episode_finished():
# Gets the state
state = game.get_state()
# Which consists of:
n = state.number
vars = state.game_variables
screen_buf = state.screen_buffer
depth_buf = state.depth_buffer
labels_buf = state.labels_buffer
automap_buf = state.automap_buffer
labels = state.labels
midlinex = int(width/2);
midliney = int(height*0.75);
crcb = screen_buf
screen_left = screen_buf[100:midliney,0:midlinex,2]
screen_right = screen_buf[100:midliney,midlinex:width,2]
screen_left = cv2.filter2D(screen_left, -1, sharpen);
screen_right = cv2.filter2D(screen_right, -1, sharpen);
screen_diff = screen_left-np.fliplr(screen_right)
#cv2.imwrite('/tmp/left.png',screen_left)
#cv2.imwrite('/tmp/right.png',screen_right)
#cv2.imwrite('/tmp/diff.png',screen_diff)
lavg = np.average(screen_left)
ravg = np.average(screen_right)
delta = (lavg - ravg)*3
dd = delta - delta2
delta2 = delta
# print(delta)
net.setLearningRate(0.0)
shoot = 0
if (dontshoot > 1) :
dontshoot = dontshoot - 1
else :
if (tc > 30):
shoot = 1
net.setLearningRate(learningRate)
dontshoot = 5
# blue = cv2.resize(screen_diff, (widthNet,heightNet));
# blue = blue[:,:,2]
# blue = cv2.filter2D(blue, -1, edge)
err = np.ones(nHidden0*nHidden0)*delta
# net.doStep(blue.flatten()/256-0.5,err)
net.doStep(cv2.resize(screen_diff, (widthNet,heightNet)).flatten(),err)
#weightsplot.set_xdata(np.append(weightsplot.get_xdata(),n))
#weightsplot.set_ydata(np.append(weightsplot.get_ydata(),net.getLayer(0).getWeightDistanceFromInitialWeights()))
output = net.getOutput(0)*20
print(n,delta,output,
net.getLayer(0).getWeightDistanceFromInitialWeights(),"\t",
net.getLayer(1).getWeightDistanceFromInitialWeights(),"\t")
# action[0] is translating left/right; action[2] is rotating/aiming
# action = [ delta+output , shoot, 0. ]
if i>1000:
delta = 0
net.setLearningRate(0)
action = [ 0., shoot, (delta+output)*0.1 ]
r = game.make_action(action)
tc = tc + 1
# if sleep_time > 0:
# sleep(sleep_time)
# Check how the episode went.
print("Episode finished.")
print("Total reward:", game.get_total_reward())
print("************************")
tc = 0
sleep(1)
# It will be done automatically anyway but sometimes you need to do it in the middle of the program...
game.close()
| gpl-3.0 | 1,667,550,478,741,109,000 | 30.377551 | 134 | 0.673388 | false |
Batch21/pywr | tests/test_parameters.py | 1 | 38337 | """
Test for individual Parameter classes
"""
from __future__ import division
from pywr.core import Model, Timestep, Scenario, ScenarioIndex, Storage, Link, Input, Output
from pywr.parameters import (Parameter, ArrayIndexedParameter, ConstantScenarioParameter,
ArrayIndexedScenarioMonthlyFactorsParameter, MonthlyProfileParameter, DailyProfileParameter,
DataFrameParameter, AggregatedParameter, ConstantParameter,
IndexParameter, AggregatedIndexParameter, RecorderThresholdParameter, ScenarioMonthlyProfileParameter,
Polynomial1DParameter, Polynomial2DStorageParameter, ArrayIndexedScenarioParameter,
InterpolatedParameter, WeeklyProfileParameter,
FunctionParameter, AnnualHarmonicSeriesParameter, load_parameter)
from pywr.recorders import AssertionRecorder, assert_rec
from pywr.model import OrphanedParameterWarning
from pywr.recorders import Recorder
from fixtures import simple_linear_model, simple_storage_model
from helpers import load_model
import os
import datetime
import numpy as np
import pandas as pd
import pytest
import itertools
from numpy.testing import assert_allclose
TEST_DIR = os.path.dirname(__file__)
@pytest.fixture
def model(solver):
return Model(solver=solver)
def test_parameter_array_indexed(simple_linear_model):
"""
Test ArrayIndexedParameter
"""
model = simple_linear_model
A = np.arange(len(model.timestepper), dtype=np.float64)
p = ArrayIndexedParameter(model, A)
model.setup()
# scenario indices (not used for this test)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for v, ts in zip(A, model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
# Now check that IndexError is raised if an out of bounds Timestep is given.
ts = Timestep(datetime.datetime(2016, 1, 1), 366, 1.0)
with pytest.raises(IndexError):
p.value(ts, si)
def test_parameter_array_indexed_json_load(simple_linear_model, tmpdir):
"""Test ArrayIndexedParameter can be loaded from json dict"""
model = simple_linear_model
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D', name='date')
df = pd.DataFrame(np.arange(365), index=index, columns=['data'])
df_path = tmpdir.join('df.csv')
df.to_csv(str(df_path))
data = {
'type': 'arrayindexed',
'url': str(df_path),
'index_col': 'date',
'parse_dates': True,
'column': 'data',
}
p = load_parameter(model, data)
model.setup()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for v, ts in enumerate(model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
def test_parameter_constant_scenario(simple_linear_model):
"""
Test ConstantScenarioParameter
"""
model = simple_linear_model
# Add two scenarios
scA = Scenario(model, 'Scenario A', size=2)
scB = Scenario(model, 'Scenario B', size=5)
p = ConstantScenarioParameter(model, scB, np.arange(scB.size, dtype=np.float64))
model.setup()
ts = model.timestepper.current
# Now ensure the appropriate value is returned for the Scenario B indices.
for i, (a, b) in enumerate(itertools.product(range(scA.size), range(scB.size))):
si = ScenarioIndex(i, np.array([a, b], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), float(b))
def test_parameter_array_indexed_scenario_monthly_factors(simple_linear_model):
"""
Test ArrayIndexedParameterScenarioMonthlyFactors
"""
model = simple_linear_model
# Baseline timeseries data
values = np.arange(len(model.timestepper), dtype=np.float64)
# Add two scenarios
scA = Scenario(model, 'Scenario A', size=2)
scB = Scenario(model, 'Scenario B', size=5)
# Random factors for each Scenario B value per month
factors = np.random.rand(scB.size, 12)
p = ArrayIndexedScenarioMonthlyFactorsParameter(model, scB, values, factors)
model.setup()
# Iterate in time
for v, ts in zip(values, model.timestepper):
imth = ts.datetime.month - 1
# Now ensure the appropriate value is returned for the Scenario B indices.
for i, (a, b) in enumerate(itertools.product(range(scA.size), range(scB.size))):
f = factors[b, imth]
si = ScenarioIndex(i, np.array([a, b], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), v*f)
def test_parameter_array_indexed_scenario_monthly_factors_json(model):
model.path = os.path.join(TEST_DIR, "models")
scA = Scenario(model, 'Scenario A', size=2)
scB = Scenario(model, 'Scenario B', size=3)
p1 = ArrayIndexedScenarioMonthlyFactorsParameter.load(model, {
"scenario": "Scenario A",
"values": list(range(32)),
"factors": [list(range(1, 13)),list(range(13, 25))],
})
p2 = ArrayIndexedScenarioMonthlyFactorsParameter.load(model, {
"scenario": "Scenario B",
"values": {
"url": "timeseries1.csv",
"index_col": "Timestamp",
"column": "Data",
},
"factors": {
"url": "monthly_profiles.csv",
"index_col": "scenario",
},
})
node1 = Input(model, "node1", max_flow=p1)
node2 = Input(model, "node2", max_flow=p2)
nodeN = Output(model, "nodeN", max_flow=None, cost=-1)
node1.connect(nodeN)
node2.connect(nodeN)
model.timestepper.start = "2015-01-01"
model.timestepper.end = "2015-01-31"
model.run()
def test_parameter_monthly_profile(simple_linear_model):
"""
Test MonthlyProfileParameter
"""
model = simple_linear_model
values = np.arange(12, dtype=np.float64)
p = MonthlyProfileParameter(model, values)
model.setup()
# Iterate in time
for ts in model.timestepper:
imth = ts.datetime.month - 1
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), values[imth])
class TestScenarioMonthlyProfileParameter:
def test_init(self, simple_linear_model):
model = simple_linear_model
scenario = Scenario(model, 'A', 10)
values = np.random.rand(10, 12)
p = ScenarioMonthlyProfileParameter(model, scenario, values)
model.setup()
# Iterate in time
for ts in model.timestepper:
imth = ts.datetime.month - 1
for i in range(scenario.size):
si = ScenarioIndex(i, np.array([i], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), values[i, imth])
def test_json(self, solver):
model = load_model('scenario_monthly_profile.json', solver=solver)
        # check first day initialised
assert (model.timestepper.start == datetime.datetime(2015, 1, 1))
# check results
supply1 = model.nodes['supply1']
# Multiplication factors
factors = np.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22],
])
for expected in (23.92, 22.14, 22.57, 24.97, 27.59):
model.step()
imth = model.timestepper.current.month - 1
assert_allclose(supply1.flow, expected*factors[:, imth], atol=1e-7)
def test_parameter_daily_profile(simple_linear_model):
"""
Test DailyProfileParameter
"""
model = simple_linear_model
values = np.arange(366, dtype=np.float64)
p = DailyProfileParameter(model, values)
model.setup()
# Iterate in time
for ts in model.timestepper:
month = ts.datetime.month
day = ts.datetime.day
iday = int((datetime.datetime(2016, month, day) - datetime.datetime(2016, 1, 1)).days)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), values[iday])
def test_daily_profile_leap_day(model):
"""Test behaviour of daily profile parameter for leap years
"""
inpt = Input(model, "input")
otpt = Output(model, "otpt", max_flow=None, cost=-999)
inpt.connect(otpt)
inpt.max_flow = DailyProfileParameter(model, np.arange(0, 366, dtype=np.float64))
# non-leap year
model.timestepper.start = pd.to_datetime("2015-01-01")
model.timestepper.end = pd.to_datetime("2015-12-31")
model.run()
assert_allclose(inpt.flow, 365) # NOT 364
# leap year
model.timestepper.start = pd.to_datetime("2016-01-01")
model.timestepper.end = pd.to_datetime("2016-12-31")
model.run()
assert_allclose(inpt.flow, 365)
def test_weekly_profile(simple_linear_model):
model = simple_linear_model
model.timestepper.start = "2004-01-01"
model.timestepper.end = "2005-05-01"
model.timestepper.delta = 7
values = np.arange(0, 52) ** 2 + 27.5
p = WeeklyProfileParameter.load(model, {"values": values})
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
week = int(min((timestep.dayofyear - 1) // 7, 51))
value = week ** 2 + 27.5
return value
model.run()
class TestAnnualHarmonicSeriesParameter:
""" Tests for `AnnualHarmonicSeriesParameter` """
def test_single_harmonic(self, model):
p1 = AnnualHarmonicSeriesParameter(model, 0.5, [0.25], [np.pi/4])
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for ts in model.timestepper:
doy = (ts.datetime.dayofyear - 1)/365
np.testing.assert_allclose(p1.value(ts, si), 0.5 + 0.25*np.cos(doy*2*np.pi + np.pi/4))
def test_double_harmonic(self, model):
p1 = AnnualHarmonicSeriesParameter(model, 0.5, [0.25, 0.3], [np.pi/4, np.pi/3])
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for ts in model.timestepper:
doy = (ts.datetime.dayofyear - 1) /365
expected = 0.5 + 0.25*np.cos(doy*2*np.pi + np.pi / 4) + 0.3*np.cos(doy*4*np.pi + np.pi/3)
np.testing.assert_allclose(p1.value(ts, si), expected)
def test_load(self, model):
data = {
"type": "annualharmonicseries",
"mean": 0.5,
"amplitudes": [0.25],
"phases": [np.pi/4]
}
p1 = load_parameter(model, data)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for ts in model.timestepper:
doy = (ts.datetime.dayofyear - 1) / 365
np.testing.assert_allclose(p1.value(ts, si), 0.5 + 0.25 * np.cos(doy * 2 * np.pi + np.pi / 4))
class TestAggregatedParameter:
"""Tests for AggregatedParameter"""
funcs = {"min": np.min, "max": np.max, "mean": np.mean, "median": np.median, "sum": np.sum}
@pytest.mark.parametrize("agg_func", ["min", "max", "mean", "median", "sum"])
def test_agg(self, simple_linear_model, agg_func):
model = simple_linear_model
model.timestepper.delta = 15
scenarioA = Scenario(model, "Scenario A", size=2)
scenarioB = Scenario(model, "Scenario B", size=5)
values = np.arange(366, dtype=np.float64)
p1 = DailyProfileParameter(model, values)
p2 = ConstantScenarioParameter(model, scenarioB, np.arange(scenarioB.size, dtype=np.float64))
p = AggregatedParameter(model, [p1, p2], agg_func=agg_func)
func = TestAggregatedParameter.funcs[agg_func]
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
x = p1.get_value(scenario_index)
y = p2.get_value(scenario_index)
return func(np.array([x,y]))
model.run()
def test_load(self, simple_linear_model):
""" Test load from JSON dict"""
model = simple_linear_model
data = {
"type": "aggregated",
"agg_func": "product",
"parameters": [
0.8,
{
"type": "monthlyprofile",
"values": list(range(12))
}
]
}
p = load_parameter(model, data)
# Correct instance is loaded
assert isinstance(p, AggregatedParameter)
@assert_rec(model, p)
def expected(timestep, scenario_index):
return (timestep.month - 1) * 0.8
model.run()
class DummyIndexParameter(IndexParameter):
"""A simple IndexParameter which returns a constant value"""
def __init__(self, model, index, **kwargs):
super(DummyIndexParameter, self).__init__(model, **kwargs)
self._index = index
def index(self, timestep, scenario_index):
return self._index
def __repr__(self):
return "<DummyIndexParameter \"{}\">".format(self.name)
class TestAggregatedIndexParameter:
"""Tests for AggregatedIndexParameter"""
funcs = {"min": np.min, "max": np.max, "sum": np.sum, "product": np.product}
@pytest.mark.parametrize("agg_func", ["min", "max", "sum", "product"])
def test_agg(self, simple_linear_model, agg_func):
model = simple_linear_model
model.timestepper.delta = 1
model.timestepper.start = "2017-01-01"
model.timestepper.end = "2017-01-03"
scenarioA = Scenario(model, "Scenario A", size=2)
scenarioB = Scenario(model, "Scenario B", size=5)
p1 = DummyIndexParameter(model, 2)
p2 = DummyIndexParameter(model, 3)
p = AggregatedIndexParameter(model, [p1, p2], agg_func=agg_func)
func = TestAggregatedIndexParameter.funcs[agg_func]
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
x = p1.get_index(scenario_index)
y = p2.get_index(scenario_index)
return func(np.array([x,y], np.int32))
model.run()
def test_agg_anyall(self, simple_linear_model):
"""Test the "any" and "all" aggregation functions"""
model = simple_linear_model
model.timestepper.delta = 1
model.timestepper.start = "2017-01-01"
model.timestepper.end = "2017-01-03"
scenarioA = Scenario(model, "Scenario A", size=2)
scenarioB = Scenario(model, "Scenario B", size=5)
num_comb = len(model.scenarios.get_combinations())
parameters = {
0: DummyIndexParameter(model, 0, name="p0"),
1: DummyIndexParameter(model, 1, name="p1"),
2: DummyIndexParameter(model, 2, name="p2"),
}
data = [(0, 0), (1, 0), (0, 1), (1, 1), (1, 1, 1), (0, 2)]
data_parameters = [[parameters[i] for i in d] for d in data]
expected = [(np.any(d), np.all(d)) for d in data]
for n, params in enumerate(data_parameters):
for m, agg_func in enumerate(["any", "all"]):
p = AggregatedIndexParameter(model, params, agg_func=agg_func)
e = np.ones([len(model.timestepper), num_comb]) * expected[n][m]
r = AssertionRecorder(model, p, expected_data=e, name="assertion {}-{}".format(n, agg_func))
model.run()
def test_parameter_child_variables(model):
p1 = Parameter(model)
# Default parameter
assert len(p1.parents) == 0
assert len(p1.children) == 0
c1 = Parameter(model)
c1.parents.add(p1)
assert len(p1.children) == 1
assert c1 in p1.children
assert p1 in c1.parents
# Test third level
c2 = Parameter(model)
c2.parents.add(c1)
# Disable parent
c1.parents.clear()
assert len(p1.children) == 0
def test_scaled_profile_nested_load(model):
""" Test `ScaledProfileParameter` loading with `AggregatedParameter` """
model.timestepper.delta = 15
s = Storage(model, 'Storage', max_volume=100.0, num_outputs=0)
d = Output(model, 'Link')
data = {
'type': 'scaledprofile',
'scale': 50.0,
'profile': {
'type': 'aggregated',
'agg_func': 'product',
'parameters': [
{
'type': 'monthlyprofile',
'values': [0.5]*12
},
{
'type': 'monthlyprofilecontrolcurve',
'control_curves': [0.8, 0.6],
'values': [[1.0]*12, [0.7]*np.arange(12), [0.3]*12],
'storage_node': 'Storage'
}
]
}
}
s.connect(d)
d.max_flow = p = load_parameter(model, data)
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
if s.initial_volume == 90:
return 50.0*0.5*1.0
elif s.initial_volume == 70:
return 50.0 * 0.5 * 0.7 * (timestep.month - 1)
else:
return 50.0 * 0.5 * 0.3
for initial_volume in (90, 70, 30):
s.initial_volume = initial_volume
model.run()
def test_parameter_df_upsampling(model):
""" Test that the `DataFrameParameter` can upsample data from a `pandas.DataFrame` and return that correctly
"""
# scenario indices (not used for this test)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
# Use a 7 day timestep for this test and run 2015
model.timestepper.delta = datetime.timedelta(7)
model.timestepper.start = pd.to_datetime('2015-01-01')
model.timestepper.end = pd.to_datetime('2015-12-31')
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D')
series = pd.Series(np.arange(365), index=index)
p = DataFrameParameter(model, series)
p.setup()
A = series.resample('7D').mean()
for v, ts in zip(A, model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
model.reset()
# Daily time-step that requires aligning
index = pd.date_range('2014-12-31', periods=366, freq='D')
series = pd.Series(np.arange(366), index=index)
p = DataFrameParameter(model, series)
p.setup()
# offset the resample appropriately for the test
A = series[1:].resample('7D').mean()
for v, ts in zip(A, model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
model.reset()
# Daily time-step that is not covering the require range
index = pd.date_range('2015-02-01', periods=365, freq='D')
series = pd.Series(np.arange(365), index=index)
p = DataFrameParameter(model, series)
with pytest.raises(ValueError):
p.setup()
model.reset()
# Daily time-step that is not covering the require range
index = pd.date_range('2014-11-01', periods=365, freq='D')
series = pd.Series(np.arange(365), index=index)
p = DataFrameParameter(model, series)
with pytest.raises(ValueError):
p.setup()
def test_parameter_df_upsampling_multiple_columns(model):
""" Test that the `DataFrameParameter` works with multiple columns that map to a `Scenario`
"""
scA = Scenario(model, 'A', size=20)
scB = Scenario(model, 'B', size=2)
# scenario indices (not used for this test)
# Use a 7 day timestep for this test and run 2015
model.timestepper.delta = datetime.timedelta(7)
model.timestepper.start = pd.to_datetime('2015-01-01')
model.timestepper.end = pd.to_datetime('2015-12-31')
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D')
df = pd.DataFrame(np.random.rand(365, 20), index=index)
p = DataFrameParameter(model, df, scenario=scA)
p.setup()
A = df.resample('7D', axis=0).mean()
for v, ts in zip(A.values, model.timestepper):
np.testing.assert_allclose([p.value(ts, ScenarioIndex(i, np.array([i], dtype=np.int32))) for i in range(20)], v)
p = DataFrameParameter(model, df, scenario=scB)
with pytest.raises(ValueError):
p.setup()
def test_parameter_df_json_load(model, tmpdir):
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D', name='date')
df = pd.DataFrame(np.random.rand(365), index=index, columns=['data'])
df_path = tmpdir.join('df.csv')
df.to_csv(str(df_path))
data = {
'type': 'dataframe',
'url': str(df_path),
'index_col': 'date',
'parse_dates': True,
}
p = load_parameter(model, data)
p.setup()
def test_simple_json_parameter_reference(solver):
# note that parameters in the "parameters" section cannot be literals
model = load_model("parameter_reference.json")
max_flow = model.nodes["supply1"].max_flow
assert(isinstance(max_flow, ConstantParameter))
assert(max_flow.value(None, None) == 125.0)
cost = model.nodes["demand1"].cost
assert(isinstance(cost, ConstantParameter))
assert(cost.value(None, None) == -10.0)
assert(len(model.parameters) == 4) # 4 parameters defined
def test_threshold_parameter(simple_linear_model):
model = simple_linear_model
model.timestepper.delta = 150
scenario = Scenario(model, "Scenario", size=2)
class DummyRecorder(Recorder):
def __init__(self, model, value, *args, **kwargs):
super(DummyRecorder, self).__init__(model, *args, **kwargs)
self.val = value
def setup(self):
super(DummyRecorder, self).setup()
num_comb = len(model.scenarios.combinations)
self.data = np.empty([len(model.timestepper), num_comb], dtype=np.float64)
def after(self):
timestep = model.timestepper.current
self.data[timestep.index, :] = self.val
threshold = 10.0
values = [50.0, 60.0]
rec1 = DummyRecorder(model, threshold-5, name="rec1") # below
rec2 = DummyRecorder(model, threshold, name="rec2") # equal
rec3 = DummyRecorder(model, threshold+5, name="rec3") # above
expected = [
("LT", (1, 0, 0)),
("GT", (0, 0, 1)),
("EQ", (0, 1, 0)),
("LE", (1, 1, 0)),
("GE", (0, 1, 1)),
]
for predicate, (value_lt, value_eq, value_gt) in expected:
for rec in (rec1, rec2, rec3):
param = RecorderThresholdParameter(model, rec, threshold, values=values, predicate=predicate)
e_val = values[getattr(rec.val, "__{}__".format(predicate.lower()))(threshold)]
e = np.ones([len(model.timestepper), len(model.scenarios.get_combinations())]) * e_val
e[0, :] = values[1] # first timestep is always "on"
r = AssertionRecorder(model, param, expected_data=e)
r.name = "assert {} {} {}".format(rec.val, predicate, threshold)
model.run()
def test_constant_from_df(solver):
"""
Test that a dataframe can be used to provide data to ConstantParameter (single values).
"""
model = load_model('simple_df.json', solver=solver)
assert isinstance(model.nodes['demand1'].max_flow, ConstantParameter)
assert isinstance(model.nodes['demand1'].cost, ConstantParameter)
ts = model.timestepper.next()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(model.nodes['demand1'].max_flow.value(ts, si), 10.0)
np.testing.assert_allclose(model.nodes['demand1'].cost.value(ts, si), -10.0)
def test_constant_from_shared_df(solver):
"""
Test that a shared dataframe can be used to provide data to ConstantParameter (single values).
"""
model = load_model('simple_df_shared.json', solver=solver)
assert isinstance(model.nodes['demand1'].max_flow, ConstantParameter)
assert isinstance(model.nodes['demand1'].cost, ConstantParameter)
ts = model.timestepper.next()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(model.nodes['demand1'].max_flow.value(ts, si), 10.0)
np.testing.assert_allclose(model.nodes['demand1'].cost.value(ts, si), -10.0)
def test_constant_from_multiindex_df(solver):
"""
    Test that a multi-index dataframe can be used to provide data to ConstantParameter (single values).
"""
model = load_model('multiindex_df.json', solver=solver)
assert isinstance(model.nodes['demand1'].max_flow, ConstantParameter)
assert isinstance(model.nodes['demand1'].cost, ConstantParameter)
ts = model.timestepper.next()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(model.nodes['demand1'].max_flow.value(ts, si), 10.0)
np.testing.assert_allclose(model.nodes['demand1'].cost.value(ts, si), -100.0)
def test_parameter_registry_overwrite(model):
# define a parameter
class NewParameter(Parameter):
DATA = 42
def __init__(self, model, values, *args, **kwargs):
super(NewParameter, self).__init__(model, *args, **kwargs)
self.values = values
NewParameter.register()
# re-define a parameter
class NewParameter(IndexParameter):
DATA = 43
def __init__(self, model, values, *args, **kwargs):
super(NewParameter, self).__init__(model, *args, **kwargs)
self.values = values
NewParameter.register()
data = {
"type": "new",
"values": 0
}
parameter = load_parameter(model, data)
# parameter is instance of new class, not old class
assert(isinstance(parameter, NewParameter))
assert(parameter.DATA == 43)
def test_invalid_parameter_values():
"""
    Test that `load_parameter_values` raises a ValueError rather than a KeyError.
This is useful to catch and give useful messages when no valid reference to
a data location is given.
Regression test for Issue #247 (https://github.com/pywr/pywr/issues/247)
"""
from pywr.parameters._parameters import load_parameter_values
m = Model()
data = {'name': 'my_parameter', 'type': 'AParameterThatShouldHaveValues'}
with pytest.raises(ValueError):
        load_parameter_values(m, data)
class Test1DPolynomialParameter:
""" Tests for `Polynomial1DParameter` """
def test_init(self, simple_storage_model):
""" Test initialisation raises error with too many keywords """
stg = simple_storage_model.nodes['Storage']
param = ConstantParameter(simple_storage_model, 2.0)
with pytest.raises(ValueError):
# Passing both "parameter" and "storage_node" is invalid
Polynomial1DParameter(simple_storage_model, [0.5, np.pi], parameter=param, storage_node=stg)
def test_1st_order_with_parameter(self, simple_linear_model):
""" Test 1st order with a `Parameter` """
model = simple_linear_model
x = 2.0
p1 = Polynomial1DParameter(model, [0.5, np.pi], parameter=ConstantParameter(model, x))
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi * x
model.run()
def test_2nd_order_with_parameter(self, simple_linear_model):
""" Test 2nd order with a `Parameter` """
model = simple_linear_model
x = 2.0
px = ConstantParameter(model, x)
p1 = Polynomial1DParameter(model, [0.5, np.pi, 3.0], parameter=px)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 3.0*x**2
model.run()
def test_1st_order_with_storage(self, simple_storage_model):
""" Test with a `Storage` node """
model = simple_storage_model
stg = model.nodes['Storage']
x = stg.initial_volume
p1 = Polynomial1DParameter(model, [0.5, np.pi], storage_node=stg)
p2 = Polynomial1DParameter(model, [0.5, np.pi], storage_node=stg, use_proportional_volume=True)
# Test with absolute storage
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x
# Test with proportional storage
@assert_rec(model, p2, name="proportionalassertion")
def expected_func(timestep, scenario_index):
return 0.5 + np.pi * x/stg.max_volume
model.setup()
model.step()
def test_load(self, simple_linear_model):
model = simple_linear_model
x = 1.5
data = {
"type": "polynomial1d",
"coefficients": [0.5, 2.5],
"parameter": {
"type": "constant",
"value": x
}
}
p1 = load_parameter(model, data)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + 2.5*x
model.run()
def test_load_with_scaling(self, simple_linear_model):
model = simple_linear_model
x = 1.5
data = {
"type": "polynomial1d",
"coefficients": [0.5, 2.5],
"parameter": {
"type": "constant",
"value": x
},
"scale": 1.25,
"offset": 0.75
}
xscaled = x*1.25 + 0.75
p1 = load_parameter(model, data)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + 2.5*xscaled
model.run()
def test_interpolated_parameter(simple_linear_model):
model = simple_linear_model
model.timestepper.start = "1920-01-01"
model.timestepper.end = "1920-01-12"
p1 = ArrayIndexedParameter(model, [0,1,2,3,4,5,6,7,8,9,10,11])
p2 = InterpolatedParameter(model, p1, [0, 5, 10, 11], [0, 5*2, 10*3, 2])
@assert_rec(model, p2)
def expected_func(timestep, scenario_index):
values = [0, 2, 4, 6, 8, 10, 14, 18, 22, 26, 30, 2]
return values[timestep.index]
model.run()
class Test2DStoragePolynomialParameter:
def test_1st(self, simple_storage_model):
""" Test 1st order """
model = simple_storage_model
stg = model.nodes['Storage']
x = 2.0
y = stg.initial_volume
coefs = [[0.5, np.pi], [2.5, 0.3]]
p1 = Polynomial2DStorageParameter(model, coefs, stg, ConstantParameter(model, x))
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 2.5*y+ 0.3*x*y
model.setup()
model.step()
def test_load(self, simple_storage_model):
model = simple_storage_model
stg = model.nodes['Storage']
x = 2.0
y = stg.initial_volume/stg.max_volume
data = {
"type": "polynomial2dstorage",
"coefficients": [[0.5, np.pi], [2.5, 0.3]],
"use_proportional_volume": True,
"parameter": {
"type": "constant",
"value": x
},
"storage_node": "Storage"
}
p1 = load_parameter(model, data)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 2.5*y+ 0.3*x*y
model.setup()
model.step()
def test_load_wth_scaling(self, simple_storage_model):
model = simple_storage_model
stg = model.nodes['Storage']
x = 2.0
y = stg.initial_volume/stg.max_volume
data = {
"type": "polynomial2dstorage",
"coefficients": [[0.5, np.pi], [2.5, 0.3]],
"use_proportional_volume": True,
"parameter": {
"type": "constant",
"value": x
},
"storage_node": "Storage",
"storage_scale": 1.3,
"storage_offset": 0.75,
"parameter_scale": 1.25,
"parameter_offset": -0.5
}
p1 = load_parameter(model, data)
# Scaled parameters
x = x*1.25 - 0.5
y = y*1.3 + 0.75
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 2.5*y+ 0.3*x*y
model.setup()
model.step()
class TestMinMaxNegativeParameter:
@pytest.mark.parametrize("ptype,profile", [
("max", list(range(-10, 356))),
("min", list(range(0, 366))),
("negative", list(range(-366, 0))),
("negativemax", list(range(-366, 0))),
])
def test_parameter(cls, simple_linear_model, ptype,profile):
model = simple_linear_model
model.timestepper.start = "2017-01-01"
model.timestepper.end = "2017-01-15"
data = {
"type": ptype,
"parameter": {
"name": "raw",
"type": "dailyprofile",
"values": profile,
}
}
if ptype in ("max", "min"):
data["threshold"] = 3
func = {"min": min, "max": max, "negative": lambda t,x: -x, "negativemax": lambda t,x: max(t, -x)}[ptype]
model.nodes["Input"].max_flow = parameter = load_parameter(model, data)
model.nodes["Output"].max_flow = 9999
model.nodes["Output"].cost = -100
daily_profile = model.parameters["raw"]
@assert_rec(model, parameter)
def expected(timestep, scenario_index):
value = daily_profile.get_value(scenario_index)
return func(3, value)
model.run()
def test_ocptt(simple_linear_model):
model = simple_linear_model
inpt = model.nodes["Input"]
s1 = Scenario(model, "scenario 1", size=3)
    s2 = Scenario(model, "scenario 2", size=2)
x = np.arange(len(model.timestepper)).reshape([len(model.timestepper), 1]) + 5
y = np.arange(s1.size).reshape([1, s1.size])
z = x * y ** 2
p = ArrayIndexedScenarioParameter(model, s1, z)
inpt.max_flow = p
model.setup()
model.reset()
model.step()
values1 = [p.get_value(scenario_index) for scenario_index in model.scenarios.combinations]
values2 = list(p.get_all_values())
assert_allclose(values1, [0, 0, 5, 5, 20, 20])
assert_allclose(values2, [0, 0, 5, 5, 20, 20])
class TestThresholdParameters:
def test_storage_threshold_parameter(self, simple_storage_model):
""" Test StorageThresholdParameter """
m = simple_storage_model
data = {
"type": "storagethreshold",
"storage_node": "Storage",
"threshold": 10.0,
"predicate": ">"
}
p1 = load_parameter(m, data)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
m.nodes['Storage'].initial_volume = 15.0
m.setup()
# Storage > 10
assert p1.index(m.timestepper.current, si) == 1
m.nodes['Storage'].initial_volume = 5.0
m.setup()
# Storage < 10
assert p1.index(m.timestepper.current, si) == 0
def test_node_threshold_parameter2(self, simple_linear_model):
model = simple_linear_model
model.nodes["Input"].max_flow = ArrayIndexedParameter(model, np.arange(0, 20))
model.nodes["Output"].cost = -10.0
model.timestepper.start = "1920-01-01"
model.timestepper.end = "1920-01-15"
model.timestepper.delta = 1
threshold = 5.0
parameters = {}
for predicate in (">", "<", "="):
data = {
"type": "nodethreshold",
"node": "Output",
"threshold": 5.0,
"predicate": predicate,
# we need to define values so AssertionRecorder can be used
"values": [0.0, 1.0],
}
parameter = load_parameter(model, data)
parameter.name = "nodethresold {}".format(predicate)
parameters[predicate] = parameter
if predicate == ">":
expected_data = (np.arange(-1, 20) > threshold).astype(int)
elif predicate == "<":
expected_data = (np.arange(-1, 20) < threshold).astype(int)
else:
expected_data = (np.arange(-1, 20) == threshold).astype(int)
expected_data[0] = 0 # previous flow in initial timestep is undefined
expected_data = expected_data[:, np.newaxis]
rec = AssertionRecorder(model, parameter, expected_data=expected_data, name="assertion recorder {}".format(predicate))
model.run()
@pytest.mark.parametrize("threshold", [
5.0,
{"type": "constant", "value": 5.0},
], ids=["double", "parameter"])
def test_parameter_threshold_parameter(self, simple_linear_model, threshold):
""" Test ParameterThresholdParameter """
m = simple_linear_model
m.nodes['Input'].max_flow = 10.0
m.nodes['Output'].cost = -10.0
data = {
"type": "parameterthreshold",
"parameter": {
"type": "constant",
"value": 3.0
},
"threshold": threshold,
"predicate": "<"
}
p1 = load_parameter(m, data)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
m.setup()
m.step()
# value < 5
assert p1.index(m.timestepper.current, si) == 1
p1.param.update(np.array([8.0,]))
m.setup()
m.step()
# flow < 5
assert p1.index(m.timestepper.current, si) == 0
def test_orphaned_components(simple_linear_model):
model = simple_linear_model
model.nodes["Input"].max_flow = ConstantParameter(model, 10.0)
result = model.find_orphaned_parameters()
assert(not result)
# assert that warning not raised by check
with pytest.warns(None) as record:
model.check()
for w in record:
if isinstance(w, OrphanedParameterWarning):
pytest.fail("OrphanedParameterWarning raised unexpectedly!")
# add some orphans
orphan1 = ConstantParameter(model, 5.0)
orphan2 = ConstantParameter(model, 10.0)
orphans = {orphan1, orphan2}
result = model.find_orphaned_parameters()
assert(orphans == result)
with pytest.warns(OrphanedParameterWarning):
model.check()
def test_deficit_parameter(solver):
"""Test DeficitParameter
Here we test both uses of the DeficitParameter:
1) Recording the deficit for a node each timestep
2) Using yesterday's deficit to control today's flow
"""
model = load_model("deficit.json", solver=solver)
model.run()
max_flow = np.array([5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8])
demand = 10.0
supplied = np.minimum(max_flow, demand)
expected = demand - supplied
actual = model.recorders["deficit_recorder"].data
assert_allclose(expected, actual[:,0])
expected_yesterday = [0]+list(expected[0:-1])
actual_yesterday = model.recorders["yesterday_recorder"].data
assert_allclose(expected_yesterday, actual_yesterday[:,0])
| gpl-3.0 | 5,983,785,841,953,108,000 | 32.570053 | 130 | 0.601169 | false |
bnattes/bgdb | findGame.py | 1 | 1684 | import pandas as pd
searchType = ["ID","Name","Number of Players","Game Length","Categories","Mechanics"]
mechanics = ['Mechanics1','Mechanics2','Mechanics3','Mechanics4','Mechanics5']
categories = ['Categories1','Categories2','Categories3','Categories4','Categories5']
ID, names, minPlayer, maxPlayer = ['ID'], ['Name'], ['minPlayer'], ['maxPlayer']
playTime, description = ['playTime'], ['Description']
for types in range(0,len(searchType)):
print(str(types+1) + ") " + searchType[types])
def getElements(elementType):
output = []
gFile = pd.read_csv('gameslist.csv',usecols=elementType,delimiter='|')
catList = gFile.values.tolist()
for i in catList:
for j in i:
if j!='NIL' and j not in output:
output.append(j)
if any(isinstance(x,int) for x in output):
return sorted(output,key=int)
else:
return sorted(output,key=str.lower)
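# Example sketch (actual output depends on gameslist.csv): getElements(mechanics)
# collects every distinct non-'NIL' entry from the Mechanics1..Mechanics5
# columns and returns them sorted, e.g. ['Area Control', 'Dice Rolling', ...].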
#userType = raw_input("Enter number of your search criteria: ")
#userType = int(userType)
#
#if userType == 1:
# userSearch = raw_input("Enter search query: ")
#elif userType == 2:
# userSearch = raw_input("Enter search query: ")
#elif userType == 3:
# userSearch = raw_input("Enter search query: ")
#elif userType == 4:
# userSearch = raw_input("Enter search query: ")
#elif userType == 5:
# elementList = getElements(categories)
# for i in elementList:
# print(i)
# userSearch = raw_input("Enter search query from list: ")
#elif userType == 6:
# elementList = getElements(mechanics)
# for i in elementList:
# print(i)
# userSearch = raw_input("Enter search query from list: ")
def searchFile(searchType):
| gpl-3.0 | 8,993,912,182,576,660,000 | 34.083333 | 85 | 0.646675 | false |
isb-cgc/User-Data-Processor | isb_cgc_user_data/bigquery_etl/utils/generate_schema.py | 1 | 1428 | #!/usr/bin/env python
# Copyright 2015, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pandas as pd
import numpy as np
import json
from pandas.io import gbq
from gcloud import storage
from cStringIO import StringIO
#------------------------------------------
# This script reads the new line demilited JSON file
# and tries to generate the schema based on the column values
#------------------------------------------
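# Example sketch (hypothetical record): an input line such as
#   {"sample_id": "S1", "value": 0.25}
# would typically be mapped to schema fields like
#   {"name": "sample_id", "type": "STRING"} and {"name": "value", "type": "FLOAT"},
# with the full schema printed below as indented JSON.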
filename = sys.argv[1]
filehandle = open(filename, "r")
# convert new line JSON into dict;
json_string = '[%s]' % ','.join(filehandle.readlines())
# a little time consuming, but is worth converting into dataframe
df = pd.read_json(json_string) # this can be replaced by read_csv for tab-delimited files
filehandle.close()
# use gbq generate bq schema
schema = gbq.generate_bq_schema(df, default_type='STRING')["fields"]
print json.dumps(schema, indent=4)
| apache-2.0 | 7,801,248,351,913,504,000 | 32.209302 | 90 | 0.711485 | false |
leggitta/mne-python | examples/stats/plot_cluster_stats_evoked.py | 18 | 2991 | """
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=2)
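# A quick numerical summary (a sketch; the exact clusters depend on the data):
# each entry of `clusters` is a tuple holding a slice into the time axis, so
#   for c, p in zip(clusters, cluster_p_values):
#       print(epochs1.times[c[0].start], epochs1.times[c[0].stop - 1], p)
# lists the time extent and p-value of every candidate cluster.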
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| bsd-3-clause | 3,223,681,363,937,010,000 | 32.988636 | 79 | 0.566031 | false |
ultimanet/nifty | rg/nifty_rg.py | 1 | 57120 | ## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2015 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. __ ____ __
.. /__/ / _/ / /_
.. __ ___ __ / /_ / _/ __ __
.. / _ | / / / _/ / / / / / /
.. / / / / / / / / / /_ / /_/ /
.. /__/ /__/ /__/ /__/ \___/ \___ / rg
.. /______/
NIFTY submodule for regular Cartesian grids.
"""
from __future__ import division
#from nifty import *
import os
import numpy as np
import pylab as pl
from matplotlib.colors import LogNorm as ln
from matplotlib.ticker import LogFormatter as lf
from nifty.nifty_core import about, \
random, \
space, \
field
import nifty.smoothing as gs
import powerspectrum as gp
try:
import gfft as gf
except(ImportError):
about.infos.cprint('INFO: "plain" gfft version 0.1.0')
import gfft_rg as gf
##-----------------------------------------------------------------------------
class rg_space(space):
"""
.. _____ _______
.. / __/ / _ /
.. / / / /_/ /
.. /__/ \____ / space class
.. /______/
NIFTY subclass for spaces of regular Cartesian grids.
Parameters
----------
num : {int, numpy.ndarray}
Number of gridpoints or numbers of gridpoints along each axis.
naxes : int, *optional*
Number of axes (default: None).
zerocenter : {bool, numpy.ndarray}, *optional*
Whether the Fourier zero-mode is located in the center of the grid
        (or the center of each axis separately) or not (default: True).
hermitian : bool, *optional*
Whether the fields living in the space follow hermitian symmetry or
not (default: True).
purelyreal : bool, *optional*
Whether the field values are purely real (default: True).
dist : {float, numpy.ndarray}, *optional*
Distance between two grid points along each axis (default: None).
fourier : bool, *optional*
Whether the space represents a Fourier or a position grid
(default: False).
Notes
-----
Only even numbers of grid points per axis are supported.
The basis transformations between position `x` and Fourier mode `k`
rely on (inverse) fast Fourier transformations using the
:math:`exp(2 \pi i k^\dagger x)`-formulation.
Attributes
----------
para : numpy.ndarray
One-dimensional array containing information on the axes of the
space in the following form: The first entries give the grid-points
along each axis in reverse order; the next entry is 0 if the
fields defined on the space are purely real-valued, 1 if they are
hermitian and complex, and 2 if they are not hermitian, but
complex-valued; the last entries hold the information on whether
the axes are centered on zero or not, containing a one for each
zero-centered axis and a zero for each other one, in reverse order.
datatype : numpy.dtype
Data type of the field values for a field defined on this space,
either ``numpy.float64`` or ``numpy.complex128``.
discrete : bool
Whether or not the underlying space is discrete, always ``False``
for regular grids.
vol : numpy.ndarray
One-dimensional array containing the distances between two grid
points along each axis, in reverse order. By default, the total
length of each axis is assumed to be one.
fourier : bool
Whether or not the grid represents a Fourier basis.
"""
epsilon = 0.0001 ## relative precision for comparisons
def __init__(self,num,naxes=None,zerocenter=True,hermitian=True,purelyreal=True,dist=None,fourier=False):
"""
Sets the attributes for an rg_space class instance.
Parameters
----------
num : {int, numpy.ndarray}
Number of gridpoints or numbers of gridpoints along each axis.
naxes : int, *optional*
Number of axes (default: None).
zerocenter : {bool, numpy.ndarray}, *optional*
Whether the Fourier zero-mode is located in the center of the
grid (or the center of each axis speparately) or not
(default: True).
hermitian : bool, *optional*
Whether the fields living in the space follow hermitian
symmetry or not (default: True).
purelyreal : bool, *optional*
Whether the field values are purely real (default: True).
dist : {float, numpy.ndarray}, *optional*
Distance between two grid points along each axis
(default: None).
fourier : bool, *optional*
Whether the space represents a Fourier or a position grid
(default: False).
Returns
-------
None
"""
## check parameters
para = np.array([],dtype=np.int)
if(np.isscalar(num)):
num = np.array([num],dtype=np.int)
else:
num = np.array(num,dtype=np.int)
if(np.any(num%2)): ## module restriction
raise ValueError(about._errors.cstring("ERROR: unsupported odd number of grid points."))
if(naxes is None):
naxes = np.size(num)
elif(np.size(num)==1):
num = num*np.ones(naxes,dtype=np.int,order='C')
elif(np.size(num)!=naxes):
raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(num))+" <> "+str(naxes)+" )."))
para = np.append(para,num[::-1],axis=None)
para = np.append(para,2-(bool(hermitian) or bool(purelyreal))-bool(purelyreal),axis=None) ## {0,1,2}
if(np.isscalar(zerocenter)):
zerocenter = bool(zerocenter)*np.ones(naxes,dtype=np.int,order='C')
else:
zerocenter = np.array(zerocenter,dtype=np.bool)
if(np.size(zerocenter)==1):
zerocenter = zerocenter*np.ones(naxes,dtype=np.int,order='C')
elif(np.size(zerocenter)!=naxes):
raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(zerocenter))+" <> "+str(naxes)+" )."))
para = np.append(para,zerocenter[::-1]*-1,axis=None) ## -1 XOR 0 (centered XOR not)
self.para = para
## set data type
if(not self.para[naxes]):
self.datatype = np.float64
else:
self.datatype = np.complex128
self.discrete = False
## set volume
if(dist is None):
dist = 1/num.astype(self.datatype)
elif(np.isscalar(dist)):
dist = self.datatype(dist)*np.ones(naxes,dtype=self.datatype,order='C')
else:
dist = np.array(dist,dtype=self.datatype)
if(np.size(dist)==1):
dist = dist*np.ones(naxes,dtype=self.datatype,order='C')
if(np.size(dist)!=naxes):
raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(dist))+" <> "+str(naxes)+" )."))
if(np.any(dist<=0)):
raise ValueError(about._errors.cstring("ERROR: nonpositive distance(s)."))
self.vol = np.real(dist)[::-1]
self.fourier = bool(fourier)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def naxes(self):
"""
Returns the number of axes of the grid.
Returns
-------
naxes : int
Number of axes of the regular grid.
"""
return (np.size(self.para)-1)//2
def zerocenter(self):
"""
Returns information on the centering of the axes.
Returns
-------
zerocenter : numpy.ndarray
Whether the grid is centered on zero for each axis or not.
"""
return self.para[-(np.size(self.para)-1)//2:][::-1].astype(np.bool)
def dist(self):
"""
Returns the distances between grid points along each axis.
Returns
-------
dist : np.ndarray
Distances between two grid points on each axis.
"""
return self.vol[::-1]
def dim(self,split=False):
"""
Computes the dimension of the space, i.e.\ the number of pixels.
Parameters
----------
split : bool, *optional*
Whether to return the dimension split up, i.e. the numbers of
pixels along each axis, or their product (default: False).
Returns
-------
dim : {int, numpy.ndarray}
Dimension(s) of the space. If ``split==True``, a
one-dimensional array with an entry for each axis is returned.
"""
## dim = product(n)
if(split):
return self.para[:(np.size(self.para)-1)//2]
else:
return np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def dof(self):
"""
Computes the number of degrees of freedom of the space, i.e.\ the
number of grid points multiplied with one or two, depending on
complex-valuedness and hermitian symmetry of the fields.
Returns
-------
dof : int
Number of degrees of freedom of the space.
"""
## dof ~ dim
if(self.para[(np.size(self.para)-1)//2]<2):
return np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
else:
return 2*np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def enforce_power(self,spec,size=None,**kwargs):
"""
Provides a valid power spectrum array from a given object.
Parameters
----------
spec : {float, list, numpy.ndarray, nifty.field, function}
Fiducial power spectrum from which a valid power spectrum is to
be calculated. Scalars are interpreted as constant power
spectra.
Returns
-------
spec : numpy.ndarray
Valid power spectrum.
Other parameters
----------------
size : int, *optional*
Number of bands the power spectrum shall have (default: None).
kindex : numpy.ndarray, *optional*
Scale of each band.
codomain : nifty.space, *optional*
A compatible codomain for power indexing (default: None).
log : bool, *optional*
Flag specifying if the spectral binning is performed on logarithmic
scale or not; if set, the number of used bins is set
automatically (if not given otherwise); by default no binning
is done (default: None).
nbin : integer, *optional*
Number of used spectral bins; if given `log` is set to ``False``;
integers below the minimum of 3 induce an automatic setting;
by default no binning is done (default: None).
binbounds : {list, array}, *optional*
User specific inner boundaries of the bins, which are preferred
over the above parameters; by default no binning is done
            (default: None).
"""
if(size is None)or(callable(spec)):
## explicit kindex
kindex = kwargs.get("kindex",None)
if(kindex is None):
## quick kindex
if(self.fourier)and(not hasattr(self,"power_indices"))and(len(kwargs)==0):
kindex = gp.nklength(gp.nkdict_fast(self.para[:(np.size(self.para)-1)//2],self.vol,fourier=True))
## implicit kindex
else:
try:
self.set_power_indices(**kwargs)
except:
codomain = kwargs.get("codomain",self.get_codomain())
codomain.set_power_indices(**kwargs)
kindex = codomain.power_indices.get("kindex")
else:
kindex = self.power_indices.get("kindex")
size = len(kindex)
if(isinstance(spec,field)):
spec = spec.val.astype(self.datatype)
elif(callable(spec)):
try:
spec = np.array(spec(kindex),dtype=self.datatype)
except:
raise TypeError(about._errors.cstring("ERROR: invalid power spectra function.")) ## exception in ``spec(kindex)``
elif(np.isscalar(spec)):
spec = np.array([spec],dtype=self.datatype)
else:
spec = np.array(spec,dtype=self.datatype)
## drop imaginary part
spec = np.real(spec)
## check finiteness
if(not np.all(np.isfinite(spec))):
about.warnings.cprint("WARNING: infinite value(s).")
## check positivity (excluding null)
if(np.any(spec<0)):
raise ValueError(about._errors.cstring("ERROR: nonpositive value(s)."))
elif(np.any(spec==0)):
about.warnings.cprint("WARNING: nonpositive value(s).")
## extend
if(np.size(spec)==1):
spec = spec*np.ones(size,dtype=spec.dtype,order='C')
## size check
elif(np.size(spec)<size):
raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(spec))+" < "+str(size)+" )."))
elif(np.size(spec)>size):
about.warnings.cprint("WARNING: power spectrum cut to size ( == "+str(size)+" ).")
spec = spec[:size]
return spec
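    ## Illustrative sketch (assuming a Fourier-space instance `k_space`): `spec` may be a
    ## scalar, an array, or a callable evaluated on the implicit kindex, e.g.
    ##     spec = k_space.enforce_power(lambda k: 42/(k + 1)**3)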
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def set_power_indices(self,**kwargs):
"""
Sets the (un)indexing objects for spectral indexing internally.
Parameters
----------
log : bool
Flag specifying if the binning is performed on logarithmic
scale or not; if set, the number of used bins is set
automatically (if not given otherwise); by default no binning
is done (default: None).
nbin : integer
Number of used bins; if given `log` is set to ``False``;
integers below the minimum of 3 induce an automatic setting;
by default no binning is done (default: None).
binbounds : {list, array}
User specific inner boundaries of the bins, which are preferred
over the above parameters; by default no binning is done
(default: None).
Returns
-------
None
See Also
--------
get_power_indices
Raises
------
AttributeError
If ``self.fourier == False``.
ValueError
If the binning leaves one or more bins empty.
"""
if(not self.fourier):
raise AttributeError(about._errors.cstring("ERROR: power spectra indexing ill-defined."))
## check storage
if(hasattr(self,"power_indices")):
config = self.power_indices.get("config")
## check configuration
redo = False
if(config.get("log")!=kwargs.get("log",config.get("log"))):
config["log"] = kwargs.get("log")
redo = True
if(config.get("nbin")!=kwargs.get("nbin",config.get("nbin"))):
config["nbin"] = kwargs.get("nbin")
redo = True
if(np.any(config.get("binbounds")!=kwargs.get("binbounds",config.get("binbounds")))):
config["binbounds"] = kwargs.get("binbounds")
redo = True
if(not redo):
return None
else:
config = {"binbounds":kwargs.get("binbounds",None),"log":kwargs.get("log",None),"nbin":kwargs.get("nbin",None)}
## power indices
about.infos.cflush("INFO: setting power indices ...")
pindex,kindex,rho = gp.get_power_indices2(self.para[:(np.size(self.para)-1)//2],self.vol,self.para[-((np.size(self.para)-1)//2):].astype(np.bool),fourier=True)
## bin if ...
if(config.get("log") is not None)or(config.get("nbin") is not None)or(config.get("binbounds") is not None):
pindex,kindex,rho = gp.bin_power_indices(pindex,kindex,rho,**config)
## check binning
if(np.any(rho==0)):
raise ValueError(about._errors.cstring("ERROR: empty bin(s).")) ## binning too fine
## power undex
pundex = np.unique(pindex,return_index=True,return_inverse=False)[1]
## storage
self.power_indices = {"config":config,"kindex":kindex,"pindex":pindex,"pundex":pundex,"rho":rho} ## alphabetical
about.infos.cprint(" done.")
return None
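    ## Illustrative sketch (assuming a Fourier-space instance `k_space`): logarithmic binning
    ## of the spectral indices could be requested as
    ##     k_space.set_power_indices(log=True, nbin=20)
    ##     kindex = k_space.power_indices.get("kindex")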
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def enforce_values(self,x,extend=True):
"""
Computes valid field values from a given object, taking care of
data types, shape, and symmetry.
Parameters
----------
x : {float, numpy.ndarray, nifty.field}
Object to be transformed into an array of valid field values.
Returns
-------
x : numpy.ndarray
Array containing the valid field values.
Other parameters
----------------
extend : bool, *optional*
Whether a scalar is extented to a constant array or not
(default: True).
"""
if(isinstance(x,field)):
if(self==x.domain):
if(self.datatype is not x.domain.datatype):
raise TypeError(about._errors.cstring("ERROR: inequal data types ( '"+str(np.result_type(self.datatype))+"' <> '"+str(np.result_type(x.domain.datatype))+"' )."))
else:
x = np.copy(x.val)
else:
raise ValueError(about._errors.cstring("ERROR: inequal domains."))
else:
if(np.size(x)==1):
if(extend):
x = self.datatype(x)*np.ones(self.dim(split=True),dtype=self.datatype,order='C')
else:
if(np.isscalar(x)):
x = np.array([x],dtype=self.datatype)
else:
x = np.array(x,dtype=self.datatype)
else:
x = self.enforce_shape(np.array(x,dtype=self.datatype))
## hermitianize if ...
if(about.hermitianize.status)and(np.size(x)!=1)and(self.para[(np.size(self.para)-1)//2]==1):
x = gp.nhermitianize_fast(x,self.para[-((np.size(self.para)-1)//2):].astype(np.bool),special=False)
## check finiteness
if(not np.all(np.isfinite(x))):
about.warnings.cprint("WARNING: infinite value(s).")
return x
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_random_values(self,**kwargs):
"""
Generates random field values according to the specifications given
by the parameters, taking into account possible complex-valuedness
and hermitian symmetry.
Returns
-------
x : numpy.ndarray
Valid field values.
Other parameters
----------------
random : string, *optional*
Specifies the probability distribution from which the random
numbers are to be drawn.
Supported distributions are:
- "pm1" (uniform distribution over {+1,-1} or {+1,+i,-1,-i}
- "gau" (normal distribution with zero-mean and a given standard
deviation or variance)
- "syn" (synthesizes from a given power spectrum)
- "uni" (uniform distribution over [vmin,vmax[)
(default: None).
dev : float, *optional*
Standard deviation (default: 1).
var : float, *optional*
Variance, overriding `dev` if both are specified
(default: 1).
spec : {scalar, list, numpy.ndarray, nifty.field, function}, *optional*
Power spectrum (default: 1).
pindex : numpy.ndarray, *optional*
Indexing array giving the power spectrum index of each band
(default: None).
kindex : numpy.ndarray, *optional*
Scale of each band (default: None).
codomain : nifty.rg_space, *optional*
A compatible codomain with power indices (default: None).
log : bool, *optional*
Flag specifying if the spectral binning is performed on logarithmic
scale or not; if set, the number of used bins is set
automatically (if not given otherwise); by default no binning
is done (default: None).
nbin : integer, *optional*
Number of used spectral bins; if given `log` is set to ``False``;
integers below the minimum of 3 induce an automatic setting;
by default no binning is done (default: None).
binbounds : {list, array}, *optional*
User specific inner boundaries of the bins, which are preferred
over the above parameters; by default no binning is done
            (default: None).
        vmin : float, *optional*
            Lower limit for a uniform distribution (default: 0).
vmax : float, *optional*
Upper limit for a uniform distribution (default: 1).
"""
arg = random.arguments(self,**kwargs)
if(arg is None):
return np.zeros(self.dim(split=True),dtype=self.datatype,order='C')
elif(arg[0]=="pm1"):
if(about.hermitianize.status)and(self.para[(np.size(self.para)-1)//2]==1):
return gp.random_hermitian_pm1(self.datatype,self.para[-((np.size(self.para)-1)//2):].astype(np.bool),self.dim(split=True)) ## special case
else:
x = random.pm1(datatype=self.datatype,shape=self.dim(split=True))
elif(arg[0]=="gau"):
x = random.gau(datatype=self.datatype,shape=self.dim(split=True),mean=None,dev=arg[2],var=arg[3])
elif(arg[0]=="syn"):
naxes = (np.size(self.para)-1)//2
x = gp.draw_vector_nd(self.para[:naxes],self.vol,arg[1],symtype=self.para[naxes],fourier=self.fourier,zerocentered=self.para[-naxes:].astype(np.bool),kpack=arg[2])
## correct for 'ifft'
if(not self.fourier):
x = self.calc_weight(x,power=-1)
return x
elif(arg[0]=="uni"):
x = random.uni(datatype=self.datatype,shape=self.dim(split=True),vmin=arg[1],vmax=arg[2])
else:
raise KeyError(about._errors.cstring("ERROR: unsupported random key '"+str(arg[0])+"'."))
## hermitianize if ...
if(about.hermitianize.status)and(self.para[(np.size(self.para)-1)//2]==1):
x = gp.nhermitianize_fast(x,self.para[-((np.size(self.para)-1)//2):].astype(np.bool),special=(arg[0] in ["gau","pm1"]))
return x
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_codomain(self,codomain):
"""
Checks whether a given codomain is compatible to the space or not.
Parameters
----------
codomain : nifty.space
Space to be checked for compatibility.
Returns
-------
check : bool
Whether or not the given codomain is compatible to the space.
"""
if(not isinstance(codomain,space)):
raise TypeError(about._errors.cstring("ERROR: invalid input."))
elif(isinstance(codomain,rg_space)):
## naxes==naxes
if((np.size(codomain.para)-1)//2==(np.size(self.para)-1)//2):
naxes = (np.size(self.para)-1)//2
## num'==num
if(np.all(codomain.para[:naxes]==self.para[:naxes])):
## typ'==typ ==2
if(codomain.para[naxes]==self.para[naxes]==2):
## dist'~=1/(num*dist)
if(np.all(np.absolute(self.para[:naxes]*self.vol*codomain.vol-1)<self.epsilon)):
return True
## fourier'==fourier
elif(codomain.fourier==self.fourier):
## dist'~=dist
if(np.all(np.absolute(codomain.vol/self.vol-1)<self.epsilon)):
return True
else:
about.warnings.cprint("WARNING: unrecommended codomain.")
## 2!= typ'!=typ !=2 dist'~=1/(num*dist)
elif(2!=codomain.para[naxes]!=self.para[naxes]!=2)and(np.all(np.absolute(self.para[:naxes]*self.vol*codomain.vol-1)<self.epsilon)):
return True
## typ'==typ !=2
elif(codomain.para[naxes]==self.para[naxes]!=2)and(codomain.fourier==self.fourier):
## dist'~=dist
if(np.all(np.absolute(codomain.vol/self.vol-1)<self.epsilon)):
return True
else:
about.warnings.cprint("WARNING: unrecommended codomain.")
return False
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_codomain(self,coname=None,cozerocenter=None,**kwargs):
"""
Generates a compatible codomain to which transformations are
reasonable, i.e.\ either a shifted grid or a Fourier conjugate
grid.
Parameters
----------
coname : string, *optional*
String specifying a desired codomain (default: None).
cozerocenter : {bool, numpy.ndarray}, *optional*
Whether or not the grid is zerocentered for each axis or not
(default: None).
Returns
-------
codomain : nifty.rg_space
A compatible codomain.
Notes
-----
Possible arguments for `coname` are ``'f'`` in which case the
codomain arises from a Fourier transformation, ``'i'`` in which case
it arises from an inverse Fourier transformation, and ``'?'`` in
which case it arises from a simple shift. If no `coname` is given,
the Fourier conjugate grid is produced.
"""
naxes = (np.size(self.para)-1)//2
if(cozerocenter is None):
cozerocenter = self.para[-naxes:][::-1]
elif(np.isscalar(cozerocenter)):
cozerocenter = bool(cozerocenter)
else:
cozerocenter = np.array(cozerocenter,dtype=np.bool)
if(np.size(cozerocenter)==1):
cozerocenter = np.asscalar(cozerocenter)
elif(np.size(cozerocenter)!=naxes):
raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(cozerocenter))+" <> "+str(naxes)+" )."))
if(coname is None):
return rg_space(self.para[:naxes][::-1],naxes=naxes,zerocenter=cozerocenter,hermitian=bool(self.para[naxes]<2),purelyreal=bool(self.para[naxes]==1),dist=1/(self.para[:naxes]*self.vol)[::-1],fourier=bool(not self.fourier)) ## dist',fourier' = 1/(num*dist),NOT fourier
elif(coname[0]=='f'):
return rg_space(self.para[:naxes][::-1],naxes=naxes,zerocenter=cozerocenter,hermitian=bool(self.para[naxes]<2),purelyreal=bool(self.para[naxes]==1),dist=1/(self.para[:naxes]*self.vol)[::-1],fourier=True) ## dist',fourier' = 1/(num*dist),True
elif(coname[0]=='i'):
return rg_space(self.para[:naxes][::-1],naxes=naxes,zerocenter=cozerocenter,hermitian=bool(self.para[naxes]<2),purelyreal=bool(self.para[naxes]==1),dist=1/(self.para[:naxes]*self.vol)[::-1],fourier=False) ## dist',fourier' = 1/(num*dist),False
else:
return rg_space(self.para[:naxes][::-1],naxes=naxes,zerocenter=cozerocenter,hermitian=bool(self.para[naxes]<2),purelyreal=bool(not self.para[naxes]),dist=self.vol[::-1],fourier=self.fourier) ## dist',fourier' = dist,fourier
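    ## Illustrative sketch (grid size and distance below are assumptions):
    ##     x_space = rg_space([256,256], dist=0.5, fourier=False)
    ##     k_space = x_space.get_codomain('f')       ## dist' = 1/(num*dist) = 1/128, fourier=True
    ##     x_space.check_codomain(k_space)           ## True for conjugate grids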
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_meta_volume(self,total=False):
"""
Calculates the meta volumes.
The meta volumes are the volumes associated with each component of
a field, taking into account field components that are not
explicitly included in the array of field values but are determined
by symmetry conditions. In the case of an :py:class:`rg_space`, the
meta volumes are simply the pixel volumes.
Parameters
----------
total : bool, *optional*
Whether to return the total meta volume of the space or the
individual ones of each pixel (default: False).
Returns
-------
mol : {numpy.ndarray, float}
Meta volume of the pixels or the complete space.
"""
if(total):
return self.dim(split=False)*np.prod(self.vol,axis=0,dtype=None,out=None)
else:
mol = np.ones(self.dim(split=True),dtype=self.vol.dtype,order='C')
return self.calc_weight(mol,power=1)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_weight(self,x,power=1):
"""
Weights a given array with the pixel volumes to a given power.
Parameters
----------
x : numpy.ndarray
Array to be weighted.
power : float, *optional*
Power of the pixel volumes to be used (default: 1).
Returns
-------
y : numpy.ndarray
Weighted array.
"""
x = self.enforce_shape(np.array(x,dtype=self.datatype))
## weight
return x*np.prod(self.vol,axis=0,dtype=None,out=None)**power
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_dot(self,x,y):
"""
Computes the discrete inner product of two given arrays.
Parameters
----------
x : numpy.ndarray
First array
y : numpy.ndarray
Second array
Returns
-------
dot : scalar
Inner product of the two arrays.
"""
x = self.enforce_shape(np.array(x,dtype=self.datatype))
y = self.enforce_shape(np.array(y,dtype=self.datatype))
## inner product
dot = np.dot(np.conjugate(x.flatten(order='C')),y.flatten(order='C'),out=None)
if(np.isreal(dot)):
return np.asscalar(np.real(dot))
elif(self.para[(np.size(self.para)-1)//2]!=2):
## check imaginary part
if(np.absolute(dot.imag)>self.epsilon**2*np.absolute(dot.real)):
about.warnings.cprint("WARNING: discarding considerable imaginary part.")
return np.asscalar(np.real(dot))
else:
return dot
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_transform(self,x,codomain=None,**kwargs):
"""
Computes the transform of a given array of field values.
Parameters
----------
x : numpy.ndarray
Array to be transformed.
codomain : nifty.rg_space, *optional*
Target space to which the transformation shall map
(default: None).
Returns
-------
Tx : numpy.ndarray
Transformed array
"""
x = self.enforce_shape(np.array(x,dtype=self.datatype))
if(codomain is None):
return x ## T == id
## mandatory(!) codomain check
if(isinstance(codomain,rg_space))and(self.check_codomain(codomain)):
naxes = (np.size(self.para)-1)//2
## select machine
if(np.all(np.absolute(self.para[:naxes]*self.vol*codomain.vol-1)<self.epsilon)):
if(codomain.fourier):
ftmachine = "fft"
## correct for 'fft'
x = self.calc_weight(x,power=1)
else:
ftmachine = "ifft"
## correct for 'ifft'
x = self.calc_weight(x,power=1)
x *= self.dim(split=False)
else:
ftmachine = "none"
## transform
if(self.datatype==np.float64):
Tx = gf.gfft(x.astype(np.complex128),in_ax=[],out_ax=[],ftmachine=ftmachine,in_zero_center=self.para[-naxes:].astype(np.bool).tolist(),out_zero_center=codomain.para[-naxes:].astype(np.bool).tolist(),enforce_hermitian_symmetry=bool(codomain.para[naxes]==1),W=-1,alpha=-1,verbose=False)
else:
Tx = gf.gfft(x,in_ax=[],out_ax=[],ftmachine=ftmachine,in_zero_center=self.para[-naxes:].astype(np.bool).tolist(),out_zero_center=codomain.para[-naxes:].astype(np.bool).tolist(),enforce_hermitian_symmetry=bool(codomain.para[naxes]==1),W=-1,alpha=-1,verbose=False)
## check complexity
if(not codomain.para[naxes]): ## purely real
## check imaginary part
if(np.any(Tx.imag!=0))and(np.dot(Tx.imag.flatten(order='C'),Tx.imag.flatten(order='C'),out=None)>self.epsilon**2*np.dot(Tx.real.flatten(order='C'),Tx.real.flatten(order='C'),out=None)):
about.warnings.cprint("WARNING: discarding considerable imaginary part.")
Tx = np.real(Tx)
else:
raise ValueError(about._errors.cstring("ERROR: unsupported transformation."))
return Tx.astype(codomain.datatype)
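    ## Illustrative sketch (assuming a position-space instance `x_space` and field values `x`):
    ##     Tx = x_space.calc_transform(x, codomain=x_space.get_codomain())  ## volume-weighted FFT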
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_smooth(self,x,sigma=0,**kwargs):
"""
Smoothes an array of field values by convolution with a Gaussian
kernel.
Parameters
----------
x : numpy.ndarray
Array of field values to be smoothed.
sigma : float, *optional*
Standard deviation of the Gaussian kernel, specified in units
of length in position space; for testing: a sigma of -1 will be
reset to a reasonable value (default: 0).
Returns
-------
Gx : numpy.ndarray
Smoothed array.
"""
x = self.enforce_shape(np.array(x,dtype=self.datatype))
naxes = (np.size(self.para)-1)//2
## check sigma
if(sigma==0):
return x
elif(sigma==-1):
about.infos.cprint("INFO: invalid sigma reset.")
if(self.fourier):
sigma = 1.5/np.min(self.para[:naxes]*self.vol) ## sqrt(2)*max(dist)
else:
sigma = 1.5*np.max(self.vol) ## sqrt(2)*max(dist)
elif(sigma<0):
raise ValueError(about._errors.cstring("ERROR: invalid sigma."))
## smooth
Gx = gs.smooth_field(x,self.fourier,self.para[-naxes:].astype(np.bool).tolist(),bool(self.para[naxes]==1),self.vol,smooth_length=sigma)
## check complexity
if(not self.para[naxes]): ## purely real
## check imaginary part
if(np.any(Gx.imag!=0))and(np.dot(Gx.imag.flatten(order='C'),Gx.imag.flatten(order='C'),out=None)>self.epsilon**2*np.dot(Gx.real.flatten(order='C'),Gx.real.flatten(order='C'),out=None)):
about.warnings.cprint("WARNING: discarding considerable imaginary part.")
Gx = np.real(Gx)
return Gx
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_power(self,x,**kwargs):
"""
Computes the power of an array of field values.
Parameters
----------
x : numpy.ndarray
Array containing the field values of which the power is to be
calculated.
Returns
-------
spec : numpy.ndarray
Power contained in the input array.
Other parameters
----------------
pindex : numpy.ndarray, *optional*
Indexing array assigning the input array components to
components of the power spectrum (default: None).
kindex : numpy.ndarray, *optional*
Scale corresponding to each band in the power spectrum
(default: None).
rho : numpy.ndarray, *optional*
Number of degrees of freedom per band (default: None).
codomain : nifty.space, *optional*
A compatible codomain for power indexing (default: None).
log : bool, *optional*
Flag specifying if the spectral binning is performed on logarithmic
scale or not; if set, the number of used bins is set
automatically (if not given otherwise); by default no binning
is done (default: None).
nbin : integer, *optional*
Number of used spectral bins; if given `log` is set to ``False``;
integers below the minimum of 3 induce an automatic setting;
by default no binning is done (default: None).
binbounds : {list, array}, *optional*
User specific inner boundaries of the bins, which are preferred
over the above parameters; by default no binning is done
            (default: None).
"""
x = self.enforce_shape(np.array(x,dtype=self.datatype))
## correct for 'fft'
if(not self.fourier):
x = self.calc_weight(x,power=1)
## explicit power indices
pindex,kindex,rho = kwargs.get("pindex",None),kwargs.get("kindex",None),kwargs.get("rho",None)
## implicit power indices
if(pindex is None)or(kindex is None)or(rho is None):
try:
self.set_power_indices(**kwargs)
except:
codomain = kwargs.get("codomain",self.get_codomain())
codomain.set_power_indices(**kwargs)
pindex,kindex,rho = codomain.power_indices.get("pindex"),codomain.power_indices.get("kindex"),codomain.power_indices.get("rho")
else:
pindex,kindex,rho = self.power_indices.get("pindex"),self.power_indices.get("kindex"),self.power_indices.get("rho")
## power spectrum
return gp.calc_ps_fast(x,self.para[:(np.size(self.para)-1)//2],self.vol,self.para[-((np.size(self.para)-1)//2):].astype(np.bool),fourier=self.fourier,pindex=pindex,kindex=kindex,rho=rho)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_plot(self,x,title="",vmin=None,vmax=None,power=None,unit="",norm=None,cmap=None,cbar=True,other=None,legend=False,mono=True,**kwargs):
"""
Creates a plot of field values according to the specifications
given by the parameters.
Parameters
----------
x : numpy.ndarray
Array containing the field values.
Returns
-------
None
Other parameters
----------------
title : string, *optional*
Title of the plot (default: "").
vmin : float, *optional*
Minimum value to be displayed (default: ``min(x)``).
vmax : float, *optional*
Maximum value to be displayed (default: ``max(x)``).
power : bool, *optional*
Whether to plot the power contained in the field or the field
values themselves (default: False).
unit : string, *optional*
Unit of the field values (default: "").
norm : string, *optional*
Scaling of the field values before plotting (default: None).
cmap : matplotlib.colors.LinearSegmentedColormap, *optional*
Color map to be used for two-dimensional plots (default: None).
cbar : bool, *optional*
Whether to show the color bar or not (default: True).
other : {single object, tuple of objects}, *optional*
Object or tuple of objects to be added, where objects can be
scalars, arrays, or fields (default: None).
legend : bool, *optional*
Whether to show the legend or not (default: False).
mono : bool, *optional*
Whether to plot the monopole or not (default: True).
save : string, *optional*
Valid file name where the figure is to be stored, by default
the figure is not saved (default: False).
error : {float, numpy.ndarray, nifty.field}, *optional*
Object indicating some confidence interval to be plotted
(default: None).
kindex : numpy.ndarray, *optional*
Scale corresponding to each band in the power spectrum
(default: None).
codomain : nifty.space, *optional*
A compatible codomain for power indexing (default: None).
log : bool, *optional*
Flag specifying if the spectral binning is performed on logarithmic
scale or not; if set, the number of used bins is set
automatically (if not given otherwise); by default no binning
is done (default: None).
nbin : integer, *optional*
Number of used spectral bins; if given `log` is set to ``False``;
integers below the minimum of 3 induce an automatic setting;
by default no binning is done (default: None).
binbounds : {list, array}, *optional*
User specific inner boundaries of the bins, which are preferred
over the above parameters; by default no binning is done
            (default: None).
"""
if(not pl.isinteractive())and(not bool(kwargs.get("save",False))):
about.warnings.cprint("WARNING: interactive mode off.")
naxes = (np.size(self.para)-1)//2
if(power is None):
power = bool(self.para[naxes])
if(power):
x = self.calc_power(x,**kwargs)
fig = pl.figure(num=None,figsize=(6.4,4.8),dpi=None,facecolor="none",edgecolor="none",frameon=False,FigureClass=pl.Figure)
ax0 = fig.add_axes([0.12,0.12,0.82,0.76])
## explicit kindex
xaxes = kwargs.get("kindex",None)
## implicit kindex
if(xaxes is None):
try:
self.set_power_indices(**kwargs)
except:
codomain = kwargs.get("codomain",self.get_codomain())
codomain.set_power_indices(**kwargs)
xaxes = codomain.power_indices.get("kindex")
else:
xaxes = self.power_indices.get("kindex")
if(norm is None)or(not isinstance(norm,int)):
norm = naxes
if(vmin is None):
vmin = np.min(x[:mono].tolist()+(xaxes**norm*x)[1:].tolist(),axis=None,out=None)
if(vmax is None):
vmax = np.max(x[:mono].tolist()+(xaxes**norm*x)[1:].tolist(),axis=None,out=None)
ax0.loglog(xaxes[1:],(xaxes**norm*x)[1:],color=[0.0,0.5,0.0],label="graph 0",linestyle='-',linewidth=2.0,zorder=1)
if(mono):
ax0.scatter(0.5*(xaxes[1]+xaxes[2]),x[0],s=20,color=[0.0,0.5,0.0],marker='o',cmap=None,norm=None,vmin=None,vmax=None,alpha=None,linewidths=None,verts=None,zorder=1)
if(other is not None):
if(isinstance(other,tuple)):
other = list(other)
for ii in xrange(len(other)):
if(isinstance(other[ii],field)):
other[ii] = other[ii].power(**kwargs)
else:
other[ii] = self.enforce_power(other[ii],size=np.size(xaxes),kindex=xaxes)
elif(isinstance(other,field)):
other = [other.power(**kwargs)]
else:
other = [self.enforce_power(other,size=np.size(xaxes),kindex=xaxes)]
imax = max(1,len(other)-1)
for ii in xrange(len(other)):
ax0.loglog(xaxes[1:],(xaxes**norm*other[ii])[1:],color=[max(0.0,1.0-(2*ii/imax)**2),0.5*((2*ii-imax)/imax)**2,max(0.0,1.0-(2*(ii-imax)/imax)**2)],label="graph "+str(ii+1),linestyle='-',linewidth=1.0,zorder=-ii)
if(mono):
ax0.scatter(0.5*(xaxes[1]+xaxes[2]),other[ii][0],s=20,color=[max(0.0,1.0-(2*ii/imax)**2),0.5*((2*ii-imax)/imax)**2,max(0.0,1.0-(2*(ii-imax)/imax)**2)],marker='o',cmap=None,norm=None,vmin=None,vmax=None,alpha=None,linewidths=None,verts=None,zorder=-ii)
if(legend):
ax0.legend()
ax0.set_xlim(xaxes[1],xaxes[-1])
ax0.set_xlabel(r"$|k|$")
ax0.set_ylim(vmin,vmax)
ax0.set_ylabel(r"$|k|^{%i} P_k$"%norm)
ax0.set_title(title)
else:
x = self.enforce_shape(np.array(x))
if(naxes==1):
fig = pl.figure(num=None,figsize=(6.4,4.8),dpi=None,facecolor="none",edgecolor="none",frameon=False,FigureClass=pl.Figure)
ax0 = fig.add_axes([0.12,0.12,0.82,0.76])
xaxes = (np.arange(self.para[0],dtype=np.int)+self.para[2]*(self.para[0]//2))*self.vol
if(vmin is None):
if(np.iscomplexobj(x)):
vmin = min(np.min(np.absolute(x),axis=None,out=None),np.min(np.real(x),axis=None,out=None),np.min(np.imag(x),axis=None,out=None))
else:
vmin = np.min(x,axis=None,out=None)
if(vmax is None):
if(np.iscomplexobj(x)):
vmax = max(np.max(np.absolute(x),axis=None,out=None),np.max(np.real(x),axis=None,out=None),np.max(np.imag(x),axis=None,out=None))
else:
vmax = np.max(x,axis=None,out=None)
if(norm=="log"):
ax0graph = ax0.semilogy
if(vmin<=0):
raise ValueError(about._errors.cstring("ERROR: nonpositive value(s)."))
else:
ax0graph = ax0.plot
if(np.iscomplexobj(x)):
ax0graph(xaxes,np.absolute(x),color=[0.0,0.5,0.0],label="graph (absolute)",linestyle='-',linewidth=2.0,zorder=1)
ax0graph(xaxes,np.real(x),color=[0.0,0.5,0.0],label="graph (real part)",linestyle="--",linewidth=1.0,zorder=0)
ax0graph(xaxes,np.imag(x),color=[0.0,0.5,0.0],label="graph (imaginary part)",linestyle=':',linewidth=1.0,zorder=0)
if(legend):
ax0.legend()
elif(other is not None):
ax0graph(xaxes,x,color=[0.0,0.5,0.0],label="graph 0",linestyle='-',linewidth=2.0,zorder=1)
if(isinstance(other,tuple)):
other = [self.enforce_values(xx,extend=True) for xx in other]
else:
other = [self.enforce_values(other,extend=True)]
imax = max(1,len(other)-1)
for ii in xrange(len(other)):
ax0graph(xaxes,other[ii],color=[max(0.0,1.0-(2*ii/imax)**2),0.5*((2*ii-imax)/imax)**2,max(0.0,1.0-(2*(ii-imax)/imax)**2)],label="graph "+str(ii+1),linestyle='-',linewidth=1.0,zorder=-ii)
if("error" in kwargs):
error = self.enforce_values(np.absolute(kwargs.get("error")),extend=True)
ax0.fill_between(xaxes,x-error,x+error,color=[0.8,0.8,0.8],label="error 0",zorder=-len(other))
if(legend):
ax0.legend()
else:
ax0graph(xaxes,x,color=[0.0,0.5,0.0],label="graph 0",linestyle='-',linewidth=2.0,zorder=1)
if("error" in kwargs):
error = self.enforce_values(np.absolute(kwargs.get("error")),extend=True)
ax0.fill_between(xaxes,x-error,x+error,color=[0.8,0.8,0.8],label="error 0",zorder=0)
ax0.set_xlim(xaxes[0],xaxes[-1])
ax0.set_xlabel("coordinate")
ax0.set_ylim(vmin,vmax)
if(unit):
unit = " ["+unit+"]"
ax0.set_ylabel("values"+unit)
ax0.set_title(title)
elif(naxes==2):
if(np.iscomplexobj(x)):
about.infos.cprint("INFO: absolute values and phases are plotted.")
if(title):
title += " "
if(bool(kwargs.get("save",False))):
save_ = os.path.splitext(os.path.basename(str(kwargs.get("save"))))
kwargs.update(save=save_[0]+"_absolute"+save_[1])
self.get_plot(np.absolute(x),title=title+"(absolute)",vmin=vmin,vmax=vmax,power=False,unit=unit,norm=norm,cmap=cmap,cbar=cbar,other=None,legend=False,**kwargs)
# self.get_plot(np.real(x),title=title+"(real part)",vmin=vmin,vmax=vmax,power=False,unit=unit,norm=norm,cmap=cmap,cbar=cbar,other=None,legend=False,**kwargs)
# self.get_plot(np.imag(x),title=title+"(imaginary part)",vmin=vmin,vmax=vmax,power=False,unit=unit,norm=norm,cmap=cmap,cbar=cbar,other=None,legend=False,**kwargs)
if(unit):
unit = "rad"
if(cmap is None):
cmap = pl.cm.hsv_r
if(bool(kwargs.get("save",False))):
kwargs.update(save=save_[0]+"_phase"+save_[1])
self.get_plot(np.angle(x,deg=False),title=title+"(phase)",vmin=-3.1416,vmax=3.1416,power=False,unit=unit,norm=None,cmap=cmap,cbar=cbar,other=None,legend=False,**kwargs) ## values in [-pi,pi]
return None ## leave method
else:
if(vmin is None):
vmin = np.min(x,axis=None,out=None)
if(vmax is None):
vmax = np.max(x,axis=None,out=None)
if(norm=="log")and(vmin<=0):
raise ValueError(about._errors.cstring("ERROR: nonpositive value(s)."))
s_ = np.array([self.para[1]*self.vol[1]/np.max(self.para[:naxes]*self.vol,axis=None,out=None),self.para[0]*self.vol[0]/np.max(self.para[:naxes]*self.vol,axis=None,out=None)*(1.0+0.159*bool(cbar))])
fig = pl.figure(num=None,figsize=(6.4*s_[0],6.4*s_[1]),dpi=None,facecolor="none",edgecolor="none",frameon=False,FigureClass=pl.Figure)
ax0 = fig.add_axes([0.06/s_[0],0.06/s_[1],1.0-0.12/s_[0],1.0-0.12/s_[1]])
xaxes = (np.arange(self.para[1]+1,dtype=np.int)-0.5+self.para[4]*(self.para[1]//2))*self.vol[1]
yaxes = (np.arange(self.para[0]+1,dtype=np.int)-0.5+self.para[3]*(self.para[0]//2))*self.vol[0]
if(norm=="log"):
n_ = ln(vmin=vmin,vmax=vmax)
else:
n_ = None
sub = ax0.pcolormesh(xaxes,yaxes,x,cmap=cmap,norm=n_,vmin=vmin,vmax=vmax)
ax0.set_xlim(xaxes[0],xaxes[-1])
ax0.set_xticks([0],minor=False)
ax0.set_ylim(yaxes[0],yaxes[-1])
ax0.set_yticks([0],minor=False)
ax0.set_aspect("equal")
if(cbar):
if(norm=="log"):
f_ = lf(10,labelOnlyBase=False)
b_ = sub.norm.inverse(np.linspace(0,1,sub.cmap.N+1))
v_ = np.linspace(sub.norm.vmin,sub.norm.vmax,sub.cmap.N)
else:
f_ = None
b_ = None
v_ = None
cb0 = fig.colorbar(sub,ax=ax0,orientation="horizontal",fraction=0.1,pad=0.05,shrink=0.75,aspect=20,ticks=[vmin,vmax],format=f_,drawedges=False,boundaries=b_,values=v_)
cb0.ax.text(0.5,-1.0,unit,fontdict=None,withdash=False,transform=cb0.ax.transAxes,horizontalalignment="center",verticalalignment="center")
ax0.set_title(title)
else:
raise ValueError(about._errors.cstring("ERROR: unsupported number of axes ( "+str(naxes)+" > 2 )."))
if(bool(kwargs.get("save",False))):
fig.savefig(str(kwargs.get("save")),dpi=None,facecolor="none",edgecolor="none",orientation="portrait",papertype=None,format=None,transparent=False,bbox_inches=None,pad_inches=0.1)
pl.close(fig)
else:
fig.canvas.draw()
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def __repr__(self):
return "<nifty_rg.rg_space>"
def __str__(self):
naxes = (np.size(self.para)-1)//2
num = self.para[:naxes][::-1].tolist()
zerocenter = self.para[-naxes:][::-1].astype(np.bool).tolist()
dist = self.vol[::-1].tolist()
return "nifty_rg.rg_space instance\n- num = "+str(num)+"\n- naxes = "+str(naxes)+"\n- hermitian = "+str(bool(self.para[naxes]<2))+"\n- purelyreal = "+str(bool(not self.para[naxes]))+"\n- zerocenter = "+str(zerocenter)+"\n- dist = "+str(dist)+"\n- fourier = "+str(self.fourier)
##-----------------------------------------------------------------------------
| gpl-3.0 | 1,432,406,918,628,683,300 | 45.896552 | 306 | 0.517682 | false |
mark-burnett/filament-dynamics | plot_scripts/contexts.py | 1 | 4059 | # Copyright (C) 2011 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#from matplotlib.backends.backend_ps import FigureCanvasPS as _FigureCanvas
#from matplotlib.backends.backend_svg import FigureCanvasSVG as _FigureCanvas
from matplotlib.backends.backend_pdf import FigureCanvasPdf as _FigureCanvas
from matplotlib.figure import Figure as _Figure
import contextlib
import itertools
from plot_scripts import settings
# Change some MPL default settings
import matplotlib
matplotlib.rc('font', size=settings.DEFAULT_FONT_SIZE)
@contextlib.contextmanager
def complex_figure(filename, dpi=settings.DPI,
width=settings.SINGLE_COLUMN_DEFAULT_SIZE_CM,
height=settings.SINGLE_COLUMN_DEFAULT_SIZE_CM,
draw_frame=False, right_label=False,
**unused_kwargs):
scaled_width = width / settings.CM_SCALE
scaled_height = height / settings.CM_SCALE
scaled_top_margin = 1 - settings.TOP_MARGIN / height
scaled_bottom_margin = settings.BOTTOM_MARGIN / height
scaled_left_margin = settings.LEFT_MARGIN / width
scaled_right_margin = 1 - settings.RIGHT_MARGIN / width
if right_label:
figure = _Figure(dpi=dpi, frameon=draw_frame,
linewidth=settings.DEFAULT_FRAME_LINE_WIDTH,
figsize=(scaled_width, scaled_height),
subplotpars=matplotlib.figure.SubplotParams(
bottom=scaled_bottom_margin,
left=scaled_left_margin,
right=scaled_right_margin))
else:
figure = _Figure(dpi=dpi, frameon=draw_frame,
linewidth=settings.DEFAULT_FRAME_LINE_WIDTH,
figsize=(scaled_width, scaled_height),
subplotpars=matplotlib.figure.SubplotParams(
bottom=scaled_bottom_margin,
left=scaled_left_margin))
yield figure
canvas = _FigureCanvas(figure)
figure.savefig(filename)
@contextlib.contextmanager
def subplot(figure, location, logscale_x=False, logscale_y=False,
x_label=None, y_label=None,
title=None, title_position=0.05,
**unused_kwargs):
axes = figure.add_subplot(*location)
if logscale_x:
axes.set_xscale('log')
if logscale_y:
axes.set_yscale('log')
if title:
axes.set_title(title, x=title_position)
if x_label:
axes.set_xlabel(x_label, size=settings.LABEL_FONT_SIZE)
if y_label:
axes.set_ylabel(y_label, size=settings.LABEL_FONT_SIZE)
yield axes
for label in itertools.chain(axes.get_xticklabels(),
axes.get_yticklabels()):
label.set_size(settings.TICK_FONT_SIZE)
@contextlib.contextmanager
def basic_figure(filename, **kwargs):
with complex_figure(filename, **kwargs) as figure:
with subplot(figure, (1, 1, 1), **kwargs) as axes:
yield axes
# XXX Linewidth is not being set.
def plot(axes, plot_type, #linewidth=settings.DEFAULT_PLOT_LINE_WIDTH,
*args, **kwargs):
plt_cmd = getattr(axes, plot_type)
return plt_cmd(*args, **kwargs)
def add_legend(axes,
# handle_pad=settings.DEFAULT_LEGEND_HANDLEPAD,
*args, **kwargs):
# scaled_handle_pad = handle_pad / settings.CM_SCALE
# l = axes.legend(handletextpad=scaled_handle_pad, **kwargs)
l = axes.legend(*args, **kwargs)
l.draw_frame(False)
for text in l.get_texts():
text.set_size(settings.LEGEND_FONT_SIZE)
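# Illustrative usage sketch (file name, data arrays and labels below are assumptions):
#     with basic_figure('lengths.pdf', x_label='time [s]', y_label='length') as axes:
#         plot(axes, 'plot', times, lengths, label='filament length')
#         add_legend(axes, loc='best')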
| gpl-3.0 | -9,171,369,411,612,449,000 | 33.991379 | 77 | 0.673811 | false |
ProkopHapala/ProbeParticleModel | examples/_bak/watter_monomer_4fold/relaxed_scan.py | 1 | 1792 | #!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
print(" # ========== make & load ProbeParticle C++ library ")
LWD = '/home/prokop/git/ProbeParticleModel/code'
sys.path = [ LWD ]
import basUtils
import elements
import GridUtils as GU
import ProbeParticle as PP
print(" ============= RUN ")
print(" >> WARNING!!! OVEWRITING SETTINGS by params.ini ")
PP.loadParams( 'params.ini' )
print(" load Electrostatic Force-field ")
FFel, lvec, nDim, head = GU.loadVecFieldXsf( "FFel" )   # XSF loader assumed to live in GridUtils (GU)
print(" load Lenard-Jones Force-field ")
FFLJ, lvec, nDim, head = GU.loadVecFieldXsf( "FFLJ" )
PP.params['gridA'] = lvec[ 1,: ].copy()
PP.params['gridB'] = lvec[ 2,: ].copy()
PP.params['gridC'] = lvec[ 3,: ].copy()
PP.params['gridN'] = nDim.copy()
xTips,yTips,zTips,lvecScan = prepareGrids( )
#Ks = [ 0.25, 0.5, 1.0 ]
#Qs = [ -0.2, 0.0, +0.2 ]
#Amps = [ 2.0 ]
Ks = [ 0.5 ]
Qs = [ -0.1 ]
Amps = [ 1.0 ]
def main():
for iq,Q in enumerate( Qs ):
FF = FFLJ + FFel * Q
PP.setFF_Pointer( FF )
for ik,K in enumerate( Ks ):
dirname = "Q%1.2fK%1.2f" %(Q,K)
os.makedirs( dirname )
PP.setTip( kSpring = np.array((K,K,0.0))/-PP.eVA_Nm )
fzs = PP.relaxedScan3D( xTips, yTips, zTips )
PP.saveXSF( dirname+'/OutFz.xsf', headScan, lvecScan, fzs )
for iA,Amp in enumerate( Amps ):
AmpStr = "/Amp%2.2f" %Amp
print("Amp= ",AmpStr)
os.makedirs( dirname+AmpStr )
dfs = PP.Fz2df( fzs, dz = dz, k0 = PP.params['kCantilever'], f0=PP.params['f0Cantilever'], n=Amp/dz )
PP.plotImages( dirname+AmpStr+"/df", dfs, slices = list(range( 0, len(dfs))) )
print(" ***** ALL DONE ***** ")
#plt.show()
| mit | -3,440,017,776,031,320,600 | 26.569231 | 117 | 0.564174 | false |
azogue/enerpi | enerpi/command_enerpi.py | 1 | 18937 | # -*- coding: utf-8 -*-
"""
ENERPI - CLI methods & argument parser
"""
import datetime as dt
import os
import re
import sys
from enerpi import PRETTY_NAME, DESCRIPTION, __version__
from enerpi.base import (IMG_TILES_BASEPATH, DATA_PATH, CONFIG, SENSORS, CONFIG_FILENAME, SENSORS_CONFIG_JSON_FILENAME,
FILE_LOGGING, LOGGING_LEVEL, set_logging_conf, log, show_pi_temperature,
DEFAULT_IMG_MASK, COLOR_TILES)
# Config:
UDP_PORT = CONFIG.getint('BROADCAST', 'UDP_PORT', fallback=57775)
HDF_STORE = CONFIG.get('ENERPI_DATA', 'HDF_STORE')
def _enerpi_arguments():
"""
CLI Parser
"""
import argparse
p = argparse.ArgumentParser(description="\033[1m\033[5m\033[32m{}\033[0m\n{}\n\n".format(PRETTY_NAME, DESCRIPTION),
epilog='\033[34m\n*** By default, ENERPI starts as receiver (-r) ***\n' +
'\033[0m', formatter_class=argparse.RawTextHelpFormatter)
g_m = p.add_argument_group(title='☆ \033[1m\033[4mENERPI Working Mode\033[24m',
description='→ Choose working mode between RECEIVER / SENDER')
g_m.add_argument('-e', '--enerpi', action='store_true', help='⚡ SET ENERPI LOGGER & BROADCAST MODE')
g_m.add_argument('-r', '--receive', action='store_true', help='⚡ SET Broadcast Receiver mode (by default)')
g_m.add_argument('--port', '--receiver-port', type=int, action='store', default=UDP_PORT, metavar='XX',
help='⚡ SET Broadcast Receiver PORT')
g_m.add_argument('-d', '--demo', action='store_true', help='☮ SET Demo Mode (broadcast random values)')
g_m.add_argument('--timeout', action='store', nargs='?', type=int, metavar='∆T', const=60,
help='⚡ SET Timeout to finish execution automatically')
g_m.add_argument('--raw', type=int, action='store', nargs='?', const=5, metavar='∆T',
help='☮ SET RAW Data Mode (adquire all samples)')
g_m.add_argument('--config', action='store_true', help='⚒ Shows configuration in INI file')
g_m.add_argument('--install', action='store_true', help='⚒ Install CRON task for exec ENERPI LOGGER as daemon')
g_m.add_argument('--uninstall', action='store_true', help='⚒ Delete all CRON tasks from ENERPI')
g_p = p.add_argument_group(title='︎ℹ️ \033[4mQUERY & REPORT DATA\033[24m')
filter_24h = (dt.datetime.now().replace(microsecond=0) - dt.timedelta(hours=24)).strftime('%Y-%m-%d %H:%M:%S')
g_p.add_argument('-f', '--filter', action='store', nargs='?', metavar='TS', const=filter_24h,
help='✂ Query the HDF Store with pandas-like slicing:'
'\n "2016-01-07 :: 2016-02-01 04:00" --> df.loc["2016-01-07":"2016-02-01 04:00"]'
'\n \t(Pay atention to the double "::"!!)'
'\n · By default, "-f" filters data from 24h ago (.loc[{}:]).\n\n'.format(filter_24h))
default_img_nomask = DEFAULT_IMG_MASK.replace('{', '{{').replace('}', '}}').replace('%', '%%')
help_plot = '''⎙ Plot & save image with matplotlib in any compatible format.
· If not specified, PNG file is generated with MASK:\n "{}" using datetime data limits.
· If only specifying image format, default mask is used with the desired format.
· If image path is passed, initial (and final, optionally) timestamps of filtered data
can be used with formatting masks, like:
"/path/to/image/image_{{:%%c}}_{{:%%H%%M}}.pdf" or "report_{{:%%d%%m%%y}}.svg".'''.format(default_img_nomask)
g_p.add_argument('-p', '--plot', action='store', metavar='IM', nargs='?', const=DEFAULT_IMG_MASK, help=help_plot)
g_p.add_argument('-po', '--plot-options', action='store', metavar='OP', nargs='*',
help='''⎙ Plot options:
· rs=XX := resample data with 'XX' delta (.rolling(XX).mean()).
· rm=XX := Rolling mean data with 'XX' delta (.resample(XX).mean()).
· show := Shows plot (plt.show())''')
g_p.add_argument('-pt', '--plot-tiles', action='store_true', help='⎙ Generate SVG Tiles for enerpiWeb.')
g_st = p.add_argument_group(title='⚙ \033[4mHDF Store Options\033[24m')
g_st.add_argument('--store', action='store', metavar='ST', default=HDF_STORE,
help='✏️ Set the .h5 file where save the HDF store.\n Default: "{}"'.format(HDF_STORE))
g_st.add_argument('--backup', action='store', metavar='BKP', help='☔ Backup ALL data in CSV format')
g_st.add_argument('--reprocess', action='store_true', help='☔ RE-Process all data in ENERPI Catalog')
g_st.add_argument('--clearlog', action='store_true', help='⚠ Delete the LOG FILE at: "{}"'.format(FILE_LOGGING))
g_st.add_argument('-i', '--info', action='store_true', help='︎ℹ Show data info')
g_st.add_argument('--version', action='store_true', help='︎ℹ Show ENERPI version')
g_st.add_argument('--last', action='store_true', help='︎ℹ Show last saved data')
g_d = p.add_argument_group(title='☕ \033[4mDEBUG Options\033[24m')
g_d.add_argument('--temps', action='store_true', help='♨ Show RPI temperatures (CPU + GPU)')
g_d.add_argument('-l', '--log', action='store_true', help='☕ Show LOG FILE')
g_d.add_argument('-s', '--silent', action='store_true', help='‼ Silent mode (Verbose mode ON BY DEFAULT in CLI)')
g_ts = p.add_argument_group(title='⚒ \033[4mCurrent Meter Sampling Configuration\033[24m')
g_ts.add_argument('-T', '--delta', type=float, action='store', default=SENSORS.delta_sec_data, metavar='∆T',
help='⌚ Set Ts sampling (to database & broadcast), in seconds. Default ∆T: {} s'
.format(SENSORS.delta_sec_data))
g_ts.add_argument('-ts', type=int, action='store', default=SENSORS.ts_data_ms, metavar='∆T',
help='⏱ Set Ts raw sampling, in ms. Default ∆T_s: {} ms'.format(SENSORS.ts_data_ms))
g_ts.add_argument('-w', '--window', type=float, action='store', default=SENSORS.rms_roll_window_sec, metavar='∆T',
help='⚖ Set window width in seconds for instant RMS calculation. Default ∆T_w: {} s'
.format(SENSORS.rms_roll_window_sec))
return p.parse_args()
def make_cron_command_task_daemon():
"""
CRON periodic task for exec ENERPI LOGGER as daemon at every boot
Example command:
*/15 * * * * sudo -u www-data /home/pi/PYTHON/py35/bin/python
/home/pi/PYTHON/py35/lib/python3.5/site-packages/enerpiweb/mule_rscgen.py -o
:return: :str: cron_command
"""
# cmd_logger = '@reboot sudo -u {user_logger} {python_pathbin}/enerpi-daemon start'
cmd_logger = 'sudo -u {user_logger} {python_pathbin}/enerpi-daemon start'
local_params = dict(user_logger=CONFIG.get('ENERPI_DATA', 'USER_LOGGER', fallback='pi'),
python_pathbin=os.path.dirname(sys.executable))
return cmd_logger.format(**local_params)
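# Illustrative result (user name and interpreter path are assumptions): with USER_LOGGER='pi'
# and the interpreter living in /home/pi/PYTHON/py35/bin, the generated line reads
#     sudo -u pi /home/pi/PYTHON/py35/bin/enerpi-daemon start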
def _check_store_relpath(path_st):
    if os.path.sep not in path_st:
path_st = os.path.join(DATA_PATH, path_st)
else:
path_st = os.path.abspath(path_st)
if not os.path.splitext(path_st)[1]:
path_st += '.h5'
existe_st = os.path.exists(path_st)
if not existe_st:
log('HDF Store not found at "{}"'.format(path_st), 'warn', True)
return existe_st, path_st
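# Illustrative example (store name is an assumption): a bare file name is resolved inside
# DATA_PATH and gets the '.h5' extension appended, e.g.
#     _check_store_relpath('enerpi_data')  # --> (exists?, os.path.join(DATA_PATH, 'enerpi_data.h5'))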
def _extract_time_slice_from_args(args_filter, args_info, catalog):
if args_filter:
if args_filter == 'all':
data, consumption = catalog.get(start=catalog.min_ts, with_summary=True)
else:
loc_data = args_filter.split('::')
if len(loc_data) > 1:
if len(loc_data[0]) > 0:
data, consumption = catalog.get(start=loc_data[0], end=loc_data[1], with_summary=True)
else:
data, consumption = catalog.get(end=loc_data[1], with_summary=True)
else:
last_hours = re.findall('(\d{1,5})h', loc_data[0], flags=re.IGNORECASE)
if last_hours:
data, consumption = catalog.get(last_hours=int(last_hours[0]), with_summary=True)
else:
data, consumption = catalog.get(start=loc_data[0], with_summary=True)
elif args_info:
data, consumption = catalog.get(start=dt.datetime.now() - dt.timedelta(days=30), with_summary=True)
else:
data = consumption = None
return data, consumption
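# Illustrative filter strings accepted by the CLI (timestamps are assumptions):
#     '2016-01-07::2016-02-01 04:00'  --> catalog.get(start=..., end=...)
#     '48h'                           --> catalog.get(last_hours=48)
#     'all'                           --> everything from catalog.min_ts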
def _extract_plot_params_from_args(args_plot, args_plot_options):
show = True if 'show' in args_plot_options else False
path_saveimg = args_plot if not show else None
rs_data = rm_data = None
for arg in args_plot_options:
if arg.startswith('rs='):
rs_data = arg[3:]
break
elif arg.startswith('rm='):
rm_data = arg[3:]
try:
rm_data = int(rm_data)
except ValueError:
pass
break
return rs_data, rm_data, path_saveimg, show
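# Illustrative example (option values are assumptions):
#     _extract_plot_params_from_args('enerpi_plot.png', ['rs=5min', 'show'])
#     # --> ('5min', None, None, True): resample '5min', no rolling mean, show on screen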
def enerpi_main_cli(test_mode=False):
"""
    ENERPI usage from the CLI
    Run `enerpi -h` to show the available options
"""
# CLI Arguments
args = _enerpi_arguments()
verbose = not args.silent
if args.version:
return __version__
# CONTROL LOGIC
# Shows RPI Temps
timer_temps = show_pi_temperature(args.temps, 3, args.timeout)
if args.install or args.uninstall:
from enerpi.config.crontasks import set_command_on_reboot, clear_cron_commands
# INSTALL / UNINSTALL CRON TASKS & KEY
cmd_logger = make_cron_command_task_daemon()
if args.install:
# Logging configuration
set_logging_conf(FILE_LOGGING, LOGGING_LEVEL, True)
log('** Installing CRON task for start logger at reboot:\n"{}"'.format(cmd_logger), 'ok', True, False)
set_command_on_reboot(cmd_logger, verbose=verbose)
try:
os.chmod(DATA_PATH, 0o777)
[os.chmod(os.path.join(base, f), 0o777)
for base, dirs, files in os.walk(DATA_PATH) for f in files + dirs]
except PermissionError:
log("Can't set 777 permissions on {0}/* files...\nDo it manually, please: 'sudo chmod 777 -R {0}'"
.format(DATA_PATH), 'warning', True, False)
else:
log('** Deleting CRON task for start logger at reboot:\n"{}"'.format(cmd_logger), 'warn', True, False)
clear_cron_commands([cmd_logger], verbose=verbose)
elif (args.enerpi or args.info or args.backup or args.reprocess or args.config or args.raw or
args.last or args.clearlog or args.filter or args.plot or args.plot_tiles):
# Init CLI
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 200)
# Logging configuration
set_logging_conf(FILE_LOGGING, LOGGING_LEVEL, True)
# Shows INI config & SENSORS
if args.config:
import json
log('ENERPI Configuration (from INI file in "{}"):'
.format(os.path.join(DATA_PATH, CONFIG_FILENAME)), 'ok', True, False)
for s in CONFIG.sections():
log('* Section {}:'.format(s), 'info', True, False)
for opt in CONFIG.options(s):
log('{:27} -->\t{}'.format(opt.upper(), CONFIG.get(s, opt)), 'debug', True, False)
log('*' * 80 + '\n', 'ok', True, False)
log('\nENERPI SENSORS Config (from JSON file in "{}"):'
.format(os.path.join(DATA_PATH, SENSORS_CONFIG_JSON_FILENAME)), 'ok', True, False)
json_content = json.loads(open(os.path.join(DATA_PATH, SENSORS_CONFIG_JSON_FILENAME), 'r').read())
log('\n'.join(['{}'.format(s) for s in json_content]), 'magenta', True, False)
log('--> {}\n\n'.format(SENSORS), 'ok', True, False)
# Delete LOG File
if args.clearlog:
from enerpi.database import delete_log_file
delete_log_file(FILE_LOGGING, verbose=verbose)
# Data Store Config
_existe_st, path_st = _check_store_relpath(args.store)
# Starts ENERPI Logger
if args.enerpi:
from enerpi.enerpimeter import enerpi_logger
# Demo logger
if args.demo:
set_logging_conf(FILE_LOGGING + '_demo.log', LOGGING_LEVEL, True)
path_st = os.path.join(DATA_PATH, 'debug_buffer_disk.h5')
enerpi_logger(is_demo=args.demo, verbose=verbose, path_st=path_st, delta_sampling=args.delta,
roll_time=args.window, sampling_ms=args.ts, timeout=args.timeout)
elif args.backup:
from enerpi.database import init_catalog
# Export data to CSV:
catalog = init_catalog(sensors=SENSORS, raw_file=path_st, check_integrity=False,
verbose=verbose, test_mode=test_mode)
export_ok = catalog.export_chunk(args.backup)
log('EXPORT OK? {}'.format(export_ok), 'ok' if export_ok else 'error', True, False)
elif args.reprocess:
from enerpi.database import init_catalog
# Re-process all data in catalog
catalog = init_catalog(sensors=SENSORS, raw_file=path_st, check_integrity=False,
verbose=verbose, test_mode=test_mode)
repro_ok = catalog.reprocess_all_data()
log('REPROCESS OK? {}'.format(repro_ok), 'ok' if repro_ok else 'error', verbose, verbose)
        # TODO review X11 config + ssh -X for plotting on a local display
elif args.raw:
from enerpi.enerpimeter import enerpi_raw_data
# Raw mode
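            # args.raw carries the sampling duration (in seconds) for the raw capture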
delta_secs = args.raw
raw_data = enerpi_raw_data(path_st.replace('.h5', '_raw_sample.h5'), delta_secs=delta_secs,
use_dummy_sensors=args.demo,
roll_time=args.window, sampling_ms=args.ts, verbose=verbose)
t0, tf = raw_data.index[0], raw_data.index[-1]
log('Showing RAW DATA for {} seconds ({} samples, {:.2f} sps)\n** Real data: from {} to {} --> {:.2f} sps'
.format(delta_secs, len(raw_data), len(raw_data) / delta_secs,
t0, tf, len(raw_data) / (tf-t0).total_seconds()), 'info', verbose, False)
raw_data.plot(lw=.5, figsize=(16, 10))
plt.show()
# Shows database info
elif args.info or args.filter or args.plot or args.plot_tiles:
from enerpi.database import init_catalog, show_info_data
catalog = init_catalog(sensors=SENSORS, raw_file=path_st, check_integrity=False,
verbose=verbose, test_mode=test_mode)
if args.plot_tiles:
from enerpiplot.enerplot import gen_svg_tiles
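                # In test mode a fixed red RGB color is used instead of the configured tile color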
ok = gen_svg_tiles(IMG_TILES_BASEPATH, catalog, color=COLOR_TILES if not test_mode else (1, 0, 0))
if ok:
log('SVG Tiles generated!', 'ok', verbose, True)
else:
                    log('SVG Tiles were not generated!', 'error', verbose, True)
else:
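                # Slice the stored data (and its consumption summary) according to the requested filter/info arguments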
data, consumption = _extract_time_slice_from_args(args.filter, args.info, catalog)
if (args.info or args.filter) and data is not None and not data.empty:
show_info_data(data, consumption)
if (args.plot and (data is not None) and not data.empty and (consumption is not None) and
not consumption.empty):
from enerpiplot.enerplot import plot_power_consumption_hourly
rs_data, rm_data, path_saveimg, show = _extract_plot_params_from_args(args.plot, args.plot_options)
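                # Mean-value sensor columns are optional; pass None when the sensors config defines none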
mean_sensor_data = data[SENSORS.columns_sensors_mean] if SENSORS.columns_sensors_mean else None
log('Generate PLOT with RESAMPLE={}, ROLLING={}, show={}, path_save="{}"'
.format(rs_data, rm_data, show, path_saveimg), 'info', verbose, False)
img_name = plot_power_consumption_hourly(data[SENSORS.columns_sensors_rms],
consumption.kWh, data_mean_s=mean_sensor_data,
rs_data=rs_data, rm_data=rm_data,
show=show, path_saveimg=path_saveimg)
if img_name is not None:
log('IMAGE saved in "{}"'.format(img_name), 'ok', verbose, False)
else: # Shows last 10 entries
from enerpi.database import get_ts_last_save
last = get_ts_last_save(path_st, get_last_sample=True, verbose=verbose, n=10)
log('Showing last 10 entries in {}:\n{}'.format(path_st, last), 'info', verbose, False)
# Shows & extract info from LOG File
elif args.log:
from enerpi.database import extract_log_file
data_log = extract_log_file(FILE_LOGGING, extract_temps=True, verbose=verbose)
try:
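            # Keep only log rows with an RPI temperature reading and drop message/metadata columns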
df_temps = data_log[data_log.temp.notnull()].drop(['tipo', 'msg', 'debug_send'], axis=1).dropna(axis=1)
if len(set(df_temps['exec'])) == 1:
df_temps = df_temps.drop(['exec'], axis=1)
path_csv = os.path.join(DATA_PATH, 'debug_rpitemps.csv')
if not df_temps.empty:
df_temps.to_csv(path_csv)
print('*** Saved temperature data extracted from LOG in {}'.format(path_csv))
except AttributeError:
            print('No RPI temperature data found in the LOG')
# Demo sender
elif args.demo:
from enerpi.enerpimeter import enerpi_logger
set_logging_conf(FILE_LOGGING + '_demo.log', LOGGING_LEVEL, True)
path_st = os.path.join(DATA_PATH, 'debug_buffer_disk.h5')
enerpi_logger(is_demo=True, verbose=verbose, path_st=path_st, delta_sampling=args.delta,
roll_time=args.window, sampling_ms=args.ts, timeout=args.timeout)
# Receiver
else: # elif args.receive:
from enerpi.enerpimeter import receiver
log('{}\n {}'.format(PRETTY_NAME, DESCRIPTION), 'ok', verbose, False)
receiver(verbose=verbose, timeout=args.timeout, port=args.port)
if timer_temps is not None:
log('Stopping RPI TEMPS sensing...', 'debug', True)
timer_temps.cancel()
log('Exiting from ENERPI CLI...', 'debug', True)
if not test_mode:
sys.exit(0)


if __name__ == '__main__':
enerpi_main_cli(test_mode=False)
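# Programmatic usage sketch (illustrative; the import path of this module is an assumption):
#   from enerpi.command_enerpi import enerpi_main_cli
#   enerpi_main_cli(test_mode=True)  # test_mode=True skips the final sys.exit(0)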
| mit | 455,884,504,553,015,500 | 51.912921 | 119 | 0.582311 | false |